file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
router_module.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {APP_BASE_HREF, HashLocationStrategy, Location, LOCATION_INITIALIZED, LocationStrategy, PathLocationStrategy, PlatformLocation, ViewportScroller} from '@angular/common';
import {ANALYZE_FOR_ENTRY_COMPONENTS, APP_BOOTSTRAP_LISTENER, APP_INITIALIZER, ApplicationRef, Compiler, ComponentRef, Inject, Injectable, InjectionToken, Injector, ModuleWithProviders, NgModule, NgProbeToken, OnDestroy, Optional, Provider, SkipSelf} from '@angular/core';
import {of, Subject} from 'rxjs';
import {EmptyOutletComponent} from './components/empty_outlet';
import {Route, Routes} from './config';
import {RouterLink, RouterLinkWithHref} from './directives/router_link';
import {RouterLinkActive} from './directives/router_link_active';
import {RouterOutlet} from './directives/router_outlet';
import {Event} from './events';
import {RouteReuseStrategy} from './route_reuse_strategy';
import {ErrorHandler, Router} from './router';
import {ROUTES} from './router_config_loader';
import {ChildrenOutletContexts} from './router_outlet_context';
import {NoPreloading, PreloadAllModules, PreloadingStrategy, RouterPreloader} from './router_preloader';
import {RouterScroller} from './router_scroller';
import {ActivatedRoute} from './router_state';
import {UrlHandlingStrategy} from './url_handling_strategy';
import {DefaultUrlSerializer, UrlSerializer, UrlTree} from './url_tree';
import {flatten} from './utils/collection';
/**
 * The directives defined in the `RouterModule`.
 */
const ROUTER_DIRECTIVES =
    [RouterOutlet, RouterLink, RouterLinkWithHref, RouterLinkActive, EmptyOutletComponent];

/**
 * A [DI token](guide/glossary/#di-token) for the router service.
 *
 * @publicApi
 */
export const ROUTER_CONFIGURATION = new InjectionToken<ExtraOptions>('ROUTER_CONFIGURATION');

/**
 * @docsNotRequired
 */
export const ROUTER_FORROOT_GUARD = new InjectionToken<void>('ROUTER_FORROOT_GUARD');

// All providers required for a fully functional `Router`; registered once by
// `RouterModule.forRoot()`.
export const ROUTER_PROVIDERS: Provider[] = [
  Location,
  {provide: UrlSerializer, useClass: DefaultUrlSerializer},
  {
    provide: Router,
    useFactory: setupRouter,
    // Optional strategies are wrapped in arrays so DI treats them as
    // optional positional dependencies of `setupRouter`.
    deps: [
      UrlSerializer, ChildrenOutletContexts, Location, Injector, Compiler, ROUTES,
      ROUTER_CONFIGURATION, [UrlHandlingStrategy, new Optional()],
      [RouteReuseStrategy, new Optional()]
    ]
  },
  ChildrenOutletContexts,
  {provide: ActivatedRoute, useFactory: rootRoute, deps: [Router]},
  RouterPreloader,
  NoPreloading,
  PreloadAllModules,
  // Default configuration; `forRoot()` overrides this token when a config is given.
  {provide: ROUTER_CONFIGURATION, useValue: {enableTracing: false}},
];
/**
 * Factory for the `NgProbeToken` that exposes the `Router` to debugging tools.
 */
export function routerNgProbeToken() {
  const probe = new NgProbeToken('Router', Router);
  return probe;
}
/**
* @description
*
* Adds directives and providers for in-app navigation among views defined in an application.
* Use the Angular `Router` service to declaratively specify application states and manage state
* transitions.
*
* You can import this NgModule multiple times, once for each lazy-loaded bundle.
* However, only one `Router` service can be active.
* To ensure this, there are two ways to register routes when importing this module:
*
* * The `forRoot()` method creates an `NgModule` that contains all the directives, the given
* routes, and the `Router` service itself.
* * The `forChild()` method creates an `NgModule` that contains all the directives and the given
* routes, but does not include the `Router` service.
*
* @see [Routing and Navigation guide](guide/router) for an
* overview of how the `Router` service should be used.
*
* @publicApi
*/
@NgModule({
  declarations: ROUTER_DIRECTIVES,
  exports: ROUTER_DIRECTIVES,
})
export class RouterModule {
  // Note: We are injecting the Router so it gets created eagerly...
  constructor(@Optional() @Inject(ROUTER_FORROOT_GUARD) guard: any, @Optional() router: Router) {}

  /**
   * Creates and configures a module with all the router providers and directives.
   * Optionally sets up an application listener to perform an initial navigation.
   *
   * When registering the NgModule at the root, import as follows:
   *
   * ```
   * @NgModule({
   *   imports: [RouterModule.forRoot(ROUTES)]
   * })
   * class MyNgModule {}
   * ```
   *
   * @param routes An array of `Route` objects that define the navigation paths for the application.
   * @param config An `ExtraOptions` configuration object that controls how navigation is performed.
   * @return The new `NgModule`.
   *
   */
  static forRoot(routes: Routes, config?: ExtraOptions): ModuleWithProviders<RouterModule> {
    return {
      ngModule: RouterModule,
      providers: [
        ROUTER_PROVIDERS,
        provideRoutes(routes),
        {
          // Dev-mode guard: throws if a Router already exists in a parent
          // injector, i.e. forRoot() was called more than once.
          provide: ROUTER_FORROOT_GUARD,
          useFactory: provideForRootGuard,
          deps: [[Router, new Optional(), new SkipSelf()]]
        },
        {provide: ROUTER_CONFIGURATION, useValue: config ? config : {}},
        {
          // Hash vs. path location strategy is chosen from `config.useHash`.
          provide: LocationStrategy,
          useFactory: provideLocationStrategy,
          deps:
              [PlatformLocation, [new Inject(APP_BASE_HREF), new Optional()], ROUTER_CONFIGURATION]
        },
        {
          provide: RouterScroller,
          useFactory: createRouterScroller,
          deps: [Router, ViewportScroller, ROUTER_CONFIGURATION]
        },
        {
          provide: PreloadingStrategy,
          useExisting: config && config.preloadingStrategy ? config.preloadingStrategy :
                                                             NoPreloading
        },
        {provide: NgProbeToken, multi: true, useFactory: routerNgProbeToken},
        provideRouterInitializer(),
      ],
    };
  }

  /**
   * Creates a module with all the router directives and a provider registering routes,
   * without creating a new Router service.
   * When registering for submodules and lazy-loaded submodules, create the NgModule as follows:
   *
   * ```
   * @NgModule({
   *   imports: [RouterModule.forChild(ROUTES)]
   * })
   * class MyNgModule {}
   * ```
   *
   * @param routes An array of `Route` objects that define the navigation paths for the submodule.
   * @return The new NgModule.
   *
   */
  static forChild(routes: Routes): ModuleWithProviders<RouterModule> {
    return {ngModule: RouterModule, providers: [provideRoutes(routes)]};
  }
}
/**
 * Factory that builds the `RouterScroller`, first applying any configured
 * scroll offset to the `ViewportScroller`.
 */
export function createRouterScroller(
    router: Router, viewportScroller: ViewportScroller, config: ExtraOptions): RouterScroller {
  const offset = config.scrollOffset;
  if (offset) {
    viewportScroller.setOffset(offset);
  }
  return new RouterScroller(router, viewportScroller, config);
}
/**
 * Factory for the `LocationStrategy`: fragment-based (`#/...`) URLs when
 * `options.useHash` is set, HTML5 history-API URLs otherwise.
 */
export function provideLocationStrategy(
    platformLocationStrategy: PlatformLocation, baseHref: string, options: ExtraOptions = {}) {
  if (options.useHash) {
    return new HashLocationStrategy(platformLocationStrategy, baseHref);
  }
  return new PathLocationStrategy(platformLocationStrategy, baseHref);
}
export function provideForRootGuard(router: Router): any {
if ((typeof ngDevMode === 'undefined' || ngDevMode) && router) {
throw new Error(
`RouterModule.forRoot() called twice. Lazy loaded modules should use RouterModule.forChild() instead.`);
}
return 'guarded';
}
/**
* Registers a [DI provider](guide/glossary#provider) for a set of routes.
* @param routes The route configuration to provide.
*
* @usageNotes
*
* ```
* @NgModule({
* imports: [RouterModule.forChild(ROUTES)],
* providers: [provideRoutes(EXTRA_ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @publicApi
*/
/**
 * Builds the pair of multi-providers that register `routes` with the router
 * (and with the entry-component analyzer for ViewEngine builds).
 */
export function provideRoutes(routes: Routes): any {
  const multi = true;
  return [
    {provide: ANALYZE_FOR_ENTRY_COMPONENTS, multi, useValue: routes},
    {provide: ROUTES, multi, useValue: routes},
  ];
}
/**
* Allowed values in an `ExtraOptions` object that configure
* when the router performs the initial navigation operation.
*
* * 'enabledNonBlocking' - (default) The initial navigation starts after the
* root component has been created. The bootstrap is not blocked on the completion of the initial
* navigation.
* * 'enabledBlocking' - The initial navigation starts before the root component is created.
* The bootstrap is blocked until the initial navigation is complete. This value is required
* for [server-side rendering](guide/universal) to work.
* * 'disabled' - The initial navigation is not performed. The location listener is set up before
* the root component gets created. Use if there is a reason to have
* more control over when the router starts its initial navigation due to some complex
* initialization logic.
*
* The following values have been [deprecated](guide/releases#deprecation-practices) since v11,
* and should not be used for new applications.
*
* * 'enabled' - This option is 1:1 replaceable with `enabledBlocking`.
*
* @see `forRoot()`
*
* @publicApi
*/
export type InitialNavigation = 'disabled'|'enabled'|'enabledBlocking'|'enabledNonBlocking';
/**
* A set of configuration options for a router module, provided in the
* `forRoot()` method.
*
* @see `forRoot()`
*
*
* @publicApi
*/
export interface ExtraOptions {
  /**
   * When true, log all internal navigation events to the console.
   * Use for debugging.
   */
  enableTracing?: boolean;

  /**
   * When true, enable the location strategy that uses the URL fragment
   * instead of the history API.
   */
  useHash?: boolean;

  /**
   * One of `enabled`, `enabledBlocking`, `enabledNonBlocking` or `disabled`.
   * When set to `enabled` or `enabledBlocking`, the initial navigation starts before the root
   * component is created. The bootstrap is blocked until the initial navigation is complete. This
   * value is required for [server-side rendering](guide/universal) to work. When set to
   * `enabledNonBlocking`, the initial navigation starts after the root component has been created.
   * The bootstrap is not blocked on the completion of the initial navigation. When set to
   * `disabled`, the initial navigation is not performed. The location listener is set up before the
   * root component gets created. Use if there is a reason to have more control over when the router
   * starts its initial navigation due to some complex initialization logic.
   */
  initialNavigation?: InitialNavigation;

  /**
   * A custom error handler for failed navigations.
   * If the handler returns a value, the navigation Promise is resolved with this value.
   * If the handler throws an exception, the navigation Promise is rejected with the exception.
   *
   */
  errorHandler?: ErrorHandler;

  /**
   * Configures a preloading strategy.
   * One of `PreloadAllModules` or `NoPreloading` (the default).
   */
  preloadingStrategy?: any;

  /**
   * Define what the router should do if it receives a navigation request to the current URL.
   * Default is `ignore`, which causes the router to ignore the navigation.
   * This can disable features such as a "refresh" button.
   * Use this option to configure the behavior when navigating to the
   * current URL. Default is 'ignore'.
   */
  onSameUrlNavigation?: 'reload'|'ignore';

  /**
   * Configures if the scroll position needs to be restored when navigating back.
   *
   * * 'disabled'- (Default) Does nothing. Scroll position is maintained on navigation.
   * * 'top'- Sets the scroll position to x = 0, y = 0 on all navigation.
   * * 'enabled'- Restores the previous scroll position on backward navigation, else sets the
   * position to the anchor if one is provided, or sets the scroll position to [0, 0] (forward
   * navigation). This option will be the default in the future.
   *
   * You can implement custom scroll restoration behavior by adapting the enabled behavior as
   * in the following example.
   *
   * ```typescript
   * class AppModule {
   *   constructor(router: Router, viewportScroller: ViewportScroller) {
   *     router.events.pipe(
   *       filter((e: Event): e is Scroll => e instanceof Scroll)
   *     ).subscribe(e => {
   *       if (e.position) {
   *         // backward navigation
   *         viewportScroller.scrollToPosition(e.position);
   *       } else if (e.anchor) {
   *         // anchor navigation
   *         viewportScroller.scrollToAnchor(e.anchor);
   *       } else {
   *         // forward navigation
   *         viewportScroller.scrollToPosition([0, 0]);
   *       }
   *     });
   *   }
   * }
   * ```
   */
  scrollPositionRestoration?: 'disabled'|'enabled'|'top';

  /**
   * When set to 'enabled', scrolls to the anchor element when the URL has a fragment.
   * Anchor scrolling is disabled by default.
   *
   * Anchor scrolling does not happen on 'popstate'. Instead, we restore the position
   * that we stored or scroll to the top.
   */
  anchorScrolling?: 'disabled'|'enabled';

  /**
   * Configures the scroll offset the router will use when scrolling to an element.
   *
   * When given a tuple with x and y position value,
   * the router uses that offset each time it scrolls.
   * When given a function, the router invokes the function every time
   * it restores scroll position.
   */
  scrollOffset?: [number, number]|(() => [number, number]);

  /**
   * Defines how the router merges parameters, data, and resolved data from parent to child
   * routes. By default ('emptyOnly'), inherits parent parameters only for
   * path-less or component-less routes.
   *
   * Set to 'always' to enable unconditional inheritance of parent parameters.
   *
   * Note that when dealing with matrix parameters, "parent" refers to the parent `Route`
   * config which does not necessarily mean the "URL segment to the left". When the `Route` `path`
   * contains multiple segments, the matrix parameters must appear on the last segment. For example,
   * matrix parameters for `{path: 'a/b', component: MyComp}` should appear as `a/b;foo=bar` and not
   * `a;foo=bar/b`.
   *
   */
  paramsInheritanceStrategy?: 'emptyOnly'|'always';

  /**
   * A custom handler for malformed URI errors. The handler is invoked when `encodedURI` contains
   * invalid character sequences.
   * The default implementation is to redirect to the root URL, dropping
   * any path or parameter information. The function takes three parameters:
   *
   * - `'URIError'` - Error thrown when parsing a bad URL.
   * - `'UrlSerializer'` - UrlSerializer that’s configured with the router.
   * - `'url'` -  The malformed URL that caused the URIError
   * */
  malformedUriErrorHandler?:
      (error: URIError, urlSerializer: UrlSerializer, url: string) => UrlTree;

  /**
   * Defines when the router updates the browser URL. By default ('deferred'),
   * update after successful navigation.
   * Set to 'eager' if prefer to update the URL at the beginning of navigation.
   * Updating the URL early allows you to handle a failure of navigation by
   * showing an error message with the URL that failed.
   */
  urlUpdateStrategy?: 'deferred'|'eager';

  /**
   * Enables a bug fix that corrects relative link resolution in components with empty paths.
   * Example:
   *
   * ```
   * const routes = [
   *   {
   *     path: '',
   *     component: ContainerComponent,
   *     children: [
   *       { path: 'a', component: AComponent },
   *       { path: 'b', component: BComponent },
   *     ]
   *   }
   * ];
   * ```
   *
   * From the `ContainerComponent`, you should be able to navigate to `AComponent` using
   * the following `routerLink`, but it will not work if `relativeLinkResolution` is set
   * to `'legacy'`:
   *
   * `<a [routerLink]="['./a']">Link to A</a>`
   *
   * However, this will work:
   *
   * `<a [routerLink]="['../a']">Link to A</a>`
   *
   * In other words, you're required to use `../` rather than `./` when the relative link
   * resolution is set to `'legacy'`.
   *
   * The default in v11 is `corrected`.
   */
  relativeLinkResolution?: 'legacy'|'corrected';

  /**
   * Configures how the Router attempts to restore state when a navigation is cancelled.
   *
   * 'replace' - Always uses `location.replaceState` to set the browser state to the state of the
   * router before the navigation started. This means that if the URL of the browser is updated
   * _before_ the navigation is canceled, the Router will simply replace the item in history rather
   * than trying to restore to the previous location in the session history. This happens most
   * frequently with `urlUpdateStrategy: 'eager'` and navigations with the browser back/forward
   * buttons.
   *
   * 'computed' - Will attempt to return to the same index in the session history that corresponds
   * to the Angular route when the navigation gets cancelled. For example, if the browser back
   * button is clicked and the navigation is cancelled, the Router will trigger a forward navigation
   * and vice versa.
   *
   * Note: the 'computed' option is incompatible with any `UrlHandlingStrategy` which only
   * handles a portion of the URL because the history restoration navigates to the previous place in
   * the browser history rather than simply resetting a portion of the URL.
   *
   * The default value is `replace` when not set.
   */
  canceledNavigationResolution?: 'replace'|'computed';
}
/**
 * Factory for the `Router` provider: constructs the router from its DI
 * dependencies and applies the configured `ExtraOptions`.
 *
 * `config` is the value of the multi-provider `ROUTES` token, hence an array
 * of route arrays that is flattened before use.
 */
export function setupRouter(
    urlSerializer: UrlSerializer, contexts: ChildrenOutletContexts, location: Location,
    injector: Injector, compiler: Compiler, config: Route[][], opts: ExtraOptions = {},
    urlHandlingStrategy?: UrlHandlingStrategy, routeReuseStrategy?: RouteReuseStrategy) {
  const router =
      new Router(null, urlSerializer, contexts, location, injector, compiler, flatten(config));

  // Optional strategies are only assigned when provided, preserving the
  // router's built-in defaults otherwise.
  if (urlHandlingStrategy) {
    router.urlHandlingStrategy = urlHandlingStrategy;
  }

  if (routeReuseStrategy) {
    router.routeReuseStrategy = routeReuseStrategy;
  }

  assignExtraOptionsToRouter(opts, router);

  if (opts.enableTracing) {
    // Log every router event; `console.group` may be absent in some consoles,
    // hence the optional calls.
    router.events.subscribe((e: Event) => {
      // tslint:disable:no-console
      console.group?.(`Router Event: ${(<any>e.constructor).name}`);
      console.log(e.toString());
      console.log(e);
      console.groupEnd?.();
      // tslint:enable:no-console
    });
  }

  return router;
}
/**
 * Copies each configured option from `opts` onto `router`. Options that are
 * unset (or falsy) are skipped so the router keeps its built-in defaults.
 */
export function assignExtraOptionsToRouter(opts: ExtraOptions, router: Router): void {
  const {
    errorHandler,
    malformedUriErrorHandler,
    onSameUrlNavigation,
    paramsInheritanceStrategy,
    relativeLinkResolution,
    urlUpdateStrategy,
    canceledNavigationResolution,
  } = opts;

  if (errorHandler) {
    router.errorHandler = errorHandler;
  }
  if (malformedUriErrorHandler) {
    router.malformedUriErrorHandler = malformedUriErrorHandler;
  }
  if (onSameUrlNavigation) {
    router.onSameUrlNavigation = onSameUrlNavigation;
  }
  if (paramsInheritanceStrategy) {
    router.paramsInheritanceStrategy = paramsInheritanceStrategy;
  }
  if (relativeLinkResolution) {
    router.relativeLinkResolution = relativeLinkResolution;
  }
  if (urlUpdateStrategy) {
    router.urlUpdateStrategy = urlUpdateStrategy;
  }
  if (canceledNavigationResolution) {
    router.canceledNavigationResolution = canceledNavigationResolution;
  }
}
/** Factory for the root `ActivatedRoute`: the root of the router's current state. */
export function rootRoute(router: Router): ActivatedRoute {
  const {routerState} = router;
  return routerState.root;
}
/**
* Router initialization requires two steps:
*
* First, we start the navigation in a `APP_INITIALIZER` to block the bootstrap if
* a resolver or a guard executes asynchronously.
*
* Next, we actually run activation in a `BOOTSTRAP_LISTENER`, using the
* `afterPreactivation` hook provided by the router.
* The router navigation starts, reaches the point when preactivation is done, and then
* pauses. It waits for the hook to be resolved. We then resolve it only in a bootstrap listener.
*/
@Injectable()
export class RouterInitializer implements OnDestroy {
  // Set once the first navigation reaches preactivation; later navigations
  // must not be delayed.
  private initNavigation = false;
  // Set in ngOnDestroy; guards against DI lookups after the injector is torn down.
  private destroyed = false;
  // Resolved/completed by bootstrapListener() to unpause a blocking initial navigation.
  private resultOfPreactivationDone = new Subject<void>();

  constructor(private injector: Injector) {}

  /**
   * `APP_INITIALIZER` hook. Depending on `initialNavigation`, either starts
   * the first navigation (pausing it at preactivation for blocking modes),
   * only sets up the location listener ('disabled'), or does nothing yet
   * (non-blocking default, handled in `bootstrapListener`).
   */
  appInitializer(): Promise<any> {
    const p: Promise<any> = this.injector.get(LOCATION_INITIALIZED, Promise.resolve(null));
    return p.then(() => {
      // If the injector was destroyed, the DI lookups below will fail.
      if (this.destroyed) {
        return Promise.resolve(true);
      }

      let resolve: Function = null!;
      const res = new Promise(r => resolve = r);
      const router = this.injector.get(Router);
      const opts = this.injector.get(ROUTER_CONFIGURATION);

      if (opts.initialNavigation === 'disabled') {
        // Track URL changes but do not navigate.
        router.setUpLocationChangeListener();
        resolve(true);
      } else if (
          // TODO: enabled is deprecated as of v11, can be removed in v13
          opts.initialNavigation === 'enabled' || opts.initialNavigation === 'enabledBlocking') {
        router.hooks.afterPreactivation = () => {
          // only the initial navigation should be delayed
          if (!this.initNavigation) {
            this.initNavigation = true;
            // Unblock bootstrap; the navigation itself stays paused until
            // resultOfPreactivationDone emits (see bootstrapListener).
            resolve(true);
            return this.resultOfPreactivationDone;

            // subsequent navigations should not be delayed
          } else {
            return of(null) as any;
          }
        };
        router.initialNavigation();
      } else {
        resolve(true);
      }

      return res;
    });
  }

  /**
   * `APP_BOOTSTRAP_LISTENER` hook (via ROUTER_INITIALIZER). Finishes router
   * setup once the first root component is bootstrapped: starts the initial
   * navigation for the non-blocking default mode, enables preloading and
   * scrolling, and releases any navigation paused in `appInitializer`.
   */
  bootstrapListener(bootstrappedComponentRef: ComponentRef<any>): void {
    const opts = this.injector.get(ROUTER_CONFIGURATION);
    const preloader = this.injector.get(RouterPreloader);
    const routerScroller = this.injector.get(RouterScroller);
    const router = this.injector.get(Router);
    const ref = this.injector.get<ApplicationRef>(ApplicationRef);

    // Only react to the first component bootstrapped in this application.
    if (bootstrappedComponentRef !== ref.components[0]) {
      return;
    }

    // Default case
    if (opts.initialNavigation === 'enabledNonBlocking' || opts.initialNavigation === undefined) {
      router.initialNavigation();
    }

    preloader.setUpPreloading();
    routerScroller.init();
    router.resetRootComponentType(ref.componentTypes[0]);
    // Release the navigation paused at preactivation (blocking modes).
    this.resultOfPreactivationDone.next(null!);
    this.resultOfPreactivationDone.complete();
  }

  ngOnDestroy() {
    this.destroyed = true;
  }
}
export function ge | : RouterInitializer) {
return r.appInitializer.bind(r);
}
/**
 * Returns `RouterInitializer.bootstrapListener` bound to its instance, for
 * use as the `ROUTER_INITIALIZER` / `APP_BOOTSTRAP_LISTENER` factory.
 */
export function getBootstrapListener(r: RouterInitializer) {
  const listener = r.bootstrapListener.bind(r);
  return listener;
}
/**
* A [DI token](guide/glossary/#di-token) for the router initializer that
* is called after the app is bootstrapped.
*
* @publicApi
*/
export const ROUTER_INITIALIZER =
new InjectionToken<(compRef: ComponentRef<any>) => void>('Router Initializer');
/**
 * Builds the providers that wire `RouterInitializer` into the application
 * bootstrap sequence (APP_INITIALIZER and APP_BOOTSTRAP_LISTENER).
 */
export function provideRouterInitializer(): ReadonlyArray<Provider> {
  const providers: Provider[] = [
    RouterInitializer,
    {
      provide: APP_INITIALIZER,
      multi: true,
      useFactory: getAppInitializer,
      deps: [RouterInitializer],
    },
    {provide: ROUTER_INITIALIZER, useFactory: getBootstrapListener, deps: [RouterInitializer]},
    {provide: APP_BOOTSTRAP_LISTENER, multi: true, useExisting: ROUTER_INITIALIZER},
  ];
  return providers;
}
| tAppInitializer(r | identifier_name |
router_module.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {APP_BASE_HREF, HashLocationStrategy, Location, LOCATION_INITIALIZED, LocationStrategy, PathLocationStrategy, PlatformLocation, ViewportScroller} from '@angular/common';
import {ANALYZE_FOR_ENTRY_COMPONENTS, APP_BOOTSTRAP_LISTENER, APP_INITIALIZER, ApplicationRef, Compiler, ComponentRef, Inject, Injectable, InjectionToken, Injector, ModuleWithProviders, NgModule, NgProbeToken, OnDestroy, Optional, Provider, SkipSelf} from '@angular/core';
import {of, Subject} from 'rxjs';
import {EmptyOutletComponent} from './components/empty_outlet';
import {Route, Routes} from './config';
import {RouterLink, RouterLinkWithHref} from './directives/router_link';
import {RouterLinkActive} from './directives/router_link_active';
import {RouterOutlet} from './directives/router_outlet';
import {Event} from './events';
import {RouteReuseStrategy} from './route_reuse_strategy';
import {ErrorHandler, Router} from './router';
import {ROUTES} from './router_config_loader';
import {ChildrenOutletContexts} from './router_outlet_context';
import {NoPreloading, PreloadAllModules, PreloadingStrategy, RouterPreloader} from './router_preloader';
import {RouterScroller} from './router_scroller';
import {ActivatedRoute} from './router_state';
import {UrlHandlingStrategy} from './url_handling_strategy';
import {DefaultUrlSerializer, UrlSerializer, UrlTree} from './url_tree';
import {flatten} from './utils/collection';
/**
 * The directives defined in the `RouterModule`.
 */
const ROUTER_DIRECTIVES =
    [RouterOutlet, RouterLink, RouterLinkWithHref, RouterLinkActive, EmptyOutletComponent];

/**
 * A [DI token](guide/glossary/#di-token) for the router service.
 *
 * @publicApi
 */
export const ROUTER_CONFIGURATION = new InjectionToken<ExtraOptions>('ROUTER_CONFIGURATION');

/**
 * @docsNotRequired
 */
export const ROUTER_FORROOT_GUARD = new InjectionToken<void>('ROUTER_FORROOT_GUARD');

// All providers required for a fully functional `Router`; registered once by
// `RouterModule.forRoot()`.
export const ROUTER_PROVIDERS: Provider[] = [
  Location,
  {provide: UrlSerializer, useClass: DefaultUrlSerializer},
  {
    provide: Router,
    useFactory: setupRouter,
    // Optional strategies are wrapped in arrays so DI treats them as
    // optional positional dependencies of `setupRouter`.
    deps: [
      UrlSerializer, ChildrenOutletContexts, Location, Injector, Compiler, ROUTES,
      ROUTER_CONFIGURATION, [UrlHandlingStrategy, new Optional()],
      [RouteReuseStrategy, new Optional()]
    ]
  },
  ChildrenOutletContexts,
  {provide: ActivatedRoute, useFactory: rootRoute, deps: [Router]},
  RouterPreloader,
  NoPreloading,
  PreloadAllModules,
  // Default configuration; `forRoot()` overrides this token when a config is given.
  {provide: ROUTER_CONFIGURATION, useValue: {enableTracing: false}},
];
/**
 * Factory for the `NgProbeToken` that exposes the `Router` to debugging tools.
 */
export function routerNgProbeToken() {
  const probe = new NgProbeToken('Router', Router);
  return probe;
}
/**
* @description
*
* Adds directives and providers for in-app navigation among views defined in an application.
* Use the Angular `Router` service to declaratively specify application states and manage state
* transitions.
*
* You can import this NgModule multiple times, once for each lazy-loaded bundle.
* However, only one `Router` service can be active.
* To ensure this, there are two ways to register routes when importing this module:
*
* * The `forRoot()` method creates an `NgModule` that contains all the directives, the given
* routes, and the `Router` service itself.
* * The `forChild()` method creates an `NgModule` that contains all the directives and the given
* routes, but does not include the `Router` service.
*
* @see [Routing and Navigation guide](guide/router) for an
* overview of how the `Router` service should be used.
*
* @publicApi
*/
@NgModule({
  declarations: ROUTER_DIRECTIVES,
  exports: ROUTER_DIRECTIVES,
})
export class RouterModule {
  // Note: We are injecting the Router so it gets created eagerly...
  constructor(@Optional() @Inject(ROUTER_FORROOT_GUARD) guard: any, @Optional() router: Router) {}

  /**
   * Creates and configures a module with all the router providers and directives.
   * Optionally sets up an application listener to perform an initial navigation.
   *
   * When registering the NgModule at the root, import as follows:
   *
   * ```
   * @NgModule({
   *   imports: [RouterModule.forRoot(ROUTES)]
   * })
   * class MyNgModule {}
   * ```
   *
   * @param routes An array of `Route` objects that define the navigation paths for the application.
   * @param config An `ExtraOptions` configuration object that controls how navigation is performed.
   * @return The new `NgModule`.
   *
   */
  static forRoot(routes: Routes, config?: ExtraOptions): ModuleWithProviders<RouterModule> {
    return {
      ngModule: RouterModule,
      providers: [
        ROUTER_PROVIDERS,
        provideRoutes(routes),
        {
          // Dev-mode guard: throws if a Router already exists in a parent
          // injector, i.e. forRoot() was called more than once.
          provide: ROUTER_FORROOT_GUARD,
          useFactory: provideForRootGuard,
          deps: [[Router, new Optional(), new SkipSelf()]]
        },
        {provide: ROUTER_CONFIGURATION, useValue: config ? config : {}},
        {
          // Hash vs. path location strategy is chosen from `config.useHash`.
          provide: LocationStrategy,
          useFactory: provideLocationStrategy,
          deps:
              [PlatformLocation, [new Inject(APP_BASE_HREF), new Optional()], ROUTER_CONFIGURATION]
        },
        {
          provide: RouterScroller,
          useFactory: createRouterScroller,
          deps: [Router, ViewportScroller, ROUTER_CONFIGURATION]
        },
        {
          provide: PreloadingStrategy,
          useExisting: config && config.preloadingStrategy ? config.preloadingStrategy :
                                                             NoPreloading
        },
        {provide: NgProbeToken, multi: true, useFactory: routerNgProbeToken},
        provideRouterInitializer(),
      ],
    };
  }

  /**
   * Creates a module with all the router directives and a provider registering routes,
   * without creating a new Router service.
   * When registering for submodules and lazy-loaded submodules, create the NgModule as follows:
   *
   * ```
   * @NgModule({
   *   imports: [RouterModule.forChild(ROUTES)]
   * })
   * class MyNgModule {}
   * ```
   *
   * @param routes An array of `Route` objects that define the navigation paths for the submodule.
   * @return The new NgModule.
   *
   */
  static forChild(routes: Routes): ModuleWithProviders<RouterModule> {
    return {ngModule: RouterModule, providers: [provideRoutes(routes)]};
  }
}
/**
 * Factory that builds the `RouterScroller`, first applying any configured
 * scroll offset to the `ViewportScroller`.
 */
export function createRouterScroller(
    router: Router, viewportScroller: ViewportScroller, config: ExtraOptions): RouterScroller {
  const offset = config.scrollOffset;
  if (offset) {
    viewportScroller.setOffset(offset);
  }
  return new RouterScroller(router, viewportScroller, config);
}
/**
 * Factory for the `LocationStrategy`: fragment-based (`#/...`) URLs when
 * `options.useHash` is set, HTML5 history-API URLs otherwise.
 */
export function provideLocationStrategy(
    platformLocationStrategy: PlatformLocation, baseHref: string, options: ExtraOptions = {}) {
  if (options.useHash) {
    return new HashLocationStrategy(platformLocationStrategy, baseHref);
  }
  return new PathLocationStrategy(platformLocationStrategy, baseHref);
}
export function provideForRootGuard(router: Router): any {
if ((typeof ngDevMode === 'undefined' || ngDevMode) && router) {
throw new Error(
`RouterModule.forRoot() called twice. Lazy loaded modules should use RouterModule.forChild() instead.`);
}
return 'guarded';
}
/**
* Registers a [DI provider](guide/glossary#provider) for a set of routes.
* @param routes The route configuration to provide.
*
* @usageNotes
*
* ```
* @NgModule({
* imports: [RouterModule.forChild(ROUTES)],
* providers: [provideRoutes(EXTRA_ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @publicApi
*/
/**
 * Builds the pair of multi-providers that register `routes` with the router
 * (and with the entry-component analyzer for ViewEngine builds).
 */
export function provideRoutes(routes: Routes): any {
  const multi = true;
  return [
    {provide: ANALYZE_FOR_ENTRY_COMPONENTS, multi, useValue: routes},
    {provide: ROUTES, multi, useValue: routes},
  ];
}
/**
* Allowed values in an `ExtraOptions` object that configure
* when the router performs the initial navigation operation.
*
* * 'enabledNonBlocking' - (default) The initial navigation starts after the
* root component has been created. The bootstrap is not blocked on the completion of the initial
* navigation.
* * 'enabledBlocking' - The initial navigation starts before the root component is created.
* The bootstrap is blocked until the initial navigation is complete. This value is required
* for [server-side rendering](guide/universal) to work.
* * 'disabled' - The initial navigation is not performed. The location listener is set up before
* the root component gets created. Use if there is a reason to have
* more control over when the router starts its initial navigation due to some complex
* initialization logic.
*
* The following values have been [deprecated](guide/releases#deprecation-practices) since v11,
* and should not be used for new applications.
*
* * 'enabled' - This option is 1:1 replaceable with `enabledBlocking`.
*
* @see `forRoot()`
*
* @publicApi
*/
export type InitialNavigation = 'disabled'|'enabled'|'enabledBlocking'|'enabledNonBlocking';
/**
* A set of configuration options for a router module, provided in the
* `forRoot()` method.
*
* @see `forRoot()`
*
*
* @publicApi
*/
export interface ExtraOptions {
/**
* When true, log all internal navigation events to the console.
* Use for debugging.
*/
enableTracing?: boolean;
/**
* When true, enable the location strategy that uses the URL fragment
* instead of the history API.
*/
useHash?: boolean;
/**
* One of `enabled`, `enabledBlocking`, `enabledNonBlocking` or `disabled`.
* When set to `enabled` or `enabledBlocking`, the initial navigation starts before the root
* component is created. The bootstrap is blocked until the initial navigation is complete. This
* value is required for [server-side rendering](guide/universal) to work. When set to
* `enabledNonBlocking`, the initial navigation starts after the root component has been created.
* The bootstrap is not blocked on the completion of the initial navigation. When set to
* `disabled`, the initial navigation is not performed. The location listener is set up before the
* root component gets created. Use if there is a reason to have more control over when the router
* starts its initial navigation due to some complex initialization logic.
*/
initialNavigation?: InitialNavigation;
/**
* A custom error handler for failed navigations.
* If the handler returns a value, the navigation Promise is resolved with this value.
* If the handler throws an exception, the navigation Promise is rejected with the exception.
*
*/
errorHandler?: ErrorHandler;
/**
* Configures a preloading strategy.
* One of `PreloadAllModules` or `NoPreloading` (the default).
*/
preloadingStrategy?: any;
/**
* Define what the router should do if it receives a navigation request to the current URL.
* Default is `ignore`, which causes the router ignores the navigation.
* This can disable features such as a "refresh" button.
* Use this option to configure the behavior when navigating to the
* current URL. Default is 'ignore'.
*/
onSameUrlNavigation?: 'reload'|'ignore';
/**
* Configures if the scroll position needs to be restored when navigating back.
*
* * 'disabled'- (Default) Does nothing. Scroll position is maintained on navigation.
* * 'top'- Sets the scroll position to x = 0, y = 0 on all navigation.
* * 'enabled'- Restores the previous scroll position on backward navigation, else sets the
* position to the anchor if one is provided, or sets the scroll position to [0, 0] (forward
* navigation). This option will be the default in the future.
*
* You can implement custom scroll restoration behavior by adapting the enabled behavior as
* in the following example.
*
* ```typescript
* class AppModule {
* constructor(router: Router, viewportScroller: ViewportScroller) {
* router.events.pipe(
* filter((e: Event): e is Scroll => e instanceof Scroll)
* ).subscribe(e => {
* if (e.position) {
* // backward navigation
* viewportScroller.scrollToPosition(e.position);
* } else if (e.anchor) {
* // anchor navigation
* viewportScroller.scrollToAnchor(e.anchor);
* } else {
* // forward navigation
* viewportScroller.scrollToPosition([0, 0]);
* }
* });
* }
* }
* ```
*/
scrollPositionRestoration?: 'disabled'|'enabled'|'top';
/**
* When set to 'enabled', scrolls to the anchor element when the URL has a fragment.
* Anchor scrolling is disabled by default.
*
* Anchor scrolling does not happen on 'popstate'. Instead, we restore the position
* that we stored or scroll to the top.
*/
anchorScrolling?: 'disabled'|'enabled';
/**
* Configures the scroll offset the router will use when scrolling to an element.
*
* When given a tuple with x and y position value,
* the router uses that offset each time it scrolls.
* When given a function, the router invokes the function every time
* it restores scroll position.
*/
scrollOffset?: [number, number]|(() => [number, number]);
/**
* Defines how the router merges parameters, data, and resolved data from parent to child
* routes. By default ('emptyOnly'), inherits parent parameters only for
* path-less or component-less routes.
*
* Set to 'always' to enable unconditional inheritance of parent parameters.
*
* Note that when dealing with matrix parameters, "parent" refers to the parent `Route`
* config which does not necessarily mean the "URL segment to the left". When the `Route` `path`
* contains multiple segments, the matrix parameters must appear on the last segment. For example,
* matrix parameters for `{path: 'a/b', component: MyComp}` should appear as `a/b;foo=bar` and not
* `a;foo=bar/b`.
*
*/
paramsInheritanceStrategy?: 'emptyOnly'|'always';
/**
* A custom handler for malformed URI errors. The handler is invoked when `encodedURI` contains
* invalid character sequences.
* The default implementation is to redirect to the root URL, dropping
* any path or parameter information. The function takes three parameters:
*
* - `'URIError'` - Error thrown when parsing a bad URL.
* - `'UrlSerializer'` - UrlSerializer that’s configured with the router.
* - `'url'` - The malformed URL that caused the URIError
* */
malformedUriErrorHandler?:
(error: URIError, urlSerializer: UrlSerializer, url: string) => UrlTree;
/**
* Defines when the router updates the browser URL. By default ('deferred'),
* update after successful navigation.
* Set to 'eager' if prefer to update the URL at the beginning of navigation.
* Updating the URL early allows you to handle a failure of navigation by
* showing an error message with the URL that failed.
*/
urlUpdateStrategy?: 'deferred'|'eager';
/**
* Enables a bug fix that corrects relative link resolution in components with empty paths.
* Example:
*
* ```
* const routes = [
* {
* path: '',
* component: ContainerComponent,
* children: [
* { path: 'a', component: AComponent },
* { path: 'b', component: BComponent },
* ]
* }
* ];
* ```
*
* From the `ContainerComponent`, you should be able to navigate to `AComponent` using
* the following `routerLink`, but it will not work if `relativeLinkResolution` is set
* to `'legacy'`:
*
* `<a [routerLink]="['./a']">Link to A</a>`
*
* However, this will work:
*
* `<a [routerLink]="['../a']">Link to A</a>`
*
* In other words, you're required to use `../` rather than `./` when the relative link
* resolution is set to `'legacy'`.
*
* The default in v11 is `corrected`.
*/
relativeLinkResolution?: 'legacy'|'corrected';
/**
* Configures how the Router attempts to restore state when a navigation is cancelled.
*
* 'replace' - Always uses `location.replaceState` to set the browser state to the state of the
* router before the navigation started. This means that if the URL of the browser is updated
* _before_ the navigation is canceled, the Router will simply replace the item in history rather
* than trying to restore to the previous location in the session history. This happens most
* frequently with `urlUpdateStrategy: 'eager'` and navigations with the browser back/forward
* buttons.
*
* 'computed' - Will attempt to return to the same index in the session history that corresponds
* to the Angular route when the navigation gets cancelled. For example, if the browser back
* button is clicked and the navigation is cancelled, the Router will trigger a forward navigation
* and vice versa.
*
* Note: the 'computed' option is incompatible with any `UrlHandlingStrategy` which only
* handles a portion of the URL because the history restoration navigates to the previous place in
* the browser history rather than simply resetting a portion of the URL.
*
* The default value is `replace` when not set.
*/
canceledNavigationResolution?: 'replace'|'computed';
}
export function setupRouter(
urlSerializer: UrlSerializer, contexts: ChildrenOutletContexts, location: Location,
injector: Injector, compiler: Compiler, config: Route[][], opts: ExtraOptions = {},
urlHandlingStrategy?: UrlHandlingStrategy, routeReuseStrategy?: RouteReuseStrategy) {
const router =
new Router(null, urlSerializer, contexts, location, injector, compiler, flatten(config));
if (urlHandlingStrategy) {
router.urlHandlingStrategy = urlHandlingStrategy;
}
if (routeReuseStrategy) {
router.routeReuseStrategy = routeReuseStrategy;
}
assignExtraOptionsToRouter(opts, router);
if (opts.enableTracing) {
router.events.subscribe((e: Event) => {
// tslint:disable:no-console
console.group?.(`Router Event: ${(<any>e.constructor).name}`);
console.log(e.toString());
console.log(e);
console.groupEnd?.();
// tslint:enable:no-console
});
}
return router;
}
export function assignExtraOptionsToRouter(opts: ExtraOptions, router: Router): void {
if (opts.errorHandler) {
router.errorHandler = opts.errorHandler;
}
if (opts.malformedUriErrorHandler) {
router.malformedUriErrorHandler = opts.malformedUriErrorHandler;
}
if (opts.onSameUrlNavigation) {
router.onSameUrlNavigation = opts.onSameUrlNavigation;
}
if (opts.paramsInheritanceStrategy) {
router.paramsInheritanceStrategy = opts.paramsInheritanceStrategy;
}
if (opts.relativeLinkResolution) {
router.relativeLinkResolution = opts.relativeLinkResolution;
}
if (opts.urlUpdateStrategy) {
router.urlUpdateStrategy = opts.urlUpdateStrategy;
}
if (opts.canceledNavigationResolution) {
router.canceledNavigationResolution = opts.canceledNavigationResolution;
}
}
export function rootRoute(router: Router): ActivatedRoute {
return router.routerState.root;
}
/**
* Router initialization requires two steps:
*
* First, we start the navigation in a `APP_INITIALIZER` to block the bootstrap if
* a resolver or a guard executes asynchronously.
*
* Next, we actually run activation in a `BOOTSTRAP_LISTENER`, using the
* `afterPreactivation` hook provided by the router.
* The router navigation starts, reaches the point when preactivation is done, and then | private destroyed = false;
private resultOfPreactivationDone = new Subject<void>();
constructor(private injector: Injector) {}
appInitializer(): Promise<any> {
const p: Promise<any> = this.injector.get(LOCATION_INITIALIZED, Promise.resolve(null));
return p.then(() => {
// If the injector was destroyed, the DI lookups below will fail.
if (this.destroyed) {
return Promise.resolve(true);
}
let resolve: Function = null!;
const res = new Promise(r => resolve = r);
const router = this.injector.get(Router);
const opts = this.injector.get(ROUTER_CONFIGURATION);
if (opts.initialNavigation === 'disabled') {
router.setUpLocationChangeListener();
resolve(true);
} else if (
// TODO: enabled is deprecated as of v11, can be removed in v13
opts.initialNavigation === 'enabled' || opts.initialNavigation === 'enabledBlocking') {
router.hooks.afterPreactivation = () => {
// only the initial navigation should be delayed
if (!this.initNavigation) {
this.initNavigation = true;
resolve(true);
return this.resultOfPreactivationDone;
// subsequent navigations should not be delayed
} else {
return of(null) as any;
}
};
router.initialNavigation();
} else {
resolve(true);
}
return res;
});
}
bootstrapListener(bootstrappedComponentRef: ComponentRef<any>): void {
const opts = this.injector.get(ROUTER_CONFIGURATION);
const preloader = this.injector.get(RouterPreloader);
const routerScroller = this.injector.get(RouterScroller);
const router = this.injector.get(Router);
const ref = this.injector.get<ApplicationRef>(ApplicationRef);
if (bootstrappedComponentRef !== ref.components[0]) {
return;
}
// Default case
if (opts.initialNavigation === 'enabledNonBlocking' || opts.initialNavigation === undefined) {
router.initialNavigation();
}
preloader.setUpPreloading();
routerScroller.init();
router.resetRootComponentType(ref.componentTypes[0]);
this.resultOfPreactivationDone.next(null!);
this.resultOfPreactivationDone.complete();
}
ngOnDestroy() {
this.destroyed = true;
}
}
export function getAppInitializer(r: RouterInitializer) {
return r.appInitializer.bind(r);
}
export function getBootstrapListener(r: RouterInitializer) {
return r.bootstrapListener.bind(r);
}
/**
* A [DI token](guide/glossary/#di-token) for the router initializer that
* is called after the app is bootstrapped.
*
* @publicApi
*/
export const ROUTER_INITIALIZER =
new InjectionToken<(compRef: ComponentRef<any>) => void>('Router Initializer');
export function provideRouterInitializer(): ReadonlyArray<Provider> {
return [
RouterInitializer,
{
provide: APP_INITIALIZER,
multi: true,
useFactory: getAppInitializer,
deps: [RouterInitializer]
},
{provide: ROUTER_INITIALIZER, useFactory: getBootstrapListener, deps: [RouterInitializer]},
{provide: APP_BOOTSTRAP_LISTENER, multi: true, useExisting: ROUTER_INITIALIZER},
];
} | * pauses. It waits for the hook to be resolved. We then resolve it only in a bootstrap listener.
*/
@Injectable()
export class RouterInitializer implements OnDestroy {
private initNavigation = false; | random_line_split |
router_module.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {APP_BASE_HREF, HashLocationStrategy, Location, LOCATION_INITIALIZED, LocationStrategy, PathLocationStrategy, PlatformLocation, ViewportScroller} from '@angular/common';
import {ANALYZE_FOR_ENTRY_COMPONENTS, APP_BOOTSTRAP_LISTENER, APP_INITIALIZER, ApplicationRef, Compiler, ComponentRef, Inject, Injectable, InjectionToken, Injector, ModuleWithProviders, NgModule, NgProbeToken, OnDestroy, Optional, Provider, SkipSelf} from '@angular/core';
import {of, Subject} from 'rxjs';
import {EmptyOutletComponent} from './components/empty_outlet';
import {Route, Routes} from './config';
import {RouterLink, RouterLinkWithHref} from './directives/router_link';
import {RouterLinkActive} from './directives/router_link_active';
import {RouterOutlet} from './directives/router_outlet';
import {Event} from './events';
import {RouteReuseStrategy} from './route_reuse_strategy';
import {ErrorHandler, Router} from './router';
import {ROUTES} from './router_config_loader';
import {ChildrenOutletContexts} from './router_outlet_context';
import {NoPreloading, PreloadAllModules, PreloadingStrategy, RouterPreloader} from './router_preloader';
import {RouterScroller} from './router_scroller';
import {ActivatedRoute} from './router_state';
import {UrlHandlingStrategy} from './url_handling_strategy';
import {DefaultUrlSerializer, UrlSerializer, UrlTree} from './url_tree';
import {flatten} from './utils/collection';
/**
* The directives defined in the `RouterModule`.
*/
const ROUTER_DIRECTIVES =
[RouterOutlet, RouterLink, RouterLinkWithHref, RouterLinkActive, EmptyOutletComponent];
/**
* A [DI token](guide/glossary/#di-token) for the router service.
*
* @publicApi
*/
export const ROUTER_CONFIGURATION = new InjectionToken<ExtraOptions>('ROUTER_CONFIGURATION');
/**
* @docsNotRequired
*/
export const ROUTER_FORROOT_GUARD = new InjectionToken<void>('ROUTER_FORROOT_GUARD');
export const ROUTER_PROVIDERS: Provider[] = [
Location,
{provide: UrlSerializer, useClass: DefaultUrlSerializer},
{
provide: Router,
useFactory: setupRouter,
deps: [
UrlSerializer, ChildrenOutletContexts, Location, Injector, Compiler, ROUTES,
ROUTER_CONFIGURATION, [UrlHandlingStrategy, new Optional()],
[RouteReuseStrategy, new Optional()]
]
},
ChildrenOutletContexts,
{provide: ActivatedRoute, useFactory: rootRoute, deps: [Router]},
RouterPreloader,
NoPreloading,
PreloadAllModules,
{provide: ROUTER_CONFIGURATION, useValue: {enableTracing: false}},
];
export function routerNgProbeToken() {
return new NgProbeToken('Router', Router);
}
/**
* @description
*
* Adds directives and providers for in-app navigation among views defined in an application.
* Use the Angular `Router` service to declaratively specify application states and manage state
* transitions.
*
* You can import this NgModule multiple times, once for each lazy-loaded bundle.
* However, only one `Router` service can be active.
* To ensure this, there are two ways to register routes when importing this module:
*
* * The `forRoot()` method creates an `NgModule` that contains all the directives, the given
* routes, and the `Router` service itself.
* * The `forChild()` method creates an `NgModule` that contains all the directives and the given
* routes, but does not include the `Router` service.
*
* @see [Routing and Navigation guide](guide/router) for an
* overview of how the `Router` service should be used.
*
* @publicApi
*/
@NgModule({
declarations: ROUTER_DIRECTIVES,
exports: ROUTER_DIRECTIVES,
})
export class RouterModule {
// Note: We are injecting the Router so it gets created eagerly...
constructor(@Optional() @Inject(ROUTER_FORROOT_GUARD) guard: any, @Optional() router: Router) {}
/**
* Creates and configures a module with all the router providers and directives.
* Optionally sets up an application listener to perform an initial navigation.
*
* When registering the NgModule at the root, import as follows:
*
* ```
* @NgModule({
* imports: [RouterModule.forRoot(ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @param routes An array of `Route` objects that define the navigation paths for the application.
* @param config An `ExtraOptions` configuration object that controls how navigation is performed.
* @return The new `NgModule`.
*
*/
static forRoot(routes: Routes, config?: ExtraOptions): ModuleWithProviders<RouterModule> {
return {
ngModule: RouterModule,
providers: [
ROUTER_PROVIDERS,
provideRoutes(routes),
{
provide: ROUTER_FORROOT_GUARD,
useFactory: provideForRootGuard,
deps: [[Router, new Optional(), new SkipSelf()]]
},
{provide: ROUTER_CONFIGURATION, useValue: config ? config : {}},
{
provide: LocationStrategy,
useFactory: provideLocationStrategy,
deps:
[PlatformLocation, [new Inject(APP_BASE_HREF), new Optional()], ROUTER_CONFIGURATION]
},
{
provide: RouterScroller,
useFactory: createRouterScroller,
deps: [Router, ViewportScroller, ROUTER_CONFIGURATION]
},
{
provide: PreloadingStrategy,
useExisting: config && config.preloadingStrategy ? config.preloadingStrategy :
NoPreloading
},
{provide: NgProbeToken, multi: true, useFactory: routerNgProbeToken},
provideRouterInitializer(),
],
};
}
/**
* Creates a module with all the router directives and a provider registering routes,
* without creating a new Router service.
* When registering for submodules and lazy-loaded submodules, create the NgModule as follows:
*
* ```
* @NgModule({
* imports: [RouterModule.forChild(ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @param routes An array of `Route` objects that define the navigation paths for the submodule.
* @return The new NgModule.
*
*/
static forChild(routes: Routes): ModuleWithProviders<RouterModule> {
return {ngModule: RouterModule, providers: [provideRoutes(routes)]};
}
}
export function createRouterScroller(
router: Router, viewportScroller: ViewportScroller, config: ExtraOptions): RouterScroller {
if (config.scrollOffset) {
viewportScroller.setOffset(config.scrollOffset);
}
return new RouterScroller(router, viewportScroller, config);
}
export function provideLocationStrategy(
platformLocationStrategy: PlatformLocation, baseHref: string, options: ExtraOptions = {}) {
return options.useHash ? new HashLocationStrategy(platformLocationStrategy, baseHref) :
new PathLocationStrategy(platformLocationStrategy, baseHref);
}
export function provideForRootGuard(router: Router): any {
if ((typeof ngDevMode === 'undefined' || ngDevMode) && router) {
throw new Error(
`RouterModule.forRoot() called twice. Lazy loaded modules should use RouterModule.forChild() instead.`);
}
return 'guarded';
}
/**
* Registers a [DI provider](guide/glossary#provider) for a set of routes.
* @param routes The route configuration to provide.
*
* @usageNotes
*
* ```
* @NgModule({
* imports: [RouterModule.forChild(ROUTES)],
* providers: [provideRoutes(EXTRA_ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @publicApi
*/
export function provideRoutes(routes: Routes): any {
return [
{provide: ANALYZE_FOR_ENTRY_COMPONENTS, multi: true, useValue: routes},
{provide: ROUTES, multi: true, useValue: routes},
];
}
/**
* Allowed values in an `ExtraOptions` object that configure
* when the router performs the initial navigation operation.
*
* * 'enabledNonBlocking' - (default) The initial navigation starts after the
* root component has been created. The bootstrap is not blocked on the completion of the initial
* navigation.
* * 'enabledBlocking' - The initial navigation starts before the root component is created.
* The bootstrap is blocked until the initial navigation is complete. This value is required
* for [server-side rendering](guide/universal) to work.
* * 'disabled' - The initial navigation is not performed. The location listener is set up before
* the root component gets created. Use if there is a reason to have
* more control over when the router starts its initial navigation due to some complex
* initialization logic.
*
* The following values have been [deprecated](guide/releases#deprecation-practices) since v11,
* and should not be used for new applications.
*
* * 'enabled' - This option is 1:1 replaceable with `enabledBlocking`.
*
* @see `forRoot()`
*
* @publicApi
*/
export type InitialNavigation = 'disabled'|'enabled'|'enabledBlocking'|'enabledNonBlocking';
/**
* A set of configuration options for a router module, provided in the
* `forRoot()` method.
*
* @see `forRoot()`
*
*
* @publicApi
*/
export interface ExtraOptions {
/**
* When true, log all internal navigation events to the console.
* Use for debugging.
*/
enableTracing?: boolean;
/**
* When true, enable the location strategy that uses the URL fragment
* instead of the history API.
*/
useHash?: boolean;
/**
* One of `enabled`, `enabledBlocking`, `enabledNonBlocking` or `disabled`.
* When set to `enabled` or `enabledBlocking`, the initial navigation starts before the root
* component is created. The bootstrap is blocked until the initial navigation is complete. This
* value is required for [server-side rendering](guide/universal) to work. When set to
* `enabledNonBlocking`, the initial navigation starts after the root component has been created.
* The bootstrap is not blocked on the completion of the initial navigation. When set to
* `disabled`, the initial navigation is not performed. The location listener is set up before the
* root component gets created. Use if there is a reason to have more control over when the router
* starts its initial navigation due to some complex initialization logic.
*/
initialNavigation?: InitialNavigation;
/**
* A custom error handler for failed navigations.
* If the handler returns a value, the navigation Promise is resolved with this value.
* If the handler throws an exception, the navigation Promise is rejected with the exception.
*
*/
errorHandler?: ErrorHandler;
/**
* Configures a preloading strategy.
* One of `PreloadAllModules` or `NoPreloading` (the default).
*/
preloadingStrategy?: any;
/**
* Define what the router should do if it receives a navigation request to the current URL.
* Default is `ignore`, which causes the router ignores the navigation.
* This can disable features such as a "refresh" button.
* Use this option to configure the behavior when navigating to the
* current URL. Default is 'ignore'.
*/
onSameUrlNavigation?: 'reload'|'ignore';
/**
* Configures if the scroll position needs to be restored when navigating back.
*
* * 'disabled'- (Default) Does nothing. Scroll position is maintained on navigation.
* * 'top'- Sets the scroll position to x = 0, y = 0 on all navigation.
* * 'enabled'- Restores the previous scroll position on backward navigation, else sets the
* position to the anchor if one is provided, or sets the scroll position to [0, 0] (forward
* navigation). This option will be the default in the future.
*
* You can implement custom scroll restoration behavior by adapting the enabled behavior as
* in the following example.
*
* ```typescript
* class AppModule {
* constructor(router: Router, viewportScroller: ViewportScroller) {
* router.events.pipe(
* filter((e: Event): e is Scroll => e instanceof Scroll)
* ).subscribe(e => {
* if (e.position) {
* // backward navigation
* viewportScroller.scrollToPosition(e.position);
* } else if (e.anchor) {
* // anchor navigation
* viewportScroller.scrollToAnchor(e.anchor);
* } else {
* // forward navigation
* viewportScroller.scrollToPosition([0, 0]);
* }
* });
* }
* }
* ```
*/
scrollPositionRestoration?: 'disabled'|'enabled'|'top';
/**
* When set to 'enabled', scrolls to the anchor element when the URL has a fragment.
* Anchor scrolling is disabled by default.
*
* Anchor scrolling does not happen on 'popstate'. Instead, we restore the position
* that we stored or scroll to the top.
*/
anchorScrolling?: 'disabled'|'enabled';
/**
* Configures the scroll offset the router will use when scrolling to an element.
*
* When given a tuple with x and y position value,
* the router uses that offset each time it scrolls.
* When given a function, the router invokes the function every time
* it restores scroll position.
*/
scrollOffset?: [number, number]|(() => [number, number]);
/**
* Defines how the router merges parameters, data, and resolved data from parent to child
* routes. By default ('emptyOnly'), inherits parent parameters only for
* path-less or component-less routes.
*
* Set to 'always' to enable unconditional inheritance of parent parameters.
*
* Note that when dealing with matrix parameters, "parent" refers to the parent `Route`
* config which does not necessarily mean the "URL segment to the left". When the `Route` `path`
* contains multiple segments, the matrix parameters must appear on the last segment. For example,
* matrix parameters for `{path: 'a/b', component: MyComp}` should appear as `a/b;foo=bar` and not
* `a;foo=bar/b`.
*
*/
paramsInheritanceStrategy?: 'emptyOnly'|'always';
/**
* A custom handler for malformed URI errors. The handler is invoked when `encodedURI` contains
* invalid character sequences.
* The default implementation is to redirect to the root URL, dropping
* any path or parameter information. The function takes three parameters:
*
* - `'URIError'` - Error thrown when parsing a bad URL.
* - `'UrlSerializer'` - UrlSerializer that’s configured with the router.
* - `'url'` - The malformed URL that caused the URIError
* */
malformedUriErrorHandler?:
(error: URIError, urlSerializer: UrlSerializer, url: string) => UrlTree;
/**
* Defines when the router updates the browser URL. By default ('deferred'),
* update after successful navigation.
* Set to 'eager' if prefer to update the URL at the beginning of navigation.
* Updating the URL early allows you to handle a failure of navigation by
* showing an error message with the URL that failed.
*/
urlUpdateStrategy?: 'deferred'|'eager';
/**
* Enables a bug fix that corrects relative link resolution in components with empty paths.
* Example:
*
* ```
* const routes = [
* {
* path: '',
* component: ContainerComponent,
* children: [
* { path: 'a', component: AComponent },
* { path: 'b', component: BComponent },
* ]
* }
* ];
* ```
*
* From the `ContainerComponent`, you should be able to navigate to `AComponent` using
* the following `routerLink`, but it will not work if `relativeLinkResolution` is set
* to `'legacy'`:
*
* `<a [routerLink]="['./a']">Link to A</a>`
*
* However, this will work:
*
* `<a [routerLink]="['../a']">Link to A</a>`
*
* In other words, you're required to use `../` rather than `./` when the relative link
* resolution is set to `'legacy'`.
*
* The default in v11 is `corrected`.
*/
relativeLinkResolution?: 'legacy'|'corrected';
/**
* Configures how the Router attempts to restore state when a navigation is cancelled.
*
* 'replace' - Always uses `location.replaceState` to set the browser state to the state of the
* router before the navigation started. This means that if the URL of the browser is updated
* _before_ the navigation is canceled, the Router will simply replace the item in history rather
* than trying to restore to the previous location in the session history. This happens most
* frequently with `urlUpdateStrategy: 'eager'` and navigations with the browser back/forward
* buttons.
*
* 'computed' - Will attempt to return to the same index in the session history that corresponds
* to the Angular route when the navigation gets cancelled. For example, if the browser back
* button is clicked and the navigation is cancelled, the Router will trigger a forward navigation
* and vice versa.
*
* Note: the 'computed' option is incompatible with any `UrlHandlingStrategy` which only
* handles a portion of the URL because the history restoration navigates to the previous place in
* the browser history rather than simply resetting a portion of the URL.
*
* The default value is `replace` when not set.
*/
canceledNavigationResolution?: 'replace'|'computed';
}
export function setupRouter(
urlSerializer: UrlSerializer, contexts: ChildrenOutletContexts, location: Location,
injector: Injector, compiler: Compiler, config: Route[][], opts: ExtraOptions = {},
urlHandlingStrategy?: UrlHandlingStrategy, routeReuseStrategy?: RouteReuseStrategy) {
const router =
new Router(null, urlSerializer, contexts, location, injector, compiler, flatten(config));
if (urlHandlingStrategy) {
router.urlHandlingStrategy = urlHandlingStrategy;
}
if (routeReuseStrategy) {
router.routeReuseStrategy = routeReuseStrategy;
}
assignExtraOptionsToRouter(opts, router);
if (opts.enableTracing) {
router.events.subscribe((e: Event) => {
// tslint:disable:no-console
console.group?.(`Router Event: ${(<any>e.constructor).name}`);
console.log(e.toString());
console.log(e);
console.groupEnd?.();
// tslint:enable:no-console
});
}
return router;
}
export function assignExtraOptionsToRouter(opts: ExtraOptions, router: Router): void {
if (opts.errorHandler) {
router.errorHandler = opts.errorHandler;
}
if (opts.malformedUriErrorHandler) {
router.malformedUriErrorHandler = opts.malformedUriErrorHandler;
}
if (opts.onSameUrlNavigation) {
router.onSameUrlNavigation = opts.onSameUrlNavigation;
}
if (opts.paramsInheritanceStrategy) {
router.paramsInheritanceStrategy = opts.paramsInheritanceStrategy;
}
if (opts.relativeLinkResolution) {
router.relativeLinkResolution = opts.relativeLinkResolution;
}
if (opts.urlUpdateStrategy) {
router.urlUpdateStrategy = opts.urlUpdateStrategy;
}
if (opts.canceledNavigationResolution) {
router.canceledNavigationResolution = opts.canceledNavigationResolution;
}
}
export function rootRoute(router: Router): ActivatedRoute {
return router.routerState.root;
}
/**
* Router initialization requires two steps:
*
* First, we start the navigation in a `APP_INITIALIZER` to block the bootstrap if
* a resolver or a guard executes asynchronously.
*
* Next, we actually run activation in a `BOOTSTRAP_LISTENER`, using the
* `afterPreactivation` hook provided by the router.
* The router navigation starts, reaches the point when preactivation is done, and then
* pauses. It waits for the hook to be resolved. We then resolve it only in a bootstrap listener.
*/
@Injectable()
export class RouterInitializer implements OnDestroy {
private initNavigation = false;
private destroyed = false;
private resultOfPreactivationDone = new Subject<void>();
constructor(private injector: Injector) {}
appInitializer(): Promise<any> {
const p: Promise<any> = this.injector.get(LOCATION_INITIALIZED, Promise.resolve(null));
return p.then(() => {
// If the injector was destroyed, the DI lookups below will fail.
if (this.destroyed) {
return Promise.resolve(true);
}
let resolve: Function = null!;
const res = new Promise(r => resolve = r);
const router = this.injector.get(Router);
const opts = this.injector.get(ROUTER_CONFIGURATION);
if (opts.initialNavigation === 'disabled') {
router.setUpLocationChangeListener();
resolve(true);
} else if (
// TODO: enabled is deprecated as of v11, can be removed in v13
opts.initialNavigation === 'enabled' || opts.initialNavigation === 'enabledBlocking') {
router.hooks.afterPreactivation = () => {
// only the initial navigation should be delayed
if (!this.initNavigation) {
this.initNavigation = true;
resolve(true);
return this.resultOfPreactivationDone;
// subsequent navigations should not be delayed
} else {
return of(null) as any;
}
};
router.initialNavigation();
} else {
resolve(true);
}
return res;
});
}
bootstrapListener(bootstrappedComponentRef: ComponentRef<any>): void {
const opts = this.injector.get(ROUTER_CONFIGURATION);
const preloader = this.injector.get(RouterPreloader);
const routerScroller = this.injector.get(RouterScroller);
const router = this.injector.get(Router);
const ref = this.injector.get<ApplicationRef>(ApplicationRef);
if (bootstrappedComponentRef !== ref.components[0]) {
return;
}
// Default case
if (opts.initialNavigation === 'enabledNonBlocking' || opts.initialNavigation === undefined) {
router.initialNavigation();
}
preloader.setUpPreloading();
routerScroller.init();
router.resetRootComponentType(ref.componentTypes[0]);
this.resultOfPreactivationDone.next(null!);
this.resultOfPreactivationDone.complete();
}
ngOnDestroy() {
this.destroyed = true;
}
}
export function getAppInitializer(r: RouterInitializer) {
return r.appInitializer.bind(r);
}
export function getBootstrapListener(r: RouterInitializer) {
| /**
* A [DI token](guide/glossary/#di-token) for the router initializer that
* is called after the app is bootstrapped.
*
* @publicApi
*/
export const ROUTER_INITIALIZER =
new InjectionToken<(compRef: ComponentRef<any>) => void>('Router Initializer');
export function provideRouterInitializer(): ReadonlyArray<Provider> {
return [
RouterInitializer,
{
provide: APP_INITIALIZER,
multi: true,
useFactory: getAppInitializer,
deps: [RouterInitializer]
},
{provide: ROUTER_INITIALIZER, useFactory: getBootstrapListener, deps: [RouterInitializer]},
{provide: APP_BOOTSTRAP_LISTENER, multi: true, useExisting: ROUTER_INITIALIZER},
];
}
| return r.bootstrapListener.bind(r);
}
| identifier_body |
router_module.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {APP_BASE_HREF, HashLocationStrategy, Location, LOCATION_INITIALIZED, LocationStrategy, PathLocationStrategy, PlatformLocation, ViewportScroller} from '@angular/common';
import {ANALYZE_FOR_ENTRY_COMPONENTS, APP_BOOTSTRAP_LISTENER, APP_INITIALIZER, ApplicationRef, Compiler, ComponentRef, Inject, Injectable, InjectionToken, Injector, ModuleWithProviders, NgModule, NgProbeToken, OnDestroy, Optional, Provider, SkipSelf} from '@angular/core';
import {of, Subject} from 'rxjs';
import {EmptyOutletComponent} from './components/empty_outlet';
import {Route, Routes} from './config';
import {RouterLink, RouterLinkWithHref} from './directives/router_link';
import {RouterLinkActive} from './directives/router_link_active';
import {RouterOutlet} from './directives/router_outlet';
import {Event} from './events';
import {RouteReuseStrategy} from './route_reuse_strategy';
import {ErrorHandler, Router} from './router';
import {ROUTES} from './router_config_loader';
import {ChildrenOutletContexts} from './router_outlet_context';
import {NoPreloading, PreloadAllModules, PreloadingStrategy, RouterPreloader} from './router_preloader';
import {RouterScroller} from './router_scroller';
import {ActivatedRoute} from './router_state';
import {UrlHandlingStrategy} from './url_handling_strategy';
import {DefaultUrlSerializer, UrlSerializer, UrlTree} from './url_tree';
import {flatten} from './utils/collection';
/**
* The directives defined in the `RouterModule`.
*/
const ROUTER_DIRECTIVES =
[RouterOutlet, RouterLink, RouterLinkWithHref, RouterLinkActive, EmptyOutletComponent];
/**
* A [DI token](guide/glossary/#di-token) for the router service.
*
* @publicApi
*/
export const ROUTER_CONFIGURATION = new InjectionToken<ExtraOptions>('ROUTER_CONFIGURATION');
/**
* @docsNotRequired
*/
export const ROUTER_FORROOT_GUARD = new InjectionToken<void>('ROUTER_FORROOT_GUARD');
export const ROUTER_PROVIDERS: Provider[] = [
Location,
{provide: UrlSerializer, useClass: DefaultUrlSerializer},
{
provide: Router,
useFactory: setupRouter,
deps: [
UrlSerializer, ChildrenOutletContexts, Location, Injector, Compiler, ROUTES,
ROUTER_CONFIGURATION, [UrlHandlingStrategy, new Optional()],
[RouteReuseStrategy, new Optional()]
]
},
ChildrenOutletContexts,
{provide: ActivatedRoute, useFactory: rootRoute, deps: [Router]},
RouterPreloader,
NoPreloading,
PreloadAllModules,
{provide: ROUTER_CONFIGURATION, useValue: {enableTracing: false}},
];
export function routerNgProbeToken() {
return new NgProbeToken('Router', Router);
}
/**
* @description
*
* Adds directives and providers for in-app navigation among views defined in an application.
* Use the Angular `Router` service to declaratively specify application states and manage state
* transitions.
*
* You can import this NgModule multiple times, once for each lazy-loaded bundle.
* However, only one `Router` service can be active.
* To ensure this, there are two ways to register routes when importing this module:
*
* * The `forRoot()` method creates an `NgModule` that contains all the directives, the given
* routes, and the `Router` service itself.
* * The `forChild()` method creates an `NgModule` that contains all the directives and the given
* routes, but does not include the `Router` service.
*
* @see [Routing and Navigation guide](guide/router) for an
* overview of how the `Router` service should be used.
*
* @publicApi
*/
@NgModule({
declarations: ROUTER_DIRECTIVES,
exports: ROUTER_DIRECTIVES,
})
export class RouterModule {
// Note: We are injecting the Router so it gets created eagerly...
constructor(@Optional() @Inject(ROUTER_FORROOT_GUARD) guard: any, @Optional() router: Router) {}
/**
* Creates and configures a module with all the router providers and directives.
* Optionally sets up an application listener to perform an initial navigation.
*
* When registering the NgModule at the root, import as follows:
*
* ```
* @NgModule({
* imports: [RouterModule.forRoot(ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @param routes An array of `Route` objects that define the navigation paths for the application.
* @param config An `ExtraOptions` configuration object that controls how navigation is performed.
* @return The new `NgModule`.
*
*/
static forRoot(routes: Routes, config?: ExtraOptions): ModuleWithProviders<RouterModule> {
return {
ngModule: RouterModule,
providers: [
ROUTER_PROVIDERS,
provideRoutes(routes),
{
provide: ROUTER_FORROOT_GUARD,
useFactory: provideForRootGuard,
deps: [[Router, new Optional(), new SkipSelf()]]
},
{provide: ROUTER_CONFIGURATION, useValue: config ? config : {}},
{
provide: LocationStrategy,
useFactory: provideLocationStrategy,
deps:
[PlatformLocation, [new Inject(APP_BASE_HREF), new Optional()], ROUTER_CONFIGURATION]
},
{
provide: RouterScroller,
useFactory: createRouterScroller,
deps: [Router, ViewportScroller, ROUTER_CONFIGURATION]
},
{
provide: PreloadingStrategy,
useExisting: config && config.preloadingStrategy ? config.preloadingStrategy :
NoPreloading
},
{provide: NgProbeToken, multi: true, useFactory: routerNgProbeToken},
provideRouterInitializer(),
],
};
}
/**
* Creates a module with all the router directives and a provider registering routes,
* without creating a new Router service.
* When registering for submodules and lazy-loaded submodules, create the NgModule as follows:
*
* ```
* @NgModule({
* imports: [RouterModule.forChild(ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @param routes An array of `Route` objects that define the navigation paths for the submodule.
* @return The new NgModule.
*
*/
static forChild(routes: Routes): ModuleWithProviders<RouterModule> {
return {ngModule: RouterModule, providers: [provideRoutes(routes)]};
}
}
export function createRouterScroller(
router: Router, viewportScroller: ViewportScroller, config: ExtraOptions): RouterScroller {
if (config.scrollOffset) {
viewportScroller.setOffset(config.scrollOffset);
}
return new RouterScroller(router, viewportScroller, config);
}
export function provideLocationStrategy(
platformLocationStrategy: PlatformLocation, baseHref: string, options: ExtraOptions = {}) {
return options.useHash ? new HashLocationStrategy(platformLocationStrategy, baseHref) :
new PathLocationStrategy(platformLocationStrategy, baseHref);
}
export function provideForRootGuard(router: Router): any {
if ((typeof ngDevMode === 'undefined' || ngDevMode) && router) {
throw new Error(
`RouterModule.forRoot() called twice. Lazy loaded modules should use RouterModule.forChild() instead.`);
}
return 'guarded';
}
/**
* Registers a [DI provider](guide/glossary#provider) for a set of routes.
* @param routes The route configuration to provide.
*
* @usageNotes
*
* ```
* @NgModule({
* imports: [RouterModule.forChild(ROUTES)],
* providers: [provideRoutes(EXTRA_ROUTES)]
* })
* class MyNgModule {}
* ```
*
* @publicApi
*/
export function provideRoutes(routes: Routes): any {
return [
{provide: ANALYZE_FOR_ENTRY_COMPONENTS, multi: true, useValue: routes},
{provide: ROUTES, multi: true, useValue: routes},
];
}
/**
* Allowed values in an `ExtraOptions` object that configure
* when the router performs the initial navigation operation.
*
* * 'enabledNonBlocking' - (default) The initial navigation starts after the
* root component has been created. The bootstrap is not blocked on the completion of the initial
* navigation.
* * 'enabledBlocking' - The initial navigation starts before the root component is created.
* The bootstrap is blocked until the initial navigation is complete. This value is required
* for [server-side rendering](guide/universal) to work.
* * 'disabled' - The initial navigation is not performed. The location listener is set up before
* the root component gets created. Use if there is a reason to have
* more control over when the router starts its initial navigation due to some complex
* initialization logic.
*
* The following values have been [deprecated](guide/releases#deprecation-practices) since v11,
* and should not be used for new applications.
*
* * 'enabled' - This option is 1:1 replaceable with `enabledBlocking`.
*
* @see `forRoot()`
*
* @publicApi
*/
export type InitialNavigation = 'disabled'|'enabled'|'enabledBlocking'|'enabledNonBlocking';
/**
* A set of configuration options for a router module, provided in the
* `forRoot()` method.
*
* @see `forRoot()`
*
*
* @publicApi
*/
export interface ExtraOptions {
/**
* When true, log all internal navigation events to the console.
* Use for debugging.
*/
enableTracing?: boolean;
/**
* When true, enable the location strategy that uses the URL fragment
* instead of the history API.
*/
useHash?: boolean;
/**
* One of `enabled`, `enabledBlocking`, `enabledNonBlocking` or `disabled`.
* When set to `enabled` or `enabledBlocking`, the initial navigation starts before the root
* component is created. The bootstrap is blocked until the initial navigation is complete. This
* value is required for [server-side rendering](guide/universal) to work. When set to
* `enabledNonBlocking`, the initial navigation starts after the root component has been created.
* The bootstrap is not blocked on the completion of the initial navigation. When set to
* `disabled`, the initial navigation is not performed. The location listener is set up before the
* root component gets created. Use if there is a reason to have more control over when the router
* starts its initial navigation due to some complex initialization logic.
*/
initialNavigation?: InitialNavigation;
/**
* A custom error handler for failed navigations.
* If the handler returns a value, the navigation Promise is resolved with this value.
* If the handler throws an exception, the navigation Promise is rejected with the exception.
*
*/
errorHandler?: ErrorHandler;
/**
* Configures a preloading strategy.
* One of `PreloadAllModules` or `NoPreloading` (the default).
*/
preloadingStrategy?: any;
/**
* Define what the router should do if it receives a navigation request to the current URL.
* Default is `ignore`, which causes the router ignores the navigation.
* This can disable features such as a "refresh" button.
* Use this option to configure the behavior when navigating to the
* current URL. Default is 'ignore'.
*/
onSameUrlNavigation?: 'reload'|'ignore';
/**
* Configures if the scroll position needs to be restored when navigating back.
*
* * 'disabled'- (Default) Does nothing. Scroll position is maintained on navigation.
* * 'top'- Sets the scroll position to x = 0, y = 0 on all navigation.
* * 'enabled'- Restores the previous scroll position on backward navigation, else sets the
* position to the anchor if one is provided, or sets the scroll position to [0, 0] (forward
* navigation). This option will be the default in the future.
*
* You can implement custom scroll restoration behavior by adapting the enabled behavior as
* in the following example.
*
* ```typescript
* class AppModule {
* constructor(router: Router, viewportScroller: ViewportScroller) {
* router.events.pipe(
* filter((e: Event): e is Scroll => e instanceof Scroll)
* ).subscribe(e => {
* if (e.position) {
* // backward navigation
* viewportScroller.scrollToPosition(e.position);
* } else if (e.anchor) {
* // anchor navigation
* viewportScroller.scrollToAnchor(e.anchor);
* } else {
* // forward navigation
* viewportScroller.scrollToPosition([0, 0]);
* }
* });
* }
* }
* ```
*/
scrollPositionRestoration?: 'disabled'|'enabled'|'top';
/**
* When set to 'enabled', scrolls to the anchor element when the URL has a fragment.
* Anchor scrolling is disabled by default.
*
* Anchor scrolling does not happen on 'popstate'. Instead, we restore the position
* that we stored or scroll to the top.
*/
anchorScrolling?: 'disabled'|'enabled';
/**
* Configures the scroll offset the router will use when scrolling to an element.
*
* When given a tuple with x and y position value,
* the router uses that offset each time it scrolls.
* When given a function, the router invokes the function every time
* it restores scroll position.
*/
scrollOffset?: [number, number]|(() => [number, number]);
/**
* Defines how the router merges parameters, data, and resolved data from parent to child
* routes. By default ('emptyOnly'), inherits parent parameters only for
* path-less or component-less routes.
*
* Set to 'always' to enable unconditional inheritance of parent parameters.
*
* Note that when dealing with matrix parameters, "parent" refers to the parent `Route`
* config which does not necessarily mean the "URL segment to the left". When the `Route` `path`
* contains multiple segments, the matrix parameters must appear on the last segment. For example,
* matrix parameters for `{path: 'a/b', component: MyComp}` should appear as `a/b;foo=bar` and not
* `a;foo=bar/b`.
*
*/
paramsInheritanceStrategy?: 'emptyOnly'|'always';
/**
* A custom handler for malformed URI errors. The handler is invoked when `encodedURI` contains
* invalid character sequences.
* The default implementation is to redirect to the root URL, dropping
* any path or parameter information. The function takes three parameters:
*
* - `'URIError'` - Error thrown when parsing a bad URL.
* - `'UrlSerializer'` - UrlSerializer that’s configured with the router.
* - `'url'` - The malformed URL that caused the URIError
* */
malformedUriErrorHandler?:
(error: URIError, urlSerializer: UrlSerializer, url: string) => UrlTree;
/**
* Defines when the router updates the browser URL. By default ('deferred'),
* update after successful navigation.
* Set to 'eager' if prefer to update the URL at the beginning of navigation.
* Updating the URL early allows you to handle a failure of navigation by
* showing an error message with the URL that failed.
*/
urlUpdateStrategy?: 'deferred'|'eager';
/**
* Enables a bug fix that corrects relative link resolution in components with empty paths.
* Example:
*
* ```
* const routes = [
* {
* path: '',
* component: ContainerComponent,
* children: [
* { path: 'a', component: AComponent },
* { path: 'b', component: BComponent },
* ]
* }
* ];
* ```
*
* From the `ContainerComponent`, you should be able to navigate to `AComponent` using
* the following `routerLink`, but it will not work if `relativeLinkResolution` is set
* to `'legacy'`:
*
* `<a [routerLink]="['./a']">Link to A</a>`
*
* However, this will work:
*
* `<a [routerLink]="['../a']">Link to A</a>`
*
* In other words, you're required to use `../` rather than `./` when the relative link
* resolution is set to `'legacy'`.
*
* The default in v11 is `corrected`.
*/
relativeLinkResolution?: 'legacy'|'corrected';
/**
* Configures how the Router attempts to restore state when a navigation is cancelled.
*
* 'replace' - Always uses `location.replaceState` to set the browser state to the state of the
* router before the navigation started. This means that if the URL of the browser is updated
* _before_ the navigation is canceled, the Router will simply replace the item in history rather
* than trying to restore to the previous location in the session history. This happens most
* frequently with `urlUpdateStrategy: 'eager'` and navigations with the browser back/forward
* buttons.
*
* 'computed' - Will attempt to return to the same index in the session history that corresponds
* to the Angular route when the navigation gets cancelled. For example, if the browser back
* button is clicked and the navigation is cancelled, the Router will trigger a forward navigation
* and vice versa.
*
* Note: the 'computed' option is incompatible with any `UrlHandlingStrategy` which only
* handles a portion of the URL because the history restoration navigates to the previous place in
* the browser history rather than simply resetting a portion of the URL.
*
* The default value is `replace` when not set.
*/
canceledNavigationResolution?: 'replace'|'computed';
}
export function setupRouter(
urlSerializer: UrlSerializer, contexts: ChildrenOutletContexts, location: Location,
injector: Injector, compiler: Compiler, config: Route[][], opts: ExtraOptions = {},
urlHandlingStrategy?: UrlHandlingStrategy, routeReuseStrategy?: RouteReuseStrategy) {
const router =
new Router(null, urlSerializer, contexts, location, injector, compiler, flatten(config));
if (urlHandlingStrategy) {
router.urlHandlingStrategy = urlHandlingStrategy;
}
if (routeReuseStrategy) {
router.routeReuseStrategy = routeReuseStrategy;
}
assignExtraOptionsToRouter(opts, router);
if (opts.enableTracing) {
router.events.subscribe((e: Event) => {
// tslint:disable:no-console
console.group?.(`Router Event: ${(<any>e.constructor).name}`);
console.log(e.toString());
console.log(e);
console.groupEnd?.();
// tslint:enable:no-console
});
}
return router;
}
export function assignExtraOptionsToRouter(opts: ExtraOptions, router: Router): void {
if (opts.errorHandler) {
router.errorHandler = opts.errorHandler;
}
if (opts.malformedUriErrorHandler) {
router.malformedUriErrorHandler = opts.malformedUriErrorHandler;
}
if (opts.onSameUrlNavigation) {
| if (opts.paramsInheritanceStrategy) {
router.paramsInheritanceStrategy = opts.paramsInheritanceStrategy;
}
if (opts.relativeLinkResolution) {
router.relativeLinkResolution = opts.relativeLinkResolution;
}
if (opts.urlUpdateStrategy) {
router.urlUpdateStrategy = opts.urlUpdateStrategy;
}
if (opts.canceledNavigationResolution) {
router.canceledNavigationResolution = opts.canceledNavigationResolution;
}
}
export function rootRoute(router: Router): ActivatedRoute {
return router.routerState.root;
}
/**
* Router initialization requires two steps:
*
* First, we start the navigation in a `APP_INITIALIZER` to block the bootstrap if
* a resolver or a guard executes asynchronously.
*
* Next, we actually run activation in a `BOOTSTRAP_LISTENER`, using the
* `afterPreactivation` hook provided by the router.
* The router navigation starts, reaches the point when preactivation is done, and then
* pauses. It waits for the hook to be resolved. We then resolve it only in a bootstrap listener.
*/
@Injectable()
export class RouterInitializer implements OnDestroy {
private initNavigation = false;
private destroyed = false;
private resultOfPreactivationDone = new Subject<void>();
constructor(private injector: Injector) {}
appInitializer(): Promise<any> {
const p: Promise<any> = this.injector.get(LOCATION_INITIALIZED, Promise.resolve(null));
return p.then(() => {
// If the injector was destroyed, the DI lookups below will fail.
if (this.destroyed) {
return Promise.resolve(true);
}
let resolve: Function = null!;
const res = new Promise(r => resolve = r);
const router = this.injector.get(Router);
const opts = this.injector.get(ROUTER_CONFIGURATION);
if (opts.initialNavigation === 'disabled') {
router.setUpLocationChangeListener();
resolve(true);
} else if (
// TODO: enabled is deprecated as of v11, can be removed in v13
opts.initialNavigation === 'enabled' || opts.initialNavigation === 'enabledBlocking') {
router.hooks.afterPreactivation = () => {
// only the initial navigation should be delayed
if (!this.initNavigation) {
this.initNavigation = true;
resolve(true);
return this.resultOfPreactivationDone;
// subsequent navigations should not be delayed
} else {
return of(null) as any;
}
};
router.initialNavigation();
} else {
resolve(true);
}
return res;
});
}
bootstrapListener(bootstrappedComponentRef: ComponentRef<any>): void {
const opts = this.injector.get(ROUTER_CONFIGURATION);
const preloader = this.injector.get(RouterPreloader);
const routerScroller = this.injector.get(RouterScroller);
const router = this.injector.get(Router);
const ref = this.injector.get<ApplicationRef>(ApplicationRef);
if (bootstrappedComponentRef !== ref.components[0]) {
return;
}
// Default case
if (opts.initialNavigation === 'enabledNonBlocking' || opts.initialNavigation === undefined) {
router.initialNavigation();
}
preloader.setUpPreloading();
routerScroller.init();
router.resetRootComponentType(ref.componentTypes[0]);
this.resultOfPreactivationDone.next(null!);
this.resultOfPreactivationDone.complete();
}
ngOnDestroy() {
this.destroyed = true;
}
}
export function getAppInitializer(r: RouterInitializer) {
return r.appInitializer.bind(r);
}
export function getBootstrapListener(r: RouterInitializer) {
return r.bootstrapListener.bind(r);
}
/**
* A [DI token](guide/glossary/#di-token) for the router initializer that
* is called after the app is bootstrapped.
*
* @publicApi
*/
export const ROUTER_INITIALIZER =
new InjectionToken<(compRef: ComponentRef<any>) => void>('Router Initializer');
export function provideRouterInitializer(): ReadonlyArray<Provider> {
return [
RouterInitializer,
{
provide: APP_INITIALIZER,
multi: true,
useFactory: getAppInitializer,
deps: [RouterInitializer]
},
{provide: ROUTER_INITIALIZER, useFactory: getBootstrapListener, deps: [RouterInitializer]},
{provide: APP_BOOTSTRAP_LISTENER, multi: true, useExisting: ROUTER_INITIALIZER},
];
}
| router.onSameUrlNavigation = opts.onSameUrlNavigation;
}
| conditional_block |
branchify.rs | #![macro_use]
use std::str::Chars;
use std::vec::Vec;
use std::io::IoResult;
use std::iter::repeat;
use std::ascii::AsciiExt;
#[derive(Clone)]
pub struct ParseBranch {
matches: Vec<u8>,
result: Option<String>,
children: Vec<ParseBranch>,
}
impl ParseBranch {
fn new() -> ParseBranch {
ParseBranch {
matches: Vec::new(),
result: None,
children: Vec::new()
}
}
}
pub fn branchify(options: &[(&str, &str)], case_sensitive: bool) -> Vec<ParseBranch> {
let mut root = ParseBranch::new();
fn go_down_moses(branch: &mut ParseBranch, mut chariter: Chars, result: &str, case_sensitive: bool) | ;
for &(key, result) in options.iter() {
go_down_moses(&mut root, key.chars(), result, case_sensitive);
}
root.children
}
macro_rules! branchify(
(case sensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], true)
);
(case insensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], false)
);
);
/// Prints the contents to stdout.
///
/// :param branches: the branches to search through
/// :param indent: the level of indentation (each level representing four leading spaces)
/// :param read_call: the function call to read a byte
/// :param end: the byte which marks the end of the sequence
/// :param max_len: the maximum length a value may be before giving up and returning ``None``
/// :param valid: the function call to if a byte ``b`` is valid
/// :param unknown: the expression to call for an unknown value; in this string, ``{}`` will be
/// replaced with an expression (literal or non-literal) evaluating to a ``String`` (it is
/// ``{}`` only, not arbitrary format strings)
pub fn generate_branchified_method(
writer: &mut Writer,
branches: Vec<ParseBranch>,
indent: usize,
read_call: &str,
end: &str,
max_len: &str,
valid: &str,
unknown: &str) -> IoResult<()> {
fn r(writer: &mut Writer, branch: &ParseBranch, prefix: &str, indent: usize, read_call: &str,
end: &str, max_len: &str, valid: &str, unknown: &str) -> IoResult<()> {
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
for &c in branch.matches.iter() {
let next_prefix = format!("{}{}", prefix, c as char);
w!(format!("Ok(b'{}') => match {} {{", c as char, read_call));
for b in branch.children.iter() {
try!(r(writer, b, &next_prefix[], indent + 1, read_call, end, max_len, valid, unknown));
}
match branch.result {
Some(ref result) =>
w!(format!(" Ok(b' ') => return Ok({}),", *result)),
None => w!(format!(" Ok(b' ') => return Ok({}),",
unknown.replace("{}", &format!("String::from_str(\"{}\")", next_prefix)[]))),
}
w!(format!(" Ok(b) if {} => (\"{}\", b),", valid, next_prefix));
w!(" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),");
w!(" Err(err) => return Err(err),");
w!("},");
}
Ok(())
}
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
w!(format!("let (s, next_byte) = match {} {{", read_call));
for b in branches.iter() {
try!(r(writer, b, "", indent + 1, read_call, end, max_len, valid, unknown));
}
w!(format!(" Ok(b) if {} => (\"\", b),", valid));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( ("};"));
w!( ("// OK, that didn't pan out. Let's read the rest and see what we get."));
w!( ("let mut s = String::from_str(s);"));
w!( ("s.push(next_byte as char);"));
w!( ("loop {"));
w!(format!(" match {} {{", read_call));
w!(format!(" Ok(b) if b == {} => return Ok({}),", end, unknown.replace("{}", "s")));
w!(format!(" Ok(b) if {} => {{", valid));
w!(format!(" if s.len() == {} {{", max_len));
w!( (" // Too long; bad request"));
w!( (" return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"too long, bad request\", detail: None });"));
w!( (" }"));
w!( (" s.push(b as char);"));
w!( (" },"));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( (" }"));
w!( ("}"));
Ok(())
}
| {
match chariter.next() {
Some(c) => {
let first_case = if case_sensitive { c as u8 } else { c.to_ascii_uppercase() as u8 };
for next_branch in branch.children.iter_mut() {
if next_branch.matches[0] == first_case {
go_down_moses(next_branch, chariter, result, case_sensitive);
return;
}
}
let mut subbranch = ParseBranch::new();
subbranch.matches.push(first_case);
if !case_sensitive {
let second_case = c.to_ascii_lowercase() as u8;
if first_case != second_case {
subbranch.matches.push(second_case);
}
}
branch.children.push(subbranch);
let i = branch.children.len() - 1;
go_down_moses(&mut branch.children[i], chariter, result, case_sensitive);
},
None => {
assert!(branch.result.is_none());
branch.result = Some(String::from_str(result));
},
}
} | identifier_body |
branchify.rs | #![macro_use]
use std::str::Chars;
use std::vec::Vec;
use std::io::IoResult;
use std::iter::repeat;
use std::ascii::AsciiExt;
#[derive(Clone)]
pub struct ParseBranch {
matches: Vec<u8>,
result: Option<String>,
children: Vec<ParseBranch>,
}
impl ParseBranch {
fn new() -> ParseBranch {
ParseBranch {
matches: Vec::new(), | result: None,
children: Vec::new()
}
}
}
pub fn branchify(options: &[(&str, &str)], case_sensitive: bool) -> Vec<ParseBranch> {
let mut root = ParseBranch::new();
fn go_down_moses(branch: &mut ParseBranch, mut chariter: Chars, result: &str, case_sensitive: bool) {
match chariter.next() {
Some(c) => {
let first_case = if case_sensitive { c as u8 } else { c.to_ascii_uppercase() as u8 };
for next_branch in branch.children.iter_mut() {
if next_branch.matches[0] == first_case {
go_down_moses(next_branch, chariter, result, case_sensitive);
return;
}
}
let mut subbranch = ParseBranch::new();
subbranch.matches.push(first_case);
if !case_sensitive {
let second_case = c.to_ascii_lowercase() as u8;
if first_case != second_case {
subbranch.matches.push(second_case);
}
}
branch.children.push(subbranch);
let i = branch.children.len() - 1;
go_down_moses(&mut branch.children[i], chariter, result, case_sensitive);
},
None => {
assert!(branch.result.is_none());
branch.result = Some(String::from_str(result));
},
}
};
for &(key, result) in options.iter() {
go_down_moses(&mut root, key.chars(), result, case_sensitive);
}
root.children
}
macro_rules! branchify(
(case sensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], true)
);
(case insensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], false)
);
);
/// Prints the contents to stdout.
///
/// :param branches: the branches to search through
/// :param indent: the level of indentation (each level representing four leading spaces)
/// :param read_call: the function call to read a byte
/// :param end: the byte which marks the end of the sequence
/// :param max_len: the maximum length a value may be before giving up and returning ``None``
/// :param valid: the function call to if a byte ``b`` is valid
/// :param unknown: the expression to call for an unknown value; in this string, ``{}`` will be
/// replaced with an expression (literal or non-literal) evaluating to a ``String`` (it is
/// ``{}`` only, not arbitrary format strings)
pub fn generate_branchified_method(
writer: &mut Writer,
branches: Vec<ParseBranch>,
indent: usize,
read_call: &str,
end: &str,
max_len: &str,
valid: &str,
unknown: &str) -> IoResult<()> {
fn r(writer: &mut Writer, branch: &ParseBranch, prefix: &str, indent: usize, read_call: &str,
end: &str, max_len: &str, valid: &str, unknown: &str) -> IoResult<()> {
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
for &c in branch.matches.iter() {
let next_prefix = format!("{}{}", prefix, c as char);
w!(format!("Ok(b'{}') => match {} {{", c as char, read_call));
for b in branch.children.iter() {
try!(r(writer, b, &next_prefix[], indent + 1, read_call, end, max_len, valid, unknown));
}
match branch.result {
Some(ref result) =>
w!(format!(" Ok(b' ') => return Ok({}),", *result)),
None => w!(format!(" Ok(b' ') => return Ok({}),",
unknown.replace("{}", &format!("String::from_str(\"{}\")", next_prefix)[]))),
}
w!(format!(" Ok(b) if {} => (\"{}\", b),", valid, next_prefix));
w!(" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),");
w!(" Err(err) => return Err(err),");
w!("},");
}
Ok(())
}
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
w!(format!("let (s, next_byte) = match {} {{", read_call));
for b in branches.iter() {
try!(r(writer, b, "", indent + 1, read_call, end, max_len, valid, unknown));
}
w!(format!(" Ok(b) if {} => (\"\", b),", valid));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( ("};"));
w!( ("// OK, that didn't pan out. Let's read the rest and see what we get."));
w!( ("let mut s = String::from_str(s);"));
w!( ("s.push(next_byte as char);"));
w!( ("loop {"));
w!(format!(" match {} {{", read_call));
w!(format!(" Ok(b) if b == {} => return Ok({}),", end, unknown.replace("{}", "s")));
w!(format!(" Ok(b) if {} => {{", valid));
w!(format!(" if s.len() == {} {{", max_len));
w!( (" // Too long; bad request"));
w!( (" return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"too long, bad request\", detail: None });"));
w!( (" }"));
w!( (" s.push(b as char);"));
w!( (" },"));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( (" }"));
w!( ("}"));
Ok(())
} | random_line_split | |
branchify.rs | #![macro_use]
use std::str::Chars;
use std::vec::Vec;
use std::io::IoResult;
use std::iter::repeat;
use std::ascii::AsciiExt;
#[derive(Clone)]
pub struct ParseBranch {
matches: Vec<u8>,
result: Option<String>,
children: Vec<ParseBranch>,
}
impl ParseBranch {
fn | () -> ParseBranch {
ParseBranch {
matches: Vec::new(),
result: None,
children: Vec::new()
}
}
}
pub fn branchify(options: &[(&str, &str)], case_sensitive: bool) -> Vec<ParseBranch> {
let mut root = ParseBranch::new();
fn go_down_moses(branch: &mut ParseBranch, mut chariter: Chars, result: &str, case_sensitive: bool) {
match chariter.next() {
Some(c) => {
let first_case = if case_sensitive { c as u8 } else { c.to_ascii_uppercase() as u8 };
for next_branch in branch.children.iter_mut() {
if next_branch.matches[0] == first_case {
go_down_moses(next_branch, chariter, result, case_sensitive);
return;
}
}
let mut subbranch = ParseBranch::new();
subbranch.matches.push(first_case);
if !case_sensitive {
let second_case = c.to_ascii_lowercase() as u8;
if first_case != second_case {
subbranch.matches.push(second_case);
}
}
branch.children.push(subbranch);
let i = branch.children.len() - 1;
go_down_moses(&mut branch.children[i], chariter, result, case_sensitive);
},
None => {
assert!(branch.result.is_none());
branch.result = Some(String::from_str(result));
},
}
};
for &(key, result) in options.iter() {
go_down_moses(&mut root, key.chars(), result, case_sensitive);
}
root.children
}
macro_rules! branchify(
(case sensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], true)
);
(case insensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], false)
);
);
/// Prints the contents to stdout.
///
/// :param branches: the branches to search through
/// :param indent: the level of indentation (each level representing four leading spaces)
/// :param read_call: the function call to read a byte
/// :param end: the byte which marks the end of the sequence
/// :param max_len: the maximum length a value may be before giving up and returning ``None``
/// :param valid: the function call to if a byte ``b`` is valid
/// :param unknown: the expression to call for an unknown value; in this string, ``{}`` will be
/// replaced with an expression (literal or non-literal) evaluating to a ``String`` (it is
/// ``{}`` only, not arbitrary format strings)
pub fn generate_branchified_method(
writer: &mut Writer,
branches: Vec<ParseBranch>,
indent: usize,
read_call: &str,
end: &str,
max_len: &str,
valid: &str,
unknown: &str) -> IoResult<()> {
fn r(writer: &mut Writer, branch: &ParseBranch, prefix: &str, indent: usize, read_call: &str,
end: &str, max_len: &str, valid: &str, unknown: &str) -> IoResult<()> {
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
for &c in branch.matches.iter() {
let next_prefix = format!("{}{}", prefix, c as char);
w!(format!("Ok(b'{}') => match {} {{", c as char, read_call));
for b in branch.children.iter() {
try!(r(writer, b, &next_prefix[], indent + 1, read_call, end, max_len, valid, unknown));
}
match branch.result {
Some(ref result) =>
w!(format!(" Ok(b' ') => return Ok({}),", *result)),
None => w!(format!(" Ok(b' ') => return Ok({}),",
unknown.replace("{}", &format!("String::from_str(\"{}\")", next_prefix)[]))),
}
w!(format!(" Ok(b) if {} => (\"{}\", b),", valid, next_prefix));
w!(" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),");
w!(" Err(err) => return Err(err),");
w!("},");
}
Ok(())
}
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
w!(format!("let (s, next_byte) = match {} {{", read_call));
for b in branches.iter() {
try!(r(writer, b, "", indent + 1, read_call, end, max_len, valid, unknown));
}
w!(format!(" Ok(b) if {} => (\"\", b),", valid));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( ("};"));
w!( ("// OK, that didn't pan out. Let's read the rest and see what we get."));
w!( ("let mut s = String::from_str(s);"));
w!( ("s.push(next_byte as char);"));
w!( ("loop {"));
w!(format!(" match {} {{", read_call));
w!(format!(" Ok(b) if b == {} => return Ok({}),", end, unknown.replace("{}", "s")));
w!(format!(" Ok(b) if {} => {{", valid));
w!(format!(" if s.len() == {} {{", max_len));
w!( (" // Too long; bad request"));
w!( (" return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"too long, bad request\", detail: None });"));
w!( (" }"));
w!( (" s.push(b as char);"));
w!( (" },"));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( (" }"));
w!( ("}"));
Ok(())
}
| new | identifier_name |
branchify.rs | #![macro_use]
use std::str::Chars;
use std::vec::Vec;
use std::io::IoResult;
use std::iter::repeat;
use std::ascii::AsciiExt;
#[derive(Clone)]
pub struct ParseBranch {
matches: Vec<u8>,
result: Option<String>,
children: Vec<ParseBranch>,
}
impl ParseBranch {
fn new() -> ParseBranch {
ParseBranch {
matches: Vec::new(),
result: None,
children: Vec::new()
}
}
}
pub fn branchify(options: &[(&str, &str)], case_sensitive: bool) -> Vec<ParseBranch> {
let mut root = ParseBranch::new();
fn go_down_moses(branch: &mut ParseBranch, mut chariter: Chars, result: &str, case_sensitive: bool) {
match chariter.next() {
Some(c) => {
let first_case = if case_sensitive { c as u8 } else { c.to_ascii_uppercase() as u8 };
for next_branch in branch.children.iter_mut() {
if next_branch.matches[0] == first_case {
go_down_moses(next_branch, chariter, result, case_sensitive);
return;
}
}
let mut subbranch = ParseBranch::new();
subbranch.matches.push(first_case);
if !case_sensitive {
let second_case = c.to_ascii_lowercase() as u8;
if first_case != second_case {
subbranch.matches.push(second_case);
}
}
branch.children.push(subbranch);
let i = branch.children.len() - 1;
go_down_moses(&mut branch.children[i], chariter, result, case_sensitive);
},
None => | ,
}
};
for &(key, result) in options.iter() {
go_down_moses(&mut root, key.chars(), result, case_sensitive);
}
root.children
}
macro_rules! branchify(
(case sensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], true)
);
(case insensitive, $($key:expr => $value:ident),*) => (
::branchify::branchify(&[$(($key, stringify!($value))),*], false)
);
);
/// Prints the contents to stdout.
///
/// :param branches: the branches to search through
/// :param indent: the level of indentation (each level representing four leading spaces)
/// :param read_call: the function call to read a byte
/// :param end: the byte which marks the end of the sequence
/// :param max_len: the maximum length a value may be before giving up and returning ``None``
/// :param valid: the function call to if a byte ``b`` is valid
/// :param unknown: the expression to call for an unknown value; in this string, ``{}`` will be
/// replaced with an expression (literal or non-literal) evaluating to a ``String`` (it is
/// ``{}`` only, not arbitrary format strings)
pub fn generate_branchified_method(
writer: &mut Writer,
branches: Vec<ParseBranch>,
indent: usize,
read_call: &str,
end: &str,
max_len: &str,
valid: &str,
unknown: &str) -> IoResult<()> {
fn r(writer: &mut Writer, branch: &ParseBranch, prefix: &str, indent: usize, read_call: &str,
end: &str, max_len: &str, valid: &str, unknown: &str) -> IoResult<()> {
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
for &c in branch.matches.iter() {
let next_prefix = format!("{}{}", prefix, c as char);
w!(format!("Ok(b'{}') => match {} {{", c as char, read_call));
for b in branch.children.iter() {
try!(r(writer, b, &next_prefix[], indent + 1, read_call, end, max_len, valid, unknown));
}
match branch.result {
Some(ref result) =>
w!(format!(" Ok(b' ') => return Ok({}),", *result)),
None => w!(format!(" Ok(b' ') => return Ok({}),",
unknown.replace("{}", &format!("String::from_str(\"{}\")", next_prefix)[]))),
}
w!(format!(" Ok(b) if {} => (\"{}\", b),", valid, next_prefix));
w!(" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),");
w!(" Err(err) => return Err(err),");
w!("},");
}
Ok(())
}
let indentstr = repeat(' ').take(indent * 4).collect::<String>();
macro_rules! w (
($s:expr) => {
try!(write!(writer, "{}{}\n", indentstr, $s))
}
);
w!(format!("let (s, next_byte) = match {} {{", read_call));
for b in branches.iter() {
try!(r(writer, b, "", indent + 1, read_call, end, max_len, valid, unknown));
}
w!(format!(" Ok(b) if {} => (\"\", b),", valid));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( ("};"));
w!( ("// OK, that didn't pan out. Let's read the rest and see what we get."));
w!( ("let mut s = String::from_str(s);"));
w!( ("s.push(next_byte as char);"));
w!( ("loop {"));
w!(format!(" match {} {{", read_call));
w!(format!(" Ok(b) if b == {} => return Ok({}),", end, unknown.replace("{}", "s")));
w!(format!(" Ok(b) if {} => {{", valid));
w!(format!(" if s.len() == {} {{", max_len));
w!( (" // Too long; bad request"));
w!( (" return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"too long, bad request\", detail: None });"));
w!( (" }"));
w!( (" s.push(b as char);"));
w!( (" },"));
w!( (" Ok(_) => return Err(::std::io::IoError { kind: ::std::io::OtherIoError, desc: \"bad value\", detail: None }),"));
w!( (" Err(err) => return Err(err),"));
w!( (" }"));
w!( ("}"));
Ok(())
}
| {
assert!(branch.result.is_none());
branch.result = Some(String::from_str(result));
} | conditional_block |
speech.js | /**
* @fileoverview Driver script for speech processing.
* @author aravart@cs.wisc.edu (Ara Vartanian), dliang@cs.wisc.edu (David Liang)
*/
goog.provide('SpeechGames');
goog.provide('SpeechGames.Speech');
goog.require('Blockly.Workspace');
goog.require('SpeechBlocks.Controller');
goog.require('SpeechBlocks.Interpreter');
goog.require('Turtle.Answers');
/**
* A global reference to the current workspace's controller.
* @public {SpeechBlocks.Controller}
*/
SpeechGames.controller = null;
/**
* A global reference to the workspace itself.
* @public {Blockly.Workspace}
*/
SpeechGames.workspace = null;
/**
* A global reference to the interpreter.
* @public {Blockly.Interpreter}
*/
SpeechGames.interpreter = null;
/**
* Maximum number of levels. Common to all apps.
* @public @const
*/
SpeechGames.MAX_LEVEL = 12;
/**
* User's current level (e.g. 5).
* @public
*/
SpeechGames.LEVEL = null;
/**
* Bind a function to a button's click event.
* On touch-enabled browsers, ontouchend is treated as equivalent to onclick.
* @param {!Element|string} el Button element or ID thereof.
* @param {!Function} func Event handler to bind.
* @public
*/
SpeechGames.bindClick = function(el, func) {
if (typeof el == 'string') {
el = document.getElementById(el);
}
el.addEventListener('click', func, true);
el.addEventListener('touchend', func, true);
};
/**
* Instantiates a speech object, used for controlling speech input to interpreter.
* @public
* @constructor
*/
SpeechGames.Speech = function() {
this.oldQ = null;
this.parseTimer = null;
this.previousRecognitionTime = null;
this.output = null;
this.timeout = null;
this.animating = false;
this.listening = false;
this.recognition = null;
this.mic_animate = 'assets/img/mic-animate.gif';
this.mic = 'assets/img/mic.gif';
this.parseTimer = null;
// this.misrecognized = [];
this.corrector_ = new Corrector();
};
/**
* Sets the interval that ensures the mic is lietning (when applicable).
* @private
*/
SpeechGames.Speech.prototype.setMicInterval_ = function() {
if (!window.location.origin.includes("file")) {
if (this.interval) {
clearInterval(this.interval);
}
this.interval = setInterval(function() {
if (!this.listening) {
this.listening = true;
this.startDictation_();
}
}.bind(this), 100);
} else {
console.log("Cannot use speech from local files!");
}
};
/**
* Hard replacements for certain phrases.
* @param {string} speech Utterance to be corrected.
* @return {string} The corrected utterance.
* @private
*/
SpeechGames.Speech.prototype.correctSpeech_ = function(speech) {
var workspaceState = SpeechGames.controller.workspaceState_;
var blockIds = Object.values(workspaceState.blockIds.map_.map_);
var blockValuesetMap = workspaceState.blockValuesetMap;
var blockTypeMap = new goog.structs.Map();
blockTypeMap.set('controls_if', 'if');
blockTypeMap.set('controls_repeat_ext', 'repeat');
blockTypeMap.set('turtle_turn_internal', 'turn');
blockTypeMap.set('turtle_move', 'move');
blockTypeMap.set('turtle_pen', 'pen');
blockTypeMap.set('turtle_repeat_internal', 'repeat');
blockTypeMap.set('turtle_colour_internal', 'color');
var blockTypes = Turtle.blockTypes[SpeechGames.LEVEL].slice(0);
for (var i = 0; i < blockTypes.length; i++) {
if (blockTypeMap.containsKey(blockTypes[i])) {
blockTypes[i] = blockTypeMap.get(blockTypes[i]);
}
}
return this.corrector_.correct(speech, blockIds, blockValuesetMap, blockTypes);
};
/**
* Restarts the listening for a new utterance.
* @private
*/
SpeechGames.Speech.prototype.startDictation_ = function() {
if (window.hasOwnProperty('webkitSpeechRecognition')) {
if (this.recognition) {
this.recognition.stop();
}
this.recognition = new webkitSpeechRecognition();
this.recognition.continuous = false;
this.recognition.interimResults = false;
this.recognition.lang = 'en-US';
this.recognition.onstart = function() {
this.listening = true;
if (!this.animating) {
this.animating = true;
document.getElementById('microphone').src = this.mic_animate;
}
}.bind(this);
this.recognition.onresult = function(e) {
this.recognition.stop();
this.rawSpeech = e.results[0][0].transcript.toLowerCase();
$('#q').val(this.rawSpeech);
this.parseSpeech_();
}.bind(this);
this.recognition.onerror = function(e) {
this.listening = false;
}.bind(this);
this.recognition.onend = function(e) {
this.listening = false;
}.bind(this);
this.recognition.start();
}
};
/**
* Toggles the listening from the microphone.
* @private
*/
SpeechGames.Speech.prototype.toggleDictation_ = function() {
if (this.listening) {
this.listening = !this.listening;
this.recognition.stop();
this.recognition = null;
this.animating = false;
document.getElementById('microphone').src = this.mic;
clearInterval(this.interval);
this.interval = null;
} else {
document.getElementById('microphone').src = this.mic_animate;
this.setMicInterval_();
}
};
/**
* Parses the input and sends the result to SpeechGames.interpreter. Then outputs the response
* to the user interface.
* @private
*/
SpeechGames.Speech.prototype.parseSpeech_ = function() {
this.previousRecognitionTime = Date.now();
this.oldQ = $('#q').val();
var result = false;
this.rawSpeech = $('#q').val();
try {
this.correctedSpeech = this.correctSpeech_(this.rawSpeech);
$('#q').val(this.correctedSpeech);
this.output = parser.parse(this.correctedSpeech);
this.response = this.interpretSpeech_(this.output);
clearTimeout(this.timeout);
this.result = true;
$("#user-message").hide().text('I heard "' + this.correctedSpeech + '"').fadeIn(200);
} catch (e) {
console.log(e);
if (e instanceof SpeechBlocks.UserError) {
$('#user-message').text(e.message);
} else {
$('#parse-message').attr('class', 'message error').text(this.buildErrorMessage_(e));
if (this.rawSpeech !== '') {
$('#user-message').hide().text('Sorry, I didn\'t understand.').fadeIn(200);
clearTimeout(this.timeout);
this.timeout = setTimeout(function(){
$('#user-message').hide().text("Awaiting your command!").fadeIn(200);
}.bind(this), 5000);
}
}
}
return result;
};
/**
* Calls the SpeechGames.interpreter's interpret function.
* @return {string} Response for user (i.e. 'Added a move block.').
* @private
*/
SpeechGames.Speech.prototype.interpretSpeech_ = function(parsedSpeech) {
if (parsedSpeech !== undefined) {
return SpeechGames.interpreter.interpret(parsedSpeech);
}
};
/**
* Schedule's the parsing to occur after a second of inactivity.
* @private
*/
SpeechGames.Speech.prototype.scheduleParse_ = function() {
if ($('#q').val() !== this.oldQ) {
if (this.parseTimer !== null) {
clearTimeout(this.parseTimer);
this.parseTimer = null;
}
this.parseTimer = setTimeout(function() {
this.parseSpeech_();
this.parseTimer = null;
}.bind(this), 1000);
}
};
/**
* Builds an error message.
* @param {exception} e Exception from which to build the error message.
* @return {exception} The error message.
*/
SpeechGames.Speech.prototype.buildErrorMessage_ = function(e) {
return e.location !== undefined ? 'Line ' + e.location.start.line + ', column ' + e.location.start.column + ': ' + e.message : e.message;
};
/**
* Extracts a parameter from the URL.
* If the parameter is absent default_value is returned.
* @param {!string} name The name of the parameter.
* @param {!string} defaultValue Value to return if paramater not found.
* @return {string} The parameter value or the default value if not found.
*/
SpeechGames.getStringParamFromURL_ = function(name, defaultValue) {
var val =
window.location.search.match(new RegExp('[?&]' + name + '=([^&]+)'));
return val ? decodeURIComponent(val[1].replace(/\+/g, '%20')) : defaultValue;
};
/**
* Extracts a numeric parameter from the URL.
* If the parameter is absent or less than min_value, min_value is
* returned. If it is greater than max_value, max_value is returned.
* @param {!string} name The name of the parameter.
* @param {!number} minValue The minimum legal value.
* @param {!number} maxValue The maximum legal value.
* @return {number} A number in the range [min_value, max_value].
*/
SpeechGames.getNumberParamFromURL_ = function(name, minValue, maxValue) {
var val = Number(SpeechGames.getStringParamFromURL_(name, 'NaN'));
return isNaN(val) ? minValue : goog.math.clamp(minValue, val, maxValue);
};
/**
* Generates code from the blockly workspace.
* @private
*/
SpeechGames.createCode_ = function() {
Blockly.JavaScript.addReservedWords('code');
return Blockly.JavaScript.workspaceToCode(SpeechGames.workspace);
};
/**
* Shows the generated code.
* @private
*/
SpeechGames.showCode_ = function() {
var modalEl = document.createElement('generatedCode');
modalEl.style.width = '400px';
modalEl.style.height = '300px';
modalEl.style.margin = '100px auto';
modalEl.style.backgroundColor = '#ff';
modalEl.textContent = createCode_();
mui.overlay('on', modalEl);
};
/**
* Gets a parameter from the url.
* @param {!string} name Name of the param (i.e. debug)
* @param {string} url Optional url, otherwise window url is used.
*/
SpeechGames.getParameterByName_ = function(name, url) {
if (!url) {
url = window.location.href;
}
name = name.replace(/[\[\]]/g, '\\$&');
var regex = new RegExp('[?&]' + name + '(=([^&#]*)|&|#|$)'),
results = regex.exec(url);
if (!results) return '';
if (!results[2]) return '';
return decodeURIComponent(results[2].replace(/\+/g, ' '));
};
/**
* Gets all params except for level flag from the url.
* @return {string} extraParams Extra params, besides level.
*/
SpeechGames.getExtraParams = function() {
var href = window.location.href;
var extraParams = href.substring(href.indexOf('?')).replace('?level='+SpeechGames.LEVEL, '');
if (!extraParams.includes('?microphone') && SpeechGames.speech.listening) {
extraParams += '&?microphone=1';
} else {
if (!SpeechGames.speech.listening) {
extraParams = extraParams.replace('&?microphone=1','');
}
}
if (extraParams[0] != '&') {
extraParams = '&' + extraParams;
}
return extraParams;
}
/**
* Edit toolbox xml.
* @param {$document} document Index of speech games
* @param {array} blockTypes Block types to be included in the toolbox XML
*/
SpeechGames.editToolboxXml_ = function(document, blockTypes) {
var $xmls = document.getElementsByTagName('xml');
var $toolbox = $xmls.toolbox.children;
for(var i = 0; i < $toolbox.length; ) {
if (!blockTypes.includes($toolbox[i].getAttribute('type')))
$toolbox[i].remove();
else
i++;
}
}
/**
* Initializes all of the SpeechGames objects and begins listening.
*/
$(document).ready(function() {
SpeechGames.LEVEL = SpeechGames.getNumberParamFromURL_('level', 1, SpeechGames.MAX_LEVEL);
blockTypes = Turtle.blockTypes[SpeechGames.LEVEL];
SpeechGames.editToolboxXml_(document, blockTypes);
SpeechGames.speech = new SpeechGames.Speech();
SpeechGames.workspace = Blockly.inject('blocklyDiv', {
media: 'lib/google-blockly/media/',
trashcan: false,
scrollbars: false,
toolbox: document.getElementById('toolbox')
});
SpeechGames.controller = new SpeechBlocks.Controller(
SpeechGames.workspace,
SpeechGames.getParameterByName_('animate'));
SpeechGames.interpreter = new SpeechBlocks.Interpreter(SpeechGames.controller, blockTypes); |
if (SpeechGames.getParameterByName_('microphone')) {
SpeechGames.speech.setMicInterval_();
}
if (SpeechGames.getParameterByName_('debug')) {
$('#q').css('visibility','visible');
} else {
$('#debug').hide();
}
// listen for mouse clicks, key presses
$('#q')
.change(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech))
.mousedown(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech))
.mouseup(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech))
.click(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech))
.keydown(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech))
.keyup(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech))
.keypress(SpeechGames.speech.scheduleParse_.bind(SpeechGames.speech));
// if microphone icon clicked
$('#microphone')
.click(SpeechGames.speech.toggleDictation_.bind(SpeechGames.speech));
$("#user-message").hide().text("Awaiting your command!").fadeIn(500);
// }
// $('#runButton').on('click', run);
// $('#showButton').on('click', showCode_);
// $('#debugButton').on('click', function() {
// $('#debug').toggle();
// });
// $('#buttonRow').hide();
$('#levelDescription').text(Turtle.descriptions[SpeechGames.LEVEL]);
}); | random_line_split | |
dcf.py | #!/usr/bin/python
import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')
from calc_dcf import calc_dcf
def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
assumptions = {}
try:
assumptions['Tax Rate'] = float(tax_rate)/100.0
assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out)/100.0
assumptions['SGA % of sales'] = float(sga_of_sales)/100.0
assumptions['D&A % of sales'] = float(da_of_sales)/100.0
assumptions['CAPEX % of sales'] = float(capex_of_sales)/100.0
assumptions['NWC % of sales'] = float(nwc_of_sales)/100.0
assumptions['Levered Beta'] = float(levered_beta)
assumptions['Current Yield'] = float(current_yield)/100.0
assumptions['Exit Multiple'] = float(exit_multiple)
except ValueError:
return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'
ticker = ticker.split(' ')[0]
if not ticker.isalnum():
|
return calc_dcf(assumptions, ticker.upper())
| return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>' | conditional_block |
dcf.py | #!/usr/bin/python
import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')
from calc_dcf import calc_dcf
def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
assumptions = {}
try:
assumptions['Tax Rate'] = float(tax_rate)/100.0
assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out)/100.0
assumptions['SGA % of sales'] = float(sga_of_sales)/100.0
assumptions['D&A % of sales'] = float(da_of_sales)/100.0
assumptions['CAPEX % of sales'] = float(capex_of_sales)/100.0
assumptions['NWC % of sales'] = float(nwc_of_sales)/100.0 | except ValueError:
return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'
ticker = ticker.split(' ')[0]
if not ticker.isalnum():
return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>'
return calc_dcf(assumptions, ticker.upper()) | assumptions['Levered Beta'] = float(levered_beta)
assumptions['Current Yield'] = float(current_yield)/100.0
assumptions['Exit Multiple'] = float(exit_multiple) | random_line_split |
dcf.py | #!/usr/bin/python
import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')
from calc_dcf import calc_dcf
def | (req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
assumptions = {}
try:
assumptions['Tax Rate'] = float(tax_rate)/100.0
assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out)/100.0
assumptions['SGA % of sales'] = float(sga_of_sales)/100.0
assumptions['D&A % of sales'] = float(da_of_sales)/100.0
assumptions['CAPEX % of sales'] = float(capex_of_sales)/100.0
assumptions['NWC % of sales'] = float(nwc_of_sales)/100.0
assumptions['Levered Beta'] = float(levered_beta)
assumptions['Current Yield'] = float(current_yield)/100.0
assumptions['Exit Multiple'] = float(exit_multiple)
except ValueError:
return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'
ticker = ticker.split(' ')[0]
if not ticker.isalnum():
return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>'
return calc_dcf(assumptions, ticker.upper())
| create_dcf | identifier_name |
dcf.py | #!/usr/bin/python
import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')
from calc_dcf import calc_dcf
def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
| assumptions = {}
try:
assumptions['Tax Rate'] = float(tax_rate)/100.0
assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out)/100.0
assumptions['SGA % of sales'] = float(sga_of_sales)/100.0
assumptions['D&A % of sales'] = float(da_of_sales)/100.0
assumptions['CAPEX % of sales'] = float(capex_of_sales)/100.0
assumptions['NWC % of sales'] = float(nwc_of_sales)/100.0
assumptions['Levered Beta'] = float(levered_beta)
assumptions['Current Yield'] = float(current_yield)/100.0
assumptions['Exit Multiple'] = float(exit_multiple)
except ValueError:
return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'
ticker = ticker.split(' ')[0]
if not ticker.isalnum():
return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>'
return calc_dcf(assumptions, ticker.upper()) | identifier_body | |
vai-Vaii.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function plural(n: number): number |
export default [
'vai-Vaii',
[['AM', 'PM'], u, u],
u,
[
['S', 'M', 'T', 'W', 'T', 'F', 'S'],
[
'ꕞꕌꔵ', 'ꗳꗡꘉ', 'ꕚꕞꕚ', 'ꕉꕞꕒ', 'ꕉꔤꕆꕢ', 'ꕉꔤꕀꕮ',
'ꔻꔬꔳ'
],
u, u
],
u,
[
['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'],
[
'ꖨꖕꔞ', 'ꕒꕡ', 'ꕾꖺ', 'ꖢꖕ', 'ꖑꕱ', 'ꖱꘋ', 'ꖱꕞ', 'ꗛꔕ', 'ꕢꕌ',
'ꕭꖃ', 'ꔞꘋ', 'ꖨꖕꗏ'
],
[
'ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ', 'ꕒꕡꖝꖕ', 'ꕾꖺ', 'ꖢꖕ', 'ꖑꕱ', 'ꖱꘋ',
'ꖱꕞꔤ', 'ꗛꔕ', 'ꕢꕌ', 'ꕭꖃ', 'ꔞꘋꕔꕿ ꕸꖃꗏ',
'ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ'
]
],
u,
[['BCE', 'CE'], u, u],
1,
[6, 0],
['dd/MM/y', 'd MMM y', 'd MMMM y', 'EEEE, d MMMM y'],
['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'],
['{1} {0}', u, u, u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0%', '¤#,##0.00', '#E0'],
'$',
'ꕞꔤꔫꕩ ꕜꕞꕌ',
{'JPY': ['JP¥', '¥'], 'LRD': ['$'], 'USD': ['US$', '$']},
'ltr',
plural
];
| {
return 5;
} | identifier_body |
vai-Vaii.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function plural(n: number): number {
return 5;
}
export default [
'vai-Vaii',
[['AM', 'PM'], u, u],
u,
[
['S', 'M', 'T', 'W', 'T', 'F', 'S'],
[
'ꕞꕌꔵ', 'ꗳꗡꘉ', 'ꕚꕞꕚ', 'ꕉꕞꕒ', 'ꕉꔤꕆꕢ', 'ꕉꔤꕀꕮ', | u, u
],
u,
[
['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'],
[
'ꖨꖕꔞ', 'ꕒꕡ', 'ꕾꖺ', 'ꖢꖕ', 'ꖑꕱ', 'ꖱꘋ', 'ꖱꕞ', 'ꗛꔕ', 'ꕢꕌ',
'ꕭꖃ', 'ꔞꘋ', 'ꖨꖕꗏ'
],
[
'ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ', 'ꕒꕡꖝꖕ', 'ꕾꖺ', 'ꖢꖕ', 'ꖑꕱ', 'ꖱꘋ',
'ꖱꕞꔤ', 'ꗛꔕ', 'ꕢꕌ', 'ꕭꖃ', 'ꔞꘋꕔꕿ ꕸꖃꗏ',
'ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ'
]
],
u,
[['BCE', 'CE'], u, u],
1,
[6, 0],
['dd/MM/y', 'd MMM y', 'd MMMM y', 'EEEE, d MMMM y'],
['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'],
['{1} {0}', u, u, u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0%', '¤#,##0.00', '#E0'],
'$',
'ꕞꔤꔫꕩ ꕜꕞꕌ',
{'JPY': ['JP¥', '¥'], 'LRD': ['$'], 'USD': ['US$', '$']},
'ltr',
plural
]; | 'ꔻꔬꔳ'
], | random_line_split |
vai-Vaii.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function | (n: number): number {
return 5;
}
export default [
'vai-Vaii',
[['AM', 'PM'], u, u],
u,
[
['S', 'M', 'T', 'W', 'T', 'F', 'S'],
[
'ꕞꕌꔵ', 'ꗳꗡꘉ', 'ꕚꕞꕚ', 'ꕉꕞꕒ', 'ꕉꔤꕆꕢ', 'ꕉꔤꕀꕮ',
'ꔻꔬꔳ'
],
u, u
],
u,
[
['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'],
[
'ꖨꖕꔞ', 'ꕒꕡ', 'ꕾꖺ', 'ꖢꖕ', 'ꖑꕱ', 'ꖱꘋ', 'ꖱꕞ', 'ꗛꔕ', 'ꕢꕌ',
'ꕭꖃ', 'ꔞꘋ', 'ꖨꖕꗏ'
],
[
'ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ', 'ꕒꕡꖝꖕ', 'ꕾꖺ', 'ꖢꖕ', 'ꖑꕱ', 'ꖱꘋ',
'ꖱꕞꔤ', 'ꗛꔕ', 'ꕢꕌ', 'ꕭꖃ', 'ꔞꘋꕔꕿ ꕸꖃꗏ',
'ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ'
]
],
u,
[['BCE', 'CE'], u, u],
1,
[6, 0],
['dd/MM/y', 'd MMM y', 'd MMMM y', 'EEEE, d MMMM y'],
['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'],
['{1} {0}', u, u, u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0%', '¤#,##0.00', '#E0'],
'$',
'ꕞꔤꔫꕩ ꕜꕞꕌ',
{'JPY': ['JP¥', '¥'], 'LRD': ['$'], 'USD': ['US$', '$']},
'ltr',
plural
];
| plural | identifier_name |
Options.js | var numSaves, _autoSaveChanges;
module("TiddlyWiki options", {
setup: function() {
config.options.chkAutoSave = true;
systemSettingSave = 0;
_autoSaveChanges = autoSaveChanges;
numSaves = 0;
autoSaveChanges = function() {
numSaves += 1;
return _autoSaveChanges.apply(this, arguments);
}
},
teardown: function() {
numSaves = null;
config.options.chkAutoSave = false;
autoSaveChanges = _autoSaveChanges;
}
});
test("save multiple system settings", function() {
saveSystemSetting("foo", true);
saveSystemSetting("foo", false);
saveSystemSetting("foo", true);
strictEqual(numSaves, 0, "The save is asynchronous so no saves have yet been made");
strictEqual(systemSettingSave > 0, true, "However there should be a timeout in progress");
});
}); | jQuery(document).ready(function(){ | random_line_split | |
base.py | # Unix SMB/CIFS implementation. | # the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This provides a wrapper around the cmd interface so that tests can
# easily be built on top of it and have minimal code to run basic tests
# of the commands. A list of the environmental variables can be found in
# ~/selftest/selftest.pl
#
# These can all be accesses via os.environ["VARIBLENAME"] when needed
import random
import string
from samba.auth import system_session
from samba.samdb import SamDB
from cStringIO import StringIO
from samba.netcmd.main import cmd_sambatool
import samba.tests
class SambaToolCmdTest(samba.tests.TestCaseInTempDir):
def getSamDB(self, *argv):
"""a convenience function to get a samdb instance so that we can query it"""
# We build a fake command to get the options created the same
# way the command classes do it. It would be better if the command
# classes had a way to more cleanly do this, but this lets us write
# tests for now
cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"]
parser, optiongroups = cmd._create_parser("user")
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest]
kwargs.update(optiongroups)
H = kwargs.get("H", None)
sambaopts = kwargs.get("sambaopts", None)
credopts = kwargs.get("credopts", None)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
return samdb
def runcmd(self, name, *args):
"""run a single level command"""
cmd = cmd_sambatool.subcommands[name]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def runsubcmd(self, name, sub, *args):
"""run a command with sub commands"""
# The reason we need this function separate from runcmd is
# that the .outf StringIO assignment is overriden if we use
# runcmd, so we can't capture stdout and stderr
cmd = cmd_sambatool.subcommands[name].subcommands[sub]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def assertCmdSuccess(self, val, msg=""):
self.assertIsNone(val, msg)
def assertCmdFail(self, val, msg=""):
self.assertIsNotNone(val, msg)
def assertMatch(self, base, string, msg=""):
self.assertTrue(string in base, msg)
def randomName(self, count=8):
"""Create a random name, cap letters and numbers, and always starting with a letter"""
name = random.choice(string.ascii_uppercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1))
return name
def randomPass(self, count=16):
name = random.choice(string.ascii_uppercase)
name += random.choice(string.digits)
name += random.choice(string.ascii_lowercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3))
return name
def randomXid(self):
# pick some hopefully unused, high UID/GID range to avoid interference
# from the system the test runs on
xid = random.randint(4711000, 4799000)
return xid
def assertWithin(self, val1, val2, delta, msg=""):
"""Assert that val1 is within delta of val2, useful for time computations"""
self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg) | # Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by | random_line_split |
base.py | # Unix SMB/CIFS implementation.
# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This provides a wrapper around the cmd interface so that tests can
# easily be built on top of it and have minimal code to run basic tests
# of the commands. A list of the environmental variables can be found in
# ~/selftest/selftest.pl
#
# These can all be accesses via os.environ["VARIBLENAME"] when needed
import random
import string
from samba.auth import system_session
from samba.samdb import SamDB
from cStringIO import StringIO
from samba.netcmd.main import cmd_sambatool
import samba.tests
class SambaToolCmdTest(samba.tests.TestCaseInTempDir):
def getSamDB(self, *argv):
"""a convenience function to get a samdb instance so that we can query it"""
# We build a fake command to get the options created the same
# way the command classes do it. It would be better if the command
# classes had a way to more cleanly do this, but this lets us write
# tests for now
cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"]
parser, optiongroups = cmd._create_parser("user")
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
|
kwargs.update(optiongroups)
H = kwargs.get("H", None)
sambaopts = kwargs.get("sambaopts", None)
credopts = kwargs.get("credopts", None)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
return samdb
def runcmd(self, name, *args):
"""run a single level command"""
cmd = cmd_sambatool.subcommands[name]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def runsubcmd(self, name, sub, *args):
"""run a command with sub commands"""
# The reason we need this function separate from runcmd is
# that the .outf StringIO assignment is overriden if we use
# runcmd, so we can't capture stdout and stderr
cmd = cmd_sambatool.subcommands[name].subcommands[sub]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def assertCmdSuccess(self, val, msg=""):
self.assertIsNone(val, msg)
def assertCmdFail(self, val, msg=""):
self.assertIsNotNone(val, msg)
def assertMatch(self, base, string, msg=""):
self.assertTrue(string in base, msg)
def randomName(self, count=8):
"""Create a random name, cap letters and numbers, and always starting with a letter"""
name = random.choice(string.ascii_uppercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1))
return name
def randomPass(self, count=16):
name = random.choice(string.ascii_uppercase)
name += random.choice(string.digits)
name += random.choice(string.ascii_lowercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3))
return name
def randomXid(self):
# pick some hopefully unused, high UID/GID range to avoid interference
# from the system the test runs on
xid = random.randint(4711000, 4799000)
return xid
def assertWithin(self, val1, val2, delta, msg=""):
"""Assert that val1 is within delta of val2, useful for time computations"""
self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
| for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest] | conditional_block |
base.py | # Unix SMB/CIFS implementation.
# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This provides a wrapper around the cmd interface so that tests can
# easily be built on top of it and have minimal code to run basic tests
# of the commands. A list of the environmental variables can be found in
# ~/selftest/selftest.pl
#
# These can all be accesses via os.environ["VARIBLENAME"] when needed
import random
import string
from samba.auth import system_session
from samba.samdb import SamDB
from cStringIO import StringIO
from samba.netcmd.main import cmd_sambatool
import samba.tests
class SambaToolCmdTest(samba.tests.TestCaseInTempDir):
def getSamDB(self, *argv):
"""a convenience function to get a samdb instance so that we can query it"""
# We build a fake command to get the options created the same
# way the command classes do it. It would be better if the command
# classes had a way to more cleanly do this, but this lets us write
# tests for now
cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"]
parser, optiongroups = cmd._create_parser("user")
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest]
kwargs.update(optiongroups)
H = kwargs.get("H", None)
sambaopts = kwargs.get("sambaopts", None)
credopts = kwargs.get("credopts", None)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
return samdb
def runcmd(self, name, *args):
"""run a single level command"""
cmd = cmd_sambatool.subcommands[name]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def runsubcmd(self, name, sub, *args):
"""run a command with sub commands"""
# The reason we need this function separate from runcmd is
# that the .outf StringIO assignment is overriden if we use
# runcmd, so we can't capture stdout and stderr
cmd = cmd_sambatool.subcommands[name].subcommands[sub]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def assertCmdSuccess(self, val, msg=""):
self.assertIsNone(val, msg)
def | (self, val, msg=""):
self.assertIsNotNone(val, msg)
def assertMatch(self, base, string, msg=""):
self.assertTrue(string in base, msg)
def randomName(self, count=8):
"""Create a random name, cap letters and numbers, and always starting with a letter"""
name = random.choice(string.ascii_uppercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1))
return name
def randomPass(self, count=16):
name = random.choice(string.ascii_uppercase)
name += random.choice(string.digits)
name += random.choice(string.ascii_lowercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3))
return name
def randomXid(self):
# pick some hopefully unused, high UID/GID range to avoid interference
# from the system the test runs on
xid = random.randint(4711000, 4799000)
return xid
def assertWithin(self, val1, val2, delta, msg=""):
"""Assert that val1 is within delta of val2, useful for time computations"""
self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
| assertCmdFail | identifier_name |
base.py | # Unix SMB/CIFS implementation.
# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This provides a wrapper around the cmd interface so that tests can
# easily be built on top of it and have minimal code to run basic tests
# of the commands. A list of the environmental variables can be found in
# ~/selftest/selftest.pl
#
# These can all be accesses via os.environ["VARIBLENAME"] when needed
import random
import string
from samba.auth import system_session
from samba.samdb import SamDB
from cStringIO import StringIO
from samba.netcmd.main import cmd_sambatool
import samba.tests
class SambaToolCmdTest(samba.tests.TestCaseInTempDir):
def getSamDB(self, *argv):
"""a convenience function to get a samdb instance so that we can query it"""
# We build a fake command to get the options created the same
# way the command classes do it. It would be better if the command
# classes had a way to more cleanly do this, but this lets us write
# tests for now
cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"]
parser, optiongroups = cmd._create_parser("user")
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest]
kwargs.update(optiongroups)
H = kwargs.get("H", None)
sambaopts = kwargs.get("sambaopts", None)
credopts = kwargs.get("credopts", None)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
return samdb
def runcmd(self, name, *args):
"""run a single level command"""
cmd = cmd_sambatool.subcommands[name]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def runsubcmd(self, name, sub, *args):
"""run a command with sub commands"""
# The reason we need this function separate from runcmd is
# that the .outf StringIO assignment is overriden if we use
# runcmd, so we can't capture stdout and stderr
cmd = cmd_sambatool.subcommands[name].subcommands[sub]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def assertCmdSuccess(self, val, msg=""):
self.assertIsNone(val, msg)
def assertCmdFail(self, val, msg=""):
self.assertIsNotNone(val, msg)
def assertMatch(self, base, string, msg=""):
self.assertTrue(string in base, msg)
def randomName(self, count=8):
"""Create a random name, cap letters and numbers, and always starting with a letter"""
name = random.choice(string.ascii_uppercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1))
return name
def randomPass(self, count=16):
|
def randomXid(self):
# pick some hopefully unused, high UID/GID range to avoid interference
# from the system the test runs on
xid = random.randint(4711000, 4799000)
return xid
def assertWithin(self, val1, val2, delta, msg=""):
"""Assert that val1 is within delta of val2, useful for time computations"""
self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
| name = random.choice(string.ascii_uppercase)
name += random.choice(string.digits)
name += random.choice(string.ascii_lowercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3))
return name | identifier_body |
test_production_order.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
|
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def test_make_time_log(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time))
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order')
| d.s_warehouse = "Stores - _TC" | conditional_block |
test_production_order.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
|
def test_make_time_log(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time))
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order')
| from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit) | identifier_body |
test_production_order.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def test_make_time_log(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time)) |
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order') |
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel() | random_line_split |
test_production_order.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def | (self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time))
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order')
| test_make_time_log | identifier_name |
fixed_length_vec_glue.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | // xfail-fast: check-fast screws up repr paths
use std::repr;
struct Struc { a: u8, b: [int, ..3], c: int }
pub fn main() {
let arr = [1,2,3];
let struc = Struc {a: 13u8, b: arr, c: 42};
let s = repr::repr_to_str(&struc);
assert_eq!(s, ~"Struc{a: 13u8, b: [1, 2, 3], c: 42}");
} | // option. This file may not be copied, modified, or distributed
// except according to those terms.
| random_line_split |
fixed_length_vec_glue.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast: check-fast screws up repr paths
use std::repr;
struct | { a: u8, b: [int, ..3], c: int }
pub fn main() {
let arr = [1,2,3];
let struc = Struc {a: 13u8, b: arr, c: 42};
let s = repr::repr_to_str(&struc);
assert_eq!(s, ~"Struc{a: 13u8, b: [1, 2, 3], c: 42}");
}
| Struc | identifier_name |
fixed_length_vec_glue.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast: check-fast screws up repr paths
use std::repr;
struct Struc { a: u8, b: [int, ..3], c: int }
pub fn main() | {
let arr = [1,2,3];
let struc = Struc {a: 13u8, b: arr, c: 42};
let s = repr::repr_to_str(&struc);
assert_eq!(s, ~"Struc{a: 13u8, b: [1, 2, 3], c: 42}");
} | identifier_body | |
setup.py | try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='QSTK',
version='0.1',
description='Python toolkit for stocks',
author='Tucker Balch',
author_email='tucker@cc.gatech.edu',
url='http://wiki.quantsoftware.org/',
# On ubuntu, you need to install these:
# sudo apt-get install libfreetype6-dev libpng-dev
# before you can get matplotlib to build
install_requires=[
'nose==1.2.1',
'numpy==1.6.2', | 'pytz==2012h',
'pandas==0.9.0',
'matplotlib==1.1.1',
'epydoc==3.0.1',
],
tests_require=[
'nose==1.2.1',
'coverage==3.5.3',
'pylint==0.26.0',
'pep8==1.3.3',
'pyflakes==0.5.0',
],
setup_requires=[],
packages=find_packages(exclude=['tests']),
include_package_data=True,
test_suite='nose.collector',
zip_safe=False,
) | 'python-dateutil==2.1', | random_line_split |
mediapipe_track_validator.py | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validates generated tracks and filters out invalidated tracks."""
import dataclasses
from automl_video_ondevice.types import ObjectTrackingAnnotation
def calculate_iou(bbox1, bbox2):
"""Calculates the Intersection-Over-Union of two bounding boxes.
IOU is a ratio for determining how much two bounding boxes match.
Args:
bbox1: The first bounding box.
bbox2: The bounding box to compare with.
Returns:
The IOU as a float from 0.0 to 1.0 (1.0 being a perfect match.)
"""
x_overlap = max(0,
min(bbox1.right, bbox2.right) - max(bbox1.left, bbox2.left))
y_overlap = max(0,
min(bbox1.bottom, bbox2.bottom) - max(bbox1.top, bbox2.top))
intersection = x_overlap * y_overlap
area1 = (bbox1.right - bbox1.left) * (bbox1.bottom - bbox1.top)
area2 = (bbox2.right - bbox2.left) * (bbox2.bottom - bbox2.top)
union = area1 + area2 - intersection
return intersection / union
@dataclasses.dataclass
class TrackWrapper:
"""Wraps the tracking annotation with data only relevant to the validator."""
track: ObjectTrackingAnnotation
age: int # How old this track is.
staleness: int # How many iterations it has been since the last detection.
class MediaPipeTrackValidator:
"""Checks if an annotation is valid by measuring staleness.
Each track is stored and aged. If a track box intersects with a new detection,
then consider it an associated detection, and mark the track as fresh.
If a track exists for longer than allowed_staleness, then filter the track out
then tell the mediapipe graph to delete.
"""
def __init__(self, allowed_staleness=10, min_iou=0.6):
"""Constructor for MediaPipeTrackValidator.
Args:
allowed_staleness: How many updates the track can linger for until it is
determined to be stale.
min_iou: How much the detection box must match a tracked box to be
determined as an associated detection.
"""
self._track_map = {}
self._allowed_staleness = allowed_staleness
self._min_iou = min_iou
def forget_unmanaged_tracks(self, managed_tracks):
"""Removes unmanaged tracks from the validator's cache.
The mediapipe graph has it's own track manager and validator. This manager
also removes tracks automatically.
Unmanaged tracks are tracks no longer outputted by the mediapipe graph.
This makes sure that this validator is in sync with the mediapipe track
manager.
Args:
managed_tracks:
"""
managed_ids = list(map(lambda t: t.track_id, managed_tracks))
keys = list(self._track_map.keys())
for idx in keys:
if idx not in managed_ids:
del self._track_map[idx]
def update_tracks(self, managed_tracks):
"""Updates tracks stored in the validator with new tracking data.
Also adds new tracks if the validator does not know about the track yet.
Args:
managed_tracks: Tracks managed by mediapipe.
"""
for track in managed_tracks:
if track.track_id in self._track_map:
registered_track = self._track_map[track.track_id]
registered_track.track = track
else:
|
def age_tracks(self):
"""Increases every tracks' age as well as staleness."""
for track in self._track_map.values():
track.age = track.age + 1
track.staleness = track.staleness + 1
def reset_tracks_with_detections(self, detections):
"""Resets the staleness of tracks if there are associated detections.
Args:
detections: List of raw detections created from inferencing.
"""
for detection in detections:
max_iou = 0
associated_track = None
for track in self._track_map.values():
iou = calculate_iou(detection.bbox, track.track.bbox)
if iou > max_iou and iou > self._min_iou:
max_iou = iou
associated_track = track
if associated_track is not None:
associated_track.staleness = 0
def process(self, detections, managed_tracks):
"""Given detections and predicted tracks, calculates what tracks are stale.
Args:
detections: Raw detections generated by inferencing.
managed_tracks: Tracks to be validated, generated by mediapipe.
Returns:
Tuple where the first member is the filtered tracks, and the second is
list of track id's to be cancelled.
"""
self.forget_unmanaged_tracks(managed_tracks)
self.update_tracks(managed_tracks)
self.age_tracks()
self.reset_tracks_with_detections(detections)
healthy_tracks = filter(lambda t: t.staleness <= self._allowed_staleness,
self._track_map.values())
unwrapped_tracks = list(map(lambda t: t.track, healthy_tracks))
cancelled_tracks = filter(lambda t: t.staleness > self._allowed_staleness,
self._track_map.values())
unwrapped_cancelled_tracks = list(
map(lambda t: t.track.track_id, cancelled_tracks))
return (unwrapped_tracks, unwrapped_cancelled_tracks)
| new_track = TrackWrapper(
track=track,
age=0,
staleness=0,
)
self._track_map[track.track_id] = new_track | conditional_block |
mediapipe_track_validator.py | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validates generated tracks and filters out invalidated tracks."""
import dataclasses
from automl_video_ondevice.types import ObjectTrackingAnnotation
def calculate_iou(bbox1, bbox2):
"""Calculates the Intersection-Over-Union of two bounding boxes.
IOU is a ratio for determining how much two bounding boxes match.
Args:
bbox1: The first bounding box.
bbox2: The bounding box to compare with.
Returns:
The IOU as a float from 0.0 to 1.0 (1.0 being a perfect match.)
"""
x_overlap = max(0,
min(bbox1.right, bbox2.right) - max(bbox1.left, bbox2.left))
y_overlap = max(0,
min(bbox1.bottom, bbox2.bottom) - max(bbox1.top, bbox2.top))
intersection = x_overlap * y_overlap
area1 = (bbox1.right - bbox1.left) * (bbox1.bottom - bbox1.top)
area2 = (bbox2.right - bbox2.left) * (bbox2.bottom - bbox2.top)
union = area1 + area2 - intersection
return intersection / union
@dataclasses.dataclass
class TrackWrapper:
|
class MediaPipeTrackValidator:
"""Checks if an annotation is valid by measuring staleness.
Each track is stored and aged. If a track box intersects with a new detection,
then consider it an associated detection, and mark the track as fresh.
If a track exists for longer than allowed_staleness, then filter the track out
then tell the mediapipe graph to delete.
"""
def __init__(self, allowed_staleness=10, min_iou=0.6):
"""Constructor for MediaPipeTrackValidator.
Args:
allowed_staleness: How many updates the track can linger for until it is
determined to be stale.
min_iou: How much the detection box must match a tracked box to be
determined as an associated detection.
"""
self._track_map = {}
self._allowed_staleness = allowed_staleness
self._min_iou = min_iou
def forget_unmanaged_tracks(self, managed_tracks):
"""Removes unmanaged tracks from the validator's cache.
The mediapipe graph has it's own track manager and validator. This manager
also removes tracks automatically.
Unmanaged tracks are tracks no longer outputted by the mediapipe graph.
This makes sure that this validator is in sync with the mediapipe track
manager.
Args:
managed_tracks:
"""
managed_ids = list(map(lambda t: t.track_id, managed_tracks))
keys = list(self._track_map.keys())
for idx in keys:
if idx not in managed_ids:
del self._track_map[idx]
def update_tracks(self, managed_tracks):
"""Updates tracks stored in the validator with new tracking data.
Also adds new tracks if the validator does not know about the track yet.
Args:
managed_tracks: Tracks managed by mediapipe.
"""
for track in managed_tracks:
if track.track_id in self._track_map:
registered_track = self._track_map[track.track_id]
registered_track.track = track
else:
new_track = TrackWrapper(
track=track,
age=0,
staleness=0,
)
self._track_map[track.track_id] = new_track
def age_tracks(self):
"""Increases every tracks' age as well as staleness."""
for track in self._track_map.values():
track.age = track.age + 1
track.staleness = track.staleness + 1
def reset_tracks_with_detections(self, detections):
"""Resets the staleness of tracks if there are associated detections.
Args:
detections: List of raw detections created from inferencing.
"""
for detection in detections:
max_iou = 0
associated_track = None
for track in self._track_map.values():
iou = calculate_iou(detection.bbox, track.track.bbox)
if iou > max_iou and iou > self._min_iou:
max_iou = iou
associated_track = track
if associated_track is not None:
associated_track.staleness = 0
def process(self, detections, managed_tracks):
"""Given detections and predicted tracks, calculates what tracks are stale.
Args:
detections: Raw detections generated by inferencing.
managed_tracks: Tracks to be validated, generated by mediapipe.
Returns:
Tuple where the first member is the filtered tracks, and the second is
list of track id's to be cancelled.
"""
self.forget_unmanaged_tracks(managed_tracks)
self.update_tracks(managed_tracks)
self.age_tracks()
self.reset_tracks_with_detections(detections)
healthy_tracks = filter(lambda t: t.staleness <= self._allowed_staleness,
self._track_map.values())
unwrapped_tracks = list(map(lambda t: t.track, healthy_tracks))
cancelled_tracks = filter(lambda t: t.staleness > self._allowed_staleness,
self._track_map.values())
unwrapped_cancelled_tracks = list(
map(lambda t: t.track.track_id, cancelled_tracks))
return (unwrapped_tracks, unwrapped_cancelled_tracks)
| """Wraps the tracking annotation with data only relevant to the validator."""
track: ObjectTrackingAnnotation
age: int # How old this track is.
staleness: int # How many iterations it has been since the last detection. | identifier_body |
mediapipe_track_validator.py | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validates generated tracks and filters out invalidated tracks."""
import dataclasses
from automl_video_ondevice.types import ObjectTrackingAnnotation
def | (bbox1, bbox2):
"""Calculates the Intersection-Over-Union of two bounding boxes.
IOU is a ratio for determining how much two bounding boxes match.
Args:
bbox1: The first bounding box.
bbox2: The bounding box to compare with.
Returns:
The IOU as a float from 0.0 to 1.0 (1.0 being a perfect match.)
"""
x_overlap = max(0,
min(bbox1.right, bbox2.right) - max(bbox1.left, bbox2.left))
y_overlap = max(0,
min(bbox1.bottom, bbox2.bottom) - max(bbox1.top, bbox2.top))
intersection = x_overlap * y_overlap
area1 = (bbox1.right - bbox1.left) * (bbox1.bottom - bbox1.top)
area2 = (bbox2.right - bbox2.left) * (bbox2.bottom - bbox2.top)
union = area1 + area2 - intersection
return intersection / union
@dataclasses.dataclass
class TrackWrapper:
"""Wraps the tracking annotation with data only relevant to the validator."""
track: ObjectTrackingAnnotation
age: int # How old this track is.
staleness: int # How many iterations it has been since the last detection.
class MediaPipeTrackValidator:
"""Checks if an annotation is valid by measuring staleness.
Each track is stored and aged. If a track box intersects with a new detection,
then consider it an associated detection, and mark the track as fresh.
If a track exists for longer than allowed_staleness, then filter the track out
then tell the mediapipe graph to delete.
"""
def __init__(self, allowed_staleness=10, min_iou=0.6):
"""Constructor for MediaPipeTrackValidator.
Args:
allowed_staleness: How many updates the track can linger for until it is
determined to be stale.
min_iou: How much the detection box must match a tracked box to be
determined as an associated detection.
"""
self._track_map = {}
self._allowed_staleness = allowed_staleness
self._min_iou = min_iou
def forget_unmanaged_tracks(self, managed_tracks):
"""Removes unmanaged tracks from the validator's cache.
The mediapipe graph has it's own track manager and validator. This manager
also removes tracks automatically.
Unmanaged tracks are tracks no longer outputted by the mediapipe graph.
This makes sure that this validator is in sync with the mediapipe track
manager.
Args:
managed_tracks:
"""
managed_ids = list(map(lambda t: t.track_id, managed_tracks))
keys = list(self._track_map.keys())
for idx in keys:
if idx not in managed_ids:
del self._track_map[idx]
def update_tracks(self, managed_tracks):
"""Updates tracks stored in the validator with new tracking data.
Also adds new tracks if the validator does not know about the track yet.
Args:
managed_tracks: Tracks managed by mediapipe.
"""
for track in managed_tracks:
if track.track_id in self._track_map:
registered_track = self._track_map[track.track_id]
registered_track.track = track
else:
new_track = TrackWrapper(
track=track,
age=0,
staleness=0,
)
self._track_map[track.track_id] = new_track
def age_tracks(self):
"""Increases every tracks' age as well as staleness."""
for track in self._track_map.values():
track.age = track.age + 1
track.staleness = track.staleness + 1
def reset_tracks_with_detections(self, detections):
"""Resets the staleness of tracks if there are associated detections.
Args:
detections: List of raw detections created from inferencing.
"""
for detection in detections:
max_iou = 0
associated_track = None
for track in self._track_map.values():
iou = calculate_iou(detection.bbox, track.track.bbox)
if iou > max_iou and iou > self._min_iou:
max_iou = iou
associated_track = track
if associated_track is not None:
associated_track.staleness = 0
def process(self, detections, managed_tracks):
"""Given detections and predicted tracks, calculates what tracks are stale.
Args:
detections: Raw detections generated by inferencing.
managed_tracks: Tracks to be validated, generated by mediapipe.
Returns:
Tuple where the first member is the filtered tracks, and the second is
list of track id's to be cancelled.
"""
self.forget_unmanaged_tracks(managed_tracks)
self.update_tracks(managed_tracks)
self.age_tracks()
self.reset_tracks_with_detections(detections)
healthy_tracks = filter(lambda t: t.staleness <= self._allowed_staleness,
self._track_map.values())
unwrapped_tracks = list(map(lambda t: t.track, healthy_tracks))
cancelled_tracks = filter(lambda t: t.staleness > self._allowed_staleness,
self._track_map.values())
unwrapped_cancelled_tracks = list(
map(lambda t: t.track.track_id, cancelled_tracks))
return (unwrapped_tracks, unwrapped_cancelled_tracks)
| calculate_iou | identifier_name |
mediapipe_track_validator.py | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validates generated tracks and filters out invalidated tracks."""
import dataclasses
from automl_video_ondevice.types import ObjectTrackingAnnotation
def calculate_iou(bbox1, bbox2):
"""Calculates the Intersection-Over-Union of two bounding boxes.
IOU is a ratio for determining how much two bounding boxes match.
Args:
bbox1: The first bounding box.
bbox2: The bounding box to compare with.
Returns:
The IOU as a float from 0.0 to 1.0 (1.0 being a perfect match.)
"""
x_overlap = max(0,
min(bbox1.right, bbox2.right) - max(bbox1.left, bbox2.left))
y_overlap = max(0,
min(bbox1.bottom, bbox2.bottom) - max(bbox1.top, bbox2.top))
intersection = x_overlap * y_overlap
area1 = (bbox1.right - bbox1.left) * (bbox1.bottom - bbox1.top)
area2 = (bbox2.right - bbox2.left) * (bbox2.bottom - bbox2.top)
union = area1 + area2 - intersection
return intersection / union
@dataclasses.dataclass
class TrackWrapper:
"""Wraps the tracking annotation with data only relevant to the validator."""
track: ObjectTrackingAnnotation
age: int # How old this track is.
staleness: int # How many iterations it has been since the last detection.
class MediaPipeTrackValidator:
"""Checks if an annotation is valid by measuring staleness.
Each track is stored and aged. If a track box intersects with a new detection,
then consider it an associated detection, and mark the track as fresh.
If a track exists for longer than allowed_staleness, then filter the track out
then tell the mediapipe graph to delete.
"""
def __init__(self, allowed_staleness=10, min_iou=0.6):
"""Constructor for MediaPipeTrackValidator.
Args:
allowed_staleness: How many updates the track can linger for until it is
determined to be stale.
min_iou: How much the detection box must match a tracked box to be
determined as an associated detection.
"""
self._track_map = {}
self._allowed_staleness = allowed_staleness
self._min_iou = min_iou
def forget_unmanaged_tracks(self, managed_tracks):
"""Removes unmanaged tracks from the validator's cache.
The mediapipe graph has it's own track manager and validator. This manager
also removes tracks automatically.
Unmanaged tracks are tracks no longer outputted by the mediapipe graph.
This makes sure that this validator is in sync with the mediapipe track
manager.
Args:
managed_tracks:
"""
managed_ids = list(map(lambda t: t.track_id, managed_tracks))
keys = list(self._track_map.keys())
for idx in keys:
if idx not in managed_ids:
del self._track_map[idx]
def update_tracks(self, managed_tracks):
"""Updates tracks stored in the validator with new tracking data.
Also adds new tracks if the validator does not know about the track yet.
Args:
managed_tracks: Tracks managed by mediapipe.
"""
for track in managed_tracks:
if track.track_id in self._track_map:
registered_track = self._track_map[track.track_id]
registered_track.track = track
else:
new_track = TrackWrapper(
track=track,
age=0,
staleness=0,
)
self._track_map[track.track_id] = new_track
def age_tracks(self):
"""Increases every tracks' age as well as staleness."""
for track in self._track_map.values():
track.age = track.age + 1
track.staleness = track.staleness + 1
def reset_tracks_with_detections(self, detections):
"""Resets the staleness of tracks if there are associated detections.
Args:
detections: List of raw detections created from inferencing.
"""
for detection in detections:
max_iou = 0
associated_track = None
for track in self._track_map.values():
iou = calculate_iou(detection.bbox, track.track.bbox)
if iou > max_iou and iou > self._min_iou:
max_iou = iou
associated_track = track
if associated_track is not None:
associated_track.staleness = 0
def process(self, detections, managed_tracks):
"""Given detections and predicted tracks, calculates what tracks are stale.
Args:
detections: Raw detections generated by inferencing.
managed_tracks: Tracks to be validated, generated by mediapipe.
Returns:
Tuple where the first member is the filtered tracks, and the second is
list of track id's to be cancelled.
"""
self.forget_unmanaged_tracks(managed_tracks) | self.age_tracks()
self.reset_tracks_with_detections(detections)
healthy_tracks = filter(lambda t: t.staleness <= self._allowed_staleness,
self._track_map.values())
unwrapped_tracks = list(map(lambda t: t.track, healthy_tracks))
cancelled_tracks = filter(lambda t: t.staleness > self._allowed_staleness,
self._track_map.values())
unwrapped_cancelled_tracks = list(
map(lambda t: t.track.track_id, cancelled_tracks))
return (unwrapped_tracks, unwrapped_cancelled_tracks) | self.update_tracks(managed_tracks) | random_line_split |
hypergeometric.rs | extern crate rand;
use crate::distribs::distribution::*;
use crate::util::math::*;
#[allow(non_snake_case)]
#[allow(dead_code)]
pub struct Hypergeometric {
N: u64,
m: u64,
n: u64,
}
#[allow(dead_code)]
impl Hypergeometric {
pub fn new(total: u64, n_different: u64, n_picked: u64) -> Hypergeometric {
Hypergeometric {
N: total,
m: n_different,
n: n_picked,
}
}
}
impl Distribution<u64> for Hypergeometric {
fn sample(&self) -> RandomVariable<u64> {
let prob = rand::random::<f64>();
let low_lim = if self.n < self.m { self.n } else { self.m };
let mut cum_prob: f64 = 0.0f64;
let mut k: u64 = 0u64;
for _ in 0..low_lim {
cum_prob += self.pdf(k);
if cum_prob > prob {
break;
}
k += 1
}
RandomVariable { value: Cell::new(k) }
}
fn mu(&self) -> f64 {
((self.n * self.m) as f64) / (self.N as f64)
}
fn sigma(&self) -> f64 {
let mean = self.mu();
let failure = ((self.N - self.m) as f64) / (self.N as f64);
let remaining = ((self.N - self.n) as f64) / ((self.N - 1) as f64);
(mean * failure * remaining).sqrt()
}
fn pdf(&self, x: u64) -> f64 {
binomial_coeff(self.m, x) * binomial_coeff(self.N - self.m, self.N - x) /
binomial_coeff(self.N, self.n)
}
fn cdf(&self, x: u64) -> f64 |
}
| {
(0..x).fold(0.0f64, |sum, next| sum + self.pdf(next))
} | identifier_body |
hypergeometric.rs | extern crate rand;
use crate::distribs::distribution::*;
use crate::util::math::*;
#[allow(non_snake_case)]
#[allow(dead_code)]
pub struct Hypergeometric {
N: u64,
m: u64,
n: u64,
}
#[allow(dead_code)]
impl Hypergeometric {
pub fn new(total: u64, n_different: u64, n_picked: u64) -> Hypergeometric {
Hypergeometric {
N: total,
m: n_different,
n: n_picked,
}
}
}
impl Distribution<u64> for Hypergeometric {
fn sample(&self) -> RandomVariable<u64> {
let prob = rand::random::<f64>();
let low_lim = if self.n < self.m { self.n } else { self.m };
let mut cum_prob: f64 = 0.0f64;
let mut k: u64 = 0u64;
for _ in 0..low_lim {
cum_prob += self.pdf(k);
if cum_prob > prob {
break;
}
k += 1
}
RandomVariable { value: Cell::new(k) }
}
fn mu(&self) -> f64 {
((self.n * self.m) as f64) / (self.N as f64)
}
fn sigma(&self) -> f64 {
let mean = self.mu();
let failure = ((self.N - self.m) as f64) / (self.N as f64);
let remaining = ((self.N - self.n) as f64) / ((self.N - 1) as f64);
(mean * failure * remaining).sqrt() | }
fn cdf(&self, x: u64) -> f64 {
(0..x).fold(0.0f64, |sum, next| sum + self.pdf(next))
}
} | }
fn pdf(&self, x: u64) -> f64 {
binomial_coeff(self.m, x) * binomial_coeff(self.N - self.m, self.N - x) /
binomial_coeff(self.N, self.n) | random_line_split |
hypergeometric.rs | extern crate rand;
use crate::distribs::distribution::*;
use crate::util::math::*;
#[allow(non_snake_case)]
#[allow(dead_code)]
pub struct Hypergeometric {
N: u64,
m: u64,
n: u64,
}
#[allow(dead_code)]
impl Hypergeometric {
pub fn new(total: u64, n_different: u64, n_picked: u64) -> Hypergeometric {
Hypergeometric {
N: total,
m: n_different,
n: n_picked,
}
}
}
impl Distribution<u64> for Hypergeometric {
fn sample(&self) -> RandomVariable<u64> {
let prob = rand::random::<f64>();
let low_lim = if self.n < self.m { self.n } else | ;
let mut cum_prob: f64 = 0.0f64;
let mut k: u64 = 0u64;
for _ in 0..low_lim {
cum_prob += self.pdf(k);
if cum_prob > prob {
break;
}
k += 1
}
RandomVariable { value: Cell::new(k) }
}
fn mu(&self) -> f64 {
((self.n * self.m) as f64) / (self.N as f64)
}
fn sigma(&self) -> f64 {
let mean = self.mu();
let failure = ((self.N - self.m) as f64) / (self.N as f64);
let remaining = ((self.N - self.n) as f64) / ((self.N - 1) as f64);
(mean * failure * remaining).sqrt()
}
fn pdf(&self, x: u64) -> f64 {
binomial_coeff(self.m, x) * binomial_coeff(self.N - self.m, self.N - x) /
binomial_coeff(self.N, self.n)
}
fn cdf(&self, x: u64) -> f64 {
(0..x).fold(0.0f64, |sum, next| sum + self.pdf(next))
}
}
| { self.m } | conditional_block |
hypergeometric.rs | extern crate rand;
use crate::distribs::distribution::*;
use crate::util::math::*;
#[allow(non_snake_case)]
#[allow(dead_code)]
pub struct Hypergeometric {
N: u64,
m: u64,
n: u64,
}
#[allow(dead_code)]
impl Hypergeometric {
pub fn new(total: u64, n_different: u64, n_picked: u64) -> Hypergeometric {
Hypergeometric {
N: total,
m: n_different,
n: n_picked,
}
}
}
impl Distribution<u64> for Hypergeometric {
fn sample(&self) -> RandomVariable<u64> {
let prob = rand::random::<f64>();
let low_lim = if self.n < self.m { self.n } else { self.m };
let mut cum_prob: f64 = 0.0f64;
let mut k: u64 = 0u64;
for _ in 0..low_lim {
cum_prob += self.pdf(k);
if cum_prob > prob {
break;
}
k += 1
}
RandomVariable { value: Cell::new(k) }
}
fn mu(&self) -> f64 {
((self.n * self.m) as f64) / (self.N as f64)
}
fn | (&self) -> f64 {
let mean = self.mu();
let failure = ((self.N - self.m) as f64) / (self.N as f64);
let remaining = ((self.N - self.n) as f64) / ((self.N - 1) as f64);
(mean * failure * remaining).sqrt()
}
fn pdf(&self, x: u64) -> f64 {
binomial_coeff(self.m, x) * binomial_coeff(self.N - self.m, self.N - x) /
binomial_coeff(self.N, self.n)
}
fn cdf(&self, x: u64) -> f64 {
(0..x).fold(0.0f64, |sum, next| sum + self.pdf(next))
}
}
| sigma | identifier_name |
ja.js | OC.L10N.register(
"notifications",
{
"Notifications" : "通知",
"No notifications" : "通知なし",
"Dismiss" : "閉じる",
"Admin notifications" : "管理者向けの通知",
"Unknown user session. It is not possible to set the option" : "不明なユーザーセッションです。オプションを設定することはできません。",
"Option not supported" : "サポートされていないオプションです",
"Saved" : "保存されました",
"Incomplete data" : "不正なデータです",
"Do not notify via mail" : "メール通知しない",
"Notify only when an action is required" : "アクションが必要な時にのみ通知する",
"Notify about all events" : "すべてのイベントについて通知する",
"Choose an option" : "オプションを選択してください", | "You can choose to be notified about events via mail. Some events are informative, others require an action (like accept/decline). Select your preference below:" : "メールでイベントについての通知を受け取ることができます。イベントの中には有益なものや、(受け入れ・拒否などの)アクションが必要なものがあります。以下より選択してください:",
"It was not possible to get your session. Please, try reloading the page or logout and login again" : "あなたのセッションを取得できませんでした。ページをリロードするかログアウト後の再ログインを試してください",
"To be able to receive mail notifications it is required to specify an email address for your account." : "メール通知を受信するためには、アカウントのメールアドレスを設定する必要があります。"
},
"nplurals=1; plural=0;"); | "Hello," : "こんにちは、",
"See <a href=\"%s\">%s</a> on %s for more information" : "詳細は <a href=\"%s\">%s</a> ( %s 内)を参照してください",
"See %s on %s for more information" : "詳細は %s (%s 内)を参照してください",
"Mail Notifications" : "メール通知", | random_line_split |
graph.rs | //! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input
//! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the micropgone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```
use std::collections::VecDeque;
use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;
/// An acyclic graph for audio devices.
pub struct DeviceGraph {
nodes: Vec<AudioNode>, // the actual nodes
topology: Vec<usize>, // the order to tick the nodes
bus: Vec<Sample>, // the audio bus to write samples to
time: Time // the next timestep
}
impl DeviceGraph {
/// Creates an empty graph.
pub fn new() -> Self {
DeviceGraph {
nodes: Vec::new(),
topology: Vec::new(),
bus: Vec::new(),
time: 0
}
}
/// Adds a new device into the graph, with no connections. Returns
/// a identifier that refers back to this device.
pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
where D: 'static+AudioDevice {
let node = AudioNode::new(device, &mut self.bus);
let idx = self.nodes.len();
self.nodes.push(node);
self.topology.push(idx);
AudioNodeIdx(idx)
}
/// Connects two devices in the graph.
///
/// * `src` and `dest` are identifiers for the actual devices to connect.
/// * `src_ch` and `dest_ch` are the channel indices of the two devices.
///
/// If invalid indices are provided, or if the specified edge would create
/// a cycle in the graph, an Err is returned and no changes dest the graph are
/// made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize,
dest: AudioNodeIdx, dest_ch: usize) -> Result<()> {
// Check device indices
let AudioNodeIdx(src_i) = src;
let AudioNodeIdx(dest_i) = dest;
if src_i >= self.nodes.len() {
return Err(Error::OutOfRange("src"));
} else if dest_i >= self.nodes.len() {
return Err(Error::OutOfRange("dest"));
}
// Check channels
if self.nodes[src_i].device.num_outputs() <= src_ch {
return Err(Error::OutOfRange("src_ch"));
}
if self.nodes[dest_i].device.num_inputs() <= dest_ch {
return Err(Error::OutOfRange("dest_ch"));
}
while self.nodes[dest_i].inputs.len() < dest_ch {
self.nodes[dest_i].inputs.push(None);
}
// Set input
let (start,_) = self.nodes[src_i].outputs;
self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch);
self.topological_sort(dest_i, dest_ch)
}
/// Determines the topology of our device graph. If the graph has a cycle,
/// then we remove the last edge. Otherwise, we set self.topology to
/// a topologically sorted order.
fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> {
// Intialize our set of input edges, and our set of edgeless nodes
let mut topology = Vec::new();
let mut inputs: Vec<Vec<_>> = self.nodes.iter().map(
|node| node.inputs.iter().filter_map(|&o| o).collect()
).collect();
let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map(
|(i, ins)| if ins.len() == 0 { Some(i) } else |
).collect();
// While there are nodes with no input, we choose one, add it as the
// next node in our topology, and remove all edges from that node. Any
// nodes that lose their final edge are added to the edgeless set.
loop {
match no_inputs.pop_front() {
Some(i) => {
topology.push(i);
let (out_start, out_end) = self.nodes[i].outputs;
for out in out_start..out_end {
for (j, ins) in inputs.iter_mut().enumerate() {
let mut idx = None;
for k in 0..ins.len() {
if ins[k] == out {
idx = Some(k);
break;
}
}
match idx {
Some(k) => {
ins.swap_remove(k);
if ins.len() == 0 {
no_inputs.push_back(j);
}
},
None => ()
}
}
}
},
None => break
}
}
if topology.len() == self.nodes.len() {
self.topology = topology;
Ok(())
} else {
self.nodes[dest_i].inputs[dest_ch] = None;
Err(Error::CreatesCycle)
}
}
}
impl Tick for DeviceGraph {
fn tick(&mut self) {
for &i in self.topology.iter() {
self.nodes[i].tick(self.time, &mut self.bus);
}
self.time += 1;
}
}
/// An identifier used to refer back to a node in the graph.
#[derive(Copy, Clone, Debug)]
pub struct AudioNodeIdx(usize);
/// A wrapper for a node in the graph.
///
/// Management of indices in the bus is handled in the graph itself.
struct AudioNode {
device: Box<AudioDevice>, // wraps the device
inputs: Vec<Option<usize>>, // bus indices of the inputs
input_buf: Vec<Sample>, // an allocated buffer for containing inputs
outputs: (usize, usize) // the range of outputs in the bus
}
impl AudioNode {
/// Wraps the device in a new node
fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode
where D: 'static+AudioDevice {
let num_in = device.num_inputs();
let num_out = device.num_outputs();
let start = bus.len();
for _ in 0..num_out {
bus.push(0.0);
}
let end = bus.len();
AudioNode {
device: Box::new(device),
inputs: vec![None; num_in],
input_buf: vec![0.0; num_in],
outputs: (start, end)
}
}
/// Extracts the inputs out of the bus, tick the device and place the outputs
/// back into the bus.
fn tick(&mut self, t: Time, bus: &mut[Sample]) {
for (i, ch) in self.inputs.iter().enumerate() {
self.input_buf[i] = ch.map_or(0.0, |j| bus[j]);
}
let (start, end) = self.outputs;
self.device.tick(t, &self.input_buf, &mut bus[start..end]);
}
}
#[cfg(test)]
mod test {
use testing::MockAudioDevice;
use super::{DeviceGraph, Tick};
#[test]
fn test_empty_graph() {
DeviceGraph::new().tick();
}
#[test]
fn test_one_node() {
let mut mock = MockAudioDevice::new("mock", 1, 1);
mock.will_tick(&[0.0], &[1.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock);
graph.tick();
}
#[test]
fn test_disconnected() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[0.0], &[2.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock1);
graph.add_node(mock2);
graph.tick();
}
#[test]
fn test_linear() {
let mut mock1 = MockAudioDevice::new("mock1", 0, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 0);
mock1.will_tick(&[], &[1.0]);
mock2.will_tick(&[1.0], &[]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.tick();
}
#[test]
fn test_complex() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut mock3 = MockAudioDevice::new("mock3", 2, 1);
let mut mock4 = MockAudioDevice::new("mock4", 1, 1);
let mut mock5 = MockAudioDevice::new("mock5", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[4.0], &[2.0]);
mock3.will_tick(&[2.0, 4.0], &[3.0]);
mock4.will_tick(&[1.0], &[4.0]);
mock5.will_tick(&[0.0], &[5.0]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
let mock4 = graph.add_node(mock4);
let _mock5 = graph.add_node(mock5);
graph.add_edge(mock1, 0, mock4, 0).unwrap();
graph.add_edge(mock4, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock4, 0, mock3, 1).unwrap();
graph.tick();
}
#[test]
#[should_panic]
fn test_direct_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock1, 0).unwrap();
}
#[test]
#[should_panic]
fn test_indirect_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mock3 = MockAudioDevice::new("mock3", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock3, 0, mock1, 0).unwrap();
}
}
| { None } | conditional_block |
graph.rs | //! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the micropgone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```
use std::collections::VecDeque;
use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;
/// An acyclic graph for audio devices.
pub struct DeviceGraph {
nodes: Vec<AudioNode>, // the actual nodes
topology: Vec<usize>, // the order to tick the nodes
bus: Vec<Sample>, // the audio bus to write samples to
time: Time // the next timestep
}
impl DeviceGraph {
/// Creates an empty graph.
pub fn new() -> Self {
DeviceGraph {
nodes: Vec::new(),
topology: Vec::new(),
bus: Vec::new(),
time: 0
}
}
/// Adds a new device into the graph, with no connections. Returns
/// a identifier that refers back to this device.
pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
where D: 'static+AudioDevice {
let node = AudioNode::new(device, &mut self.bus);
let idx = self.nodes.len();
self.nodes.push(node);
self.topology.push(idx);
AudioNodeIdx(idx)
}
/// Connects two devices in the graph.
///
/// * `src` and `dest` are identifiers for the actual devices to connect.
/// * `src_ch` and `dest_ch` are the channel indices of the two devices.
///
/// If invalid indices are provided, or if the specified edge would create
/// a cycle in the graph, an Err is returned and no changes dest the graph are
/// made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize,
dest: AudioNodeIdx, dest_ch: usize) -> Result<()> {
// Check device indices
let AudioNodeIdx(src_i) = src;
let AudioNodeIdx(dest_i) = dest;
if src_i >= self.nodes.len() {
return Err(Error::OutOfRange("src"));
} else if dest_i >= self.nodes.len() {
return Err(Error::OutOfRange("dest"));
}
// Check channels
if self.nodes[src_i].device.num_outputs() <= src_ch {
return Err(Error::OutOfRange("src_ch"));
}
if self.nodes[dest_i].device.num_inputs() <= dest_ch {
return Err(Error::OutOfRange("dest_ch"));
}
while self.nodes[dest_i].inputs.len() < dest_ch {
self.nodes[dest_i].inputs.push(None);
}
// Set input
let (start,_) = self.nodes[src_i].outputs;
self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch);
self.topological_sort(dest_i, dest_ch)
}
/// Determines the topology of our device graph. If the graph has a cycle,
/// then we remove the last edge. Otherwise, we set self.topology to
/// a topologically sorted order.
fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> {
// Intialize our set of input edges, and our set of edgeless nodes
let mut topology = Vec::new();
let mut inputs: Vec<Vec<_>> = self.nodes.iter().map(
|node| node.inputs.iter().filter_map(|&o| o).collect()
).collect();
let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map(
|(i, ins)| if ins.len() == 0 { Some(i) } else { None }
).collect();
// While there are nodes with no input, we choose one, add it as the
// next node in our topology, and remove all edges from that node. Any
// nodes that lose their final edge are added to the edgeless set.
loop {
match no_inputs.pop_front() {
Some(i) => {
topology.push(i);
let (out_start, out_end) = self.nodes[i].outputs;
for out in out_start..out_end {
for (j, ins) in inputs.iter_mut().enumerate() {
let mut idx = None;
for k in 0..ins.len() {
if ins[k] == out {
idx = Some(k);
break;
}
}
match idx {
Some(k) => {
ins.swap_remove(k);
if ins.len() == 0 {
no_inputs.push_back(j);
}
},
None => ()
}
}
}
},
None => break
}
}
if topology.len() == self.nodes.len() {
self.topology = topology;
Ok(())
} else {
self.nodes[dest_i].inputs[dest_ch] = None;
Err(Error::CreatesCycle)
}
}
}
impl Tick for DeviceGraph {
fn tick(&mut self) {
for &i in self.topology.iter() {
self.nodes[i].tick(self.time, &mut self.bus);
}
self.time += 1;
}
}
/// An identifier used to refer back to a node in the graph.
#[derive(Copy, Clone, Debug)]
pub struct AudioNodeIdx(usize);
/// A wrapper for a node in the graph.
///
/// Management of indices in the bus is handled in the graph itself.
struct AudioNode {
device: Box<AudioDevice>, // wraps the device
inputs: Vec<Option<usize>>, // bus indices of the inputs
input_buf: Vec<Sample>, // an allocated buffer for containing inputs
outputs: (usize, usize) // the range of outputs in the bus
}
impl AudioNode {
/// Wraps the device in a new node
fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode
where D: 'static+AudioDevice {
let num_in = device.num_inputs();
let num_out = device.num_outputs();
let start = bus.len();
for _ in 0..num_out {
bus.push(0.0);
}
let end = bus.len();
AudioNode {
device: Box::new(device),
inputs: vec![None; num_in],
input_buf: vec![0.0; num_in],
outputs: (start, end)
}
}
/// Extracts the inputs out of the bus, tick the device and place the outputs
/// back into the bus.
fn tick(&mut self, t: Time, bus: &mut[Sample]) {
for (i, ch) in self.inputs.iter().enumerate() {
self.input_buf[i] = ch.map_or(0.0, |j| bus[j]);
}
let (start, end) = self.outputs;
self.device.tick(t, &self.input_buf, &mut bus[start..end]);
}
}
#[cfg(test)]
mod test {
use testing::MockAudioDevice;
use super::{DeviceGraph, Tick};
#[test]
fn test_empty_graph() {
DeviceGraph::new().tick();
}
#[test]
fn test_one_node() {
let mut mock = MockAudioDevice::new("mock", 1, 1);
mock.will_tick(&[0.0], &[1.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock);
graph.tick();
}
#[test]
fn test_disconnected() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[0.0], &[2.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock1);
graph.add_node(mock2);
graph.tick();
}
#[test]
fn test_linear() {
let mut mock1 = MockAudioDevice::new("mock1", 0, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 0);
mock1.will_tick(&[], &[1.0]);
mock2.will_tick(&[1.0], &[]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.tick();
}
#[test]
fn test_complex() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut mock3 = MockAudioDevice::new("mock3", 2, 1);
let mut mock4 = MockAudioDevice::new("mock4", 1, 1);
let mut mock5 = MockAudioDevice::new("mock5", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[4.0], &[2.0]);
mock3.will_tick(&[2.0, 4.0], &[3.0]);
mock4.will_tick(&[1.0], &[4.0]);
mock5.will_tick(&[0.0], &[5.0]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
let mock4 = graph.add_node(mock4);
let _mock5 = graph.add_node(mock5);
graph.add_edge(mock1, 0, mock4, 0).unwrap();
graph.add_edge(mock4, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock4, 0, mock3, 1).unwrap();
graph.tick();
}
#[test]
#[should_panic]
fn test_direct_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock1, 0).unwrap();
}
#[test]
#[should_panic]
fn test_indirect_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mock3 = MockAudioDevice::new("mock3", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock3, 0, mock1, 0).unwrap();
}
} | //! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input | random_line_split | |
graph.rs | //! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input
//! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the micropgone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```
use std::collections::VecDeque;
use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;
/// An acyclic graph for audio devices.
pub struct DeviceGraph {
nodes: Vec<AudioNode>, // the actual nodes
topology: Vec<usize>, // the order to tick the nodes
bus: Vec<Sample>, // the audio bus to write samples to
time: Time // the next timestep
}
impl DeviceGraph {
/// Creates an empty graph.
pub fn new() -> Self {
DeviceGraph {
nodes: Vec::new(),
topology: Vec::new(),
bus: Vec::new(),
time: 0
}
}
/// Adds a new device into the graph, with no connections. Returns
/// a identifier that refers back to this device.
pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
where D: 'static+AudioDevice {
let node = AudioNode::new(device, &mut self.bus);
let idx = self.nodes.len();
self.nodes.push(node);
self.topology.push(idx);
AudioNodeIdx(idx)
}
/// Connects two devices in the graph.
///
/// * `src` and `dest` are identifiers for the actual devices to connect.
/// * `src_ch` and `dest_ch` are the channel indices of the two devices.
///
/// If invalid indices are provided, or if the specified edge would create
/// a cycle in the graph, an Err is returned and no changes dest the graph are
/// made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize,
dest: AudioNodeIdx, dest_ch: usize) -> Result<()> {
// Check device indices
let AudioNodeIdx(src_i) = src;
let AudioNodeIdx(dest_i) = dest;
if src_i >= self.nodes.len() {
return Err(Error::OutOfRange("src"));
} else if dest_i >= self.nodes.len() {
return Err(Error::OutOfRange("dest"));
}
// Check channels
if self.nodes[src_i].device.num_outputs() <= src_ch {
return Err(Error::OutOfRange("src_ch"));
}
if self.nodes[dest_i].device.num_inputs() <= dest_ch {
return Err(Error::OutOfRange("dest_ch"));
}
while self.nodes[dest_i].inputs.len() < dest_ch {
self.nodes[dest_i].inputs.push(None);
}
// Set input
let (start,_) = self.nodes[src_i].outputs;
self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch);
self.topological_sort(dest_i, dest_ch)
}
/// Determines the topology of our device graph. If the graph has a cycle,
/// then we remove the last edge. Otherwise, we set self.topology to
/// a topologically sorted order.
fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> {
// Intialize our set of input edges, and our set of edgeless nodes
let mut topology = Vec::new();
let mut inputs: Vec<Vec<_>> = self.nodes.iter().map(
|node| node.inputs.iter().filter_map(|&o| o).collect()
).collect();
let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map(
|(i, ins)| if ins.len() == 0 { Some(i) } else { None }
).collect();
// While there are nodes with no input, we choose one, add it as the
// next node in our topology, and remove all edges from that node. Any
// nodes that lose their final edge are added to the edgeless set.
loop {
match no_inputs.pop_front() {
Some(i) => {
topology.push(i);
let (out_start, out_end) = self.nodes[i].outputs;
for out in out_start..out_end {
for (j, ins) in inputs.iter_mut().enumerate() {
let mut idx = None;
for k in 0..ins.len() {
if ins[k] == out {
idx = Some(k);
break;
}
}
match idx {
Some(k) => {
ins.swap_remove(k);
if ins.len() == 0 {
no_inputs.push_back(j);
}
},
None => ()
}
}
}
},
None => break
}
}
if topology.len() == self.nodes.len() {
self.topology = topology;
Ok(())
} else {
self.nodes[dest_i].inputs[dest_ch] = None;
Err(Error::CreatesCycle)
}
}
}
impl Tick for DeviceGraph {
fn tick(&mut self) {
for &i in self.topology.iter() {
self.nodes[i].tick(self.time, &mut self.bus);
}
self.time += 1;
}
}
/// An identifier used to refer back to a node in the graph.
#[derive(Copy, Clone, Debug)]
pub struct AudioNodeIdx(usize);
/// A wrapper for a node in the graph.
///
/// Management of indices in the bus is handled in the graph itself.
struct AudioNode {
device: Box<AudioDevice>, // wraps the device
inputs: Vec<Option<usize>>, // bus indices of the inputs
input_buf: Vec<Sample>, // an allocated buffer for containing inputs
outputs: (usize, usize) // the range of outputs in the bus
}
impl AudioNode {
/// Wraps the device in a new node
fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode
where D: 'static+AudioDevice {
let num_in = device.num_inputs();
let num_out = device.num_outputs();
let start = bus.len();
for _ in 0..num_out {
bus.push(0.0);
}
let end = bus.len();
AudioNode {
device: Box::new(device),
inputs: vec![None; num_in],
input_buf: vec![0.0; num_in],
outputs: (start, end)
}
}
/// Extracts the inputs out of the bus, tick the device and place the outputs
/// back into the bus.
fn tick(&mut self, t: Time, bus: &mut[Sample]) {
for (i, ch) in self.inputs.iter().enumerate() {
self.input_buf[i] = ch.map_or(0.0, |j| bus[j]);
}
let (start, end) = self.outputs;
self.device.tick(t, &self.input_buf, &mut bus[start..end]);
}
}
#[cfg(test)]
mod test {
use testing::MockAudioDevice;
use super::{DeviceGraph, Tick};
#[test]
fn test_empty_graph() {
DeviceGraph::new().tick();
}
#[test]
fn test_one_node() {
let mut mock = MockAudioDevice::new("mock", 1, 1);
mock.will_tick(&[0.0], &[1.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock);
graph.tick();
}
#[test]
fn test_disconnected() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[0.0], &[2.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock1);
graph.add_node(mock2);
graph.tick();
}
#[test]
fn test_linear() {
let mut mock1 = MockAudioDevice::new("mock1", 0, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 0);
mock1.will_tick(&[], &[1.0]);
mock2.will_tick(&[1.0], &[]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.tick();
}
#[test]
fn test_complex() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut mock3 = MockAudioDevice::new("mock3", 2, 1);
let mut mock4 = MockAudioDevice::new("mock4", 1, 1);
let mut mock5 = MockAudioDevice::new("mock5", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[4.0], &[2.0]);
mock3.will_tick(&[2.0, 4.0], &[3.0]);
mock4.will_tick(&[1.0], &[4.0]);
mock5.will_tick(&[0.0], &[5.0]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
let mock4 = graph.add_node(mock4);
let _mock5 = graph.add_node(mock5);
graph.add_edge(mock1, 0, mock4, 0).unwrap();
graph.add_edge(mock4, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock4, 0, mock3, 1).unwrap();
graph.tick();
}
#[test]
#[should_panic]
fn | () {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock1, 0).unwrap();
}
#[test]
#[should_panic]
fn test_indirect_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mock3 = MockAudioDevice::new("mock3", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock3, 0, mock1, 0).unwrap();
}
}
| test_direct_cycle | identifier_name |
bwt.rs | use sa::{insert, suffix_array};
use std::ops::Index;
/// Generate the [Burrows-Wheeler Transform](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform)
/// of the given input.
///
/// ``` rust
/// let text = String::from("The quick brown fox jumps over the lazy dog");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// assert_eq!(String::from("gkynxeser\u{0}l i hhv otTu c uwd rfm ebp qjoooza"),
/// String::from_utf8(bw).unwrap());
/// ```
/// The output can then be used for compression or FM-index'ing.
pub fn bwt(input: &[u8]) -> Vec<u8> {
suffix_array(input).into_iter().map(|i| {
// BWT[i] = S[SA[i] - 1]
if i == 0 { 0 } else { input[(i - 1) as usize] }
}).collect()
}
// Takes a frequency map of bytes and generates the index of first occurrence
// of each byte.
fn generate_occurrence_index(map: &mut Vec<u32>) {
let mut idx = 0;
for i in 0..map.len() {
let c = map[i];
map[i] = idx;
idx += c;
}
}
/// Invert the BWT and generate the original data.
///
/// ``` rust
/// let text = String::from("Hello, world!");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// let ibw = nucleic_acid::ibwt(&bw);
/// assert_eq!(text, String::from_utf8(ibw).unwrap());
/// ```
pub fn ibwt(input: &[u8]) -> Vec<u8> {
// get the byte distribution
let mut map = Vec::new();
for i in input {
insert(&mut map, *i);
}
generate_occurrence_index(&mut map);
// generate the LF vector
let mut lf = vec![0; input.len()];
for (i, c) in input.iter().enumerate() {
let byte = *c as usize;
let val = map[byte];
lf[i] = val;
map[byte] = val + 1;
}
let mut idx = 0;
// construct the sequence by traversing through the LF vector
let mut output = vec![0; input.len()];
for i in (0..(input.len() - 1)).rev() {
output[i] = input[idx];
idx = lf[idx] as usize;
}
output.pop();
output
}
/// [Ferragina-Manzini index](https://en.wikipedia.org/wiki/FM-index)
/// (or Full-text index in Minute space) for finding occurrences of substrings
/// in O(1) time.
///
/// ``` rust
/// use nucleic_acid::FMIndex;
///
/// let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
/// let index = FMIndex::new(text.as_bytes());
///
/// // count the occurrences
/// assert_eq!(0, index.count("CCCCC"));
/// assert_eq!(3, index.count("TG"));
///
/// // ... or get their positions
/// assert_eq!(index.search("GCGT"), vec![46, 26, 0]);
/// ```
///
/// The current implementation of FM-index is a memory killer, since it stores positions
/// of **all bytes** in the given data. For the human genome (~3 GB), it consumed
/// ~27 GB of RAM to build the index (in ~4 mins).
///
/// That said, it still returns the match results in a few microseconds.
#[derive(Clone, Debug)]
pub struct FMIndex {
/// BW-transformed data
data: Vec<u8>,
/// forward frequency of each character in the BWT data
cache: Vec<u32>,
/// incremental character frequencies
occ_map: Vec<u32>,
/// LF-mapping for backward search
lf_vec: Vec<u32>,
}
impl FMIndex {
/// Generate an FM-index for the input data.
#[inline]
pub fn new(data: &[u8]) -> FMIndex {
FMIndex::new_from_bwt(bwt(data))
}
/// Get the reference to the inner BWT data.
///
/// Note that the length of BWT is one more than the length of the actual text,
/// since it has a null byte to indicate empty string.
pub fn bwt(&self) -> &[u8] {
&self.data
}
/// Generate the FM-index from the BWT data.
///
/// It's not a good idea to generate FM-index from scratch all the time, especially for large inputs.
/// This would be very useful when your data is large and remains constant for a while.
///
/// FM-index internally uses BWT, and BWT is generated from the suffix array, which takes a lot of time.
/// If your input doesn't change, then it's better to get the BWT data (using `bwt` method), write it
/// to a file and generate the index from that in the future.
pub fn new_from_bwt(bwt_data: Vec<u8>) -> FMIndex |
/// Get the nearest position of a character in the internal BWT data.
///
/// The `count` and `search` methods rely on this method for finding occurrences.
/// For example, we can do soemthing like this,
///
/// ``` rust
/// use nucleic_acid::FMIndex;
/// let fm = FMIndex::new(b"Hello, Hello, Hello" as &[u8]);
///
/// // initially, the range should be the length of the BWT
/// let mut top = 0;
/// let mut bottom = fm.bwt().len();
/// let query = b"llo";
///
/// // feed the characters in the reverse
/// for ch in query.iter().rev() {
/// top = fm.nearest(top, *ch);
/// bottom = fm.nearest(bottom, *ch);
/// if top >= bottom {
/// return
/// }
/// }
///
/// // If we get a valid range, then everything in that range is a valid match.
/// // This way, we can get both the count and positions...
/// assert_eq!(3, bottom - top);
/// assert_eq!(vec![17, 10, 3], (top..bottom).map(|i| fm[i]).collect::<Vec<_>>());
/// ```
///
/// This is backward searching. As you feed in the characters along with a position, `nearest` will
/// give you a new position in the index. Once the range becomes invalid (which happens when the
/// substring doesn't exist), we can bail out. On the contrary, if the range remains valid after
/// you've fed in all the characters, then every value within in that range is an occurrence.
///
/// So, this is useful when you want to cache the repeating ranges. With this, you can build your own
/// count/search functions with caching. It's also useful for making custom approximate matching functions
/// by backtracking whenever there's an invalid range.
pub fn nearest(&self, idx: usize, ch: u8) -> usize {
match self.occ_map.get(ch as usize) {
Some(res) if *res > 0 => {
*res as usize + (0..idx).rev()
.find(|&i| self.data[i] == ch)
.map(|i| self.cache[i] as usize)
.unwrap_or(0)
},
_ => 0,
}
}
fn get_range(&self, query: &str) -> Option<(usize, usize)> {
let mut top = 0;
let mut bottom = self.data.len();
for ch in query.as_bytes().iter().rev() {
top = self.nearest(top, *ch);
bottom = self.nearest(bottom, *ch);
if top >= bottom {
return None
}
}
if top >= bottom {
None
} else {
Some((top, bottom))
}
}
/// Count the occurrences of the substring in the original data.
pub fn count(&self, query: &str) -> usize {
match self.get_range(query) {
Some((top, bottom)) => bottom - top,
None => 0,
}
}
/// Get the positions of occurrences of substring in the original data.
pub fn search(&self, query: &str) -> Vec<usize> {
match self.get_range(query) {
Some((top, bottom)) => (top..bottom).map(|idx| {
let i = self.nearest(idx, self.data[idx]);
self.lf_vec[i] as usize
}).collect(),
None => Vec::new(),
}
}
}
impl Index<usize> for FMIndex {
type Output = u32;
fn index(&self, i: usize) -> &u32 {
self.lf_vec.get(i).expect("index out of range")
}
}
#[cfg(test)]
mod tests {
use super::{FMIndex, bwt, ibwt};
#[test]
fn test_bwt_and_ibwt() {
let text = String::from("ATCTAGGAGATCTGAATCTAGTTCAACTAGCTAGATCTAGAGACAGCTAA");
let bw = bwt(text.as_bytes());
let ibw = ibwt(&bw);
assert_eq!(String::from("AATCGGAGTTGCTTTG\u{0}AGTAGTGATTTTAAGAAAAAACCCCCCTAAAACG"),
String::from_utf8(bw).unwrap());
assert_eq!(text, String::from_utf8(ibw).unwrap());
}
#[test]
fn test_fm_index() {
let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
let index = FMIndex::new(text.as_bytes());
assert_eq!(0, index.count("CCCCC"));
let mut result = index.search("TG");
result.sort();
assert_eq!(result, vec![3, 15, 21]);
let mut result = index.search("GCGT");
result.sort();
assert_eq!(result, vec![0, 26, 46]);
assert_eq!(vec![1], index.search("CGTGCCC"));
}
}
| {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1;
}
let mut i = lf_vec[0] as usize;
lf_vec[0] = 0;
let mut counter = bwt_data.len() as u32 - 1;
// Only difference is that we replace the LF indices with the lengths of prefix
// from a particular position (in other words, the number of times
// it would take us to get to the start of string).
for _ in 0..(bwt_data.len() - 1) {
let next = lf_vec[i];
lf_vec[i] = counter;
i = next as usize;
counter -= 1;
}
FMIndex {
data: bwt_data,
cache: count,
occ_map: map,
lf_vec: lf_vec,
}
} | identifier_body |
bwt.rs | use sa::{insert, suffix_array};
use std::ops::Index;
/// Generate the [Burrows-Wheeler Transform](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform)
/// of the given input.
///
/// ``` rust
/// let text = String::from("The quick brown fox jumps over the lazy dog");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// assert_eq!(String::from("gkynxeser\u{0}l i hhv otTu c uwd rfm ebp qjoooza"),
/// String::from_utf8(bw).unwrap());
/// ```
/// The output can then be used for compression or FM-index'ing.
pub fn bwt(input: &[u8]) -> Vec<u8> {
suffix_array(input).into_iter().map(|i| {
// BWT[i] = S[SA[i] - 1]
if i == 0 { 0 } else { input[(i - 1) as usize] }
}).collect()
}
// Takes a frequency map of bytes and generates the index of first occurrence
// of each byte.
fn generate_occurrence_index(map: &mut Vec<u32>) {
let mut idx = 0;
for i in 0..map.len() {
let c = map[i];
map[i] = idx;
idx += c;
}
}
/// Invert the BWT and generate the original data.
///
/// ``` rust
/// let text = String::from("Hello, world!");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// let ibw = nucleic_acid::ibwt(&bw);
/// assert_eq!(text, String::from_utf8(ibw).unwrap());
/// ```
pub fn ibwt(input: &[u8]) -> Vec<u8> {
// get the byte distribution
let mut map = Vec::new();
for i in input {
insert(&mut map, *i);
}
generate_occurrence_index(&mut map);
// generate the LF vector
let mut lf = vec![0; input.len()];
for (i, c) in input.iter().enumerate() {
let byte = *c as usize;
let val = map[byte];
lf[i] = val;
map[byte] = val + 1;
}
let mut idx = 0;
// construct the sequence by traversing through the LF vector
let mut output = vec![0; input.len()];
for i in (0..(input.len() - 1)).rev() {
output[i] = input[idx];
idx = lf[idx] as usize;
}
output.pop();
output
}
/// [Ferragina-Manzini index](https://en.wikipedia.org/wiki/FM-index)
/// (or Full-text index in Minute space) for finding occurrences of substrings
/// in O(1) time.
///
/// ``` rust
/// use nucleic_acid::FMIndex;
///
/// let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
/// let index = FMIndex::new(text.as_bytes());
///
/// // count the occurrences
/// assert_eq!(0, index.count("CCCCC"));
/// assert_eq!(3, index.count("TG"));
///
/// // ... or get their positions
/// assert_eq!(index.search("GCGT"), vec![46, 26, 0]);
/// ```
///
/// The current implementation of FM-index is a memory killer, since it stores positions
/// of **all bytes** in the given data. For the human genome (~3 GB), it consumed
/// ~27 GB of RAM to build the index (in ~4 mins).
///
/// That said, it still returns the match results in a few microseconds.
#[derive(Clone, Debug)]
pub struct FMIndex {
/// BW-transformed data
data: Vec<u8>,
/// forward frequency of each character in the BWT data
cache: Vec<u32>,
/// incremental character frequencies
occ_map: Vec<u32>,
/// LF-mapping for backward search
lf_vec: Vec<u32>,
}
impl FMIndex {
/// Generate an FM-index for the input data.
#[inline]
pub fn new(data: &[u8]) -> FMIndex {
FMIndex::new_from_bwt(bwt(data))
}
/// Get the reference to the inner BWT data.
///
/// Note that the length of BWT is one more than the length of the actual text,
/// since it has a null byte to indicate empty string.
pub fn bwt(&self) -> &[u8] {
&self.data
}
/// Generate the FM-index from the BWT data.
///
/// It's not a good idea to generate FM-index from scratch all the time, especially for large inputs.
/// This would be very useful when your data is large and remains constant for a while.
///
/// FM-index internally uses BWT, and BWT is generated from the suffix array, which takes a lot of time.
/// If your input doesn't change, then it's better to get the BWT data (using `bwt` method), write it
/// to a file and generate the index from that in the future.
pub fn new_from_bwt(bwt_data: Vec<u8>) -> FMIndex {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1;
}
let mut i = lf_vec[0] as usize;
lf_vec[0] = 0;
let mut counter = bwt_data.len() as u32 - 1;
// Only difference is that we replace the LF indices with the lengths of prefix
// from a particular position (in other words, the number of times
// it would take us to get to the start of string).
for _ in 0..(bwt_data.len() - 1) {
let next = lf_vec[i];
lf_vec[i] = counter;
i = next as usize;
counter -= 1;
}
FMIndex {
data: bwt_data,
cache: count,
occ_map: map,
lf_vec: lf_vec,
}
}
/// Get the nearest position of a character in the internal BWT data.
///
/// The `count` and `search` methods rely on this method for finding occurrences.
/// For example, we can do soemthing like this,
///
/// ``` rust
/// use nucleic_acid::FMIndex;
/// let fm = FMIndex::new(b"Hello, Hello, Hello" as &[u8]);
///
/// // initially, the range should be the length of the BWT
/// let mut top = 0;
/// let mut bottom = fm.bwt().len();
/// let query = b"llo";
///
/// // feed the characters in the reverse
/// for ch in query.iter().rev() {
/// top = fm.nearest(top, *ch);
/// bottom = fm.nearest(bottom, *ch);
/// if top >= bottom {
/// return
/// }
/// }
///
/// // If we get a valid range, then everything in that range is a valid match.
/// // This way, we can get both the count and positions...
/// assert_eq!(3, bottom - top);
/// assert_eq!(vec![17, 10, 3], (top..bottom).map(|i| fm[i]).collect::<Vec<_>>());
/// ```
///
/// This is backward searching. As you feed in the characters along with a position, `nearest` will
/// give you a new position in the index. Once the range becomes invalid (which happens when the
/// substring doesn't exist), we can bail out. On the contrary, if the range remains valid after
/// you've fed in all the characters, then every value within in that range is an occurrence.
///
/// So, this is useful when you want to cache the repeating ranges. With this, you can build your own
/// count/search functions with caching. It's also useful for making custom approximate matching functions
/// by backtracking whenever there's an invalid range.
pub fn nearest(&self, idx: usize, ch: u8) -> usize {
match self.occ_map.get(ch as usize) {
Some(res) if *res > 0 => {
*res as usize + (0..idx).rev()
.find(|&i| self.data[i] == ch)
.map(|i| self.cache[i] as usize)
.unwrap_or(0)
},
_ => 0,
}
}
fn get_range(&self, query: &str) -> Option<(usize, usize)> {
let mut top = 0;
let mut bottom = self.data.len();
for ch in query.as_bytes().iter().rev() {
top = self.nearest(top, *ch);
bottom = self.nearest(bottom, *ch);
if top >= bottom {
return None
}
}
if top >= bottom {
None
} else {
Some((top, bottom))
}
}
/// Count the occurrences of the substring in the original data.
pub fn count(&self, query: &str) -> usize {
match self.get_range(query) {
Some((top, bottom)) => bottom - top,
None => 0,
}
}
/// Get the positions of occurrences of substring in the original data.
pub fn search(&self, query: &str) -> Vec<usize> {
match self.get_range(query) {
Some((top, bottom)) => (top..bottom).map(|idx| {
let i = self.nearest(idx, self.data[idx]);
self.lf_vec[i] as usize
}).collect(),
None => Vec::new(),
}
}
}
impl Index<usize> for FMIndex {
type Output = u32;
fn index(&self, i: usize) -> &u32 {
self.lf_vec.get(i).expect("index out of range")
}
}
#[cfg(test)]
mod tests {
use super::{FMIndex, bwt, ibwt};
#[test]
fn test_bwt_and_ibwt() {
let text = String::from("ATCTAGGAGATCTGAATCTAGTTCAACTAGCTAGATCTAGAGACAGCTAA");
let bw = bwt(text.as_bytes());
let ibw = ibwt(&bw);
assert_eq!(String::from("AATCGGAGTTGCTTTG\u{0}AGTAGTGATTTTAAGAAAAAACCCCCCTAAAACG"),
String::from_utf8(bw).unwrap());
assert_eq!(text, String::from_utf8(ibw).unwrap());
}
#[test]
fn test_fm_index() {
let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
let index = FMIndex::new(text.as_bytes());
assert_eq!(0, index.count("CCCCC"));
let mut result = index.search("TG");
result.sort();
assert_eq!(result, vec![3, 15, 21]);
let mut result = index.search("GCGT");
result.sort();
assert_eq!(result, vec![0, 26, 46]);
assert_eq!(vec![1], index.search("CGTGCCC"));
} | } | random_line_split | |
bwt.rs | use sa::{insert, suffix_array};
use std::ops::Index;
/// Generate the [Burrows-Wheeler Transform](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform)
/// of the given input.
///
/// ``` rust
/// let text = String::from("The quick brown fox jumps over the lazy dog");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// assert_eq!(String::from("gkynxeser\u{0}l i hhv otTu c uwd rfm ebp qjoooza"),
/// String::from_utf8(bw).unwrap());
/// ```
/// The output can then be used for compression or FM-index'ing.
pub fn bwt(input: &[u8]) -> Vec<u8> {
suffix_array(input).into_iter().map(|i| {
// BWT[i] = S[SA[i] - 1]
if i == 0 { 0 } else { input[(i - 1) as usize] }
}).collect()
}
// Takes a frequency map of bytes and generates the index of first occurrence
// of each byte.
fn | (map: &mut Vec<u32>) {
let mut idx = 0;
for i in 0..map.len() {
let c = map[i];
map[i] = idx;
idx += c;
}
}
/// Invert the BWT and generate the original data.
///
/// ``` rust
/// let text = String::from("Hello, world!");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// let ibw = nucleic_acid::ibwt(&bw);
/// assert_eq!(text, String::from_utf8(ibw).unwrap());
/// ```
pub fn ibwt(input: &[u8]) -> Vec<u8> {
// get the byte distribution
let mut map = Vec::new();
for i in input {
insert(&mut map, *i);
}
generate_occurrence_index(&mut map);
// generate the LF vector
let mut lf = vec![0; input.len()];
for (i, c) in input.iter().enumerate() {
let byte = *c as usize;
let val = map[byte];
lf[i] = val;
map[byte] = val + 1;
}
let mut idx = 0;
// construct the sequence by traversing through the LF vector
let mut output = vec![0; input.len()];
for i in (0..(input.len() - 1)).rev() {
output[i] = input[idx];
idx = lf[idx] as usize;
}
output.pop();
output
}
/// [Ferragina-Manzini index](https://en.wikipedia.org/wiki/FM-index)
/// (or Full-text index in Minute space) for finding occurrences of substrings
/// in O(1) time.
///
/// ``` rust
/// use nucleic_acid::FMIndex;
///
/// let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
/// let index = FMIndex::new(text.as_bytes());
///
/// // count the occurrences
/// assert_eq!(0, index.count("CCCCC"));
/// assert_eq!(3, index.count("TG"));
///
/// // ... or get their positions
/// assert_eq!(index.search("GCGT"), vec![46, 26, 0]);
/// ```
///
/// The current implementation of FM-index is a memory killer, since it stores positions
/// of **all bytes** in the given data. For the human genome (~3 GB), it consumed
/// ~27 GB of RAM to build the index (in ~4 mins).
///
/// That said, it still returns the match results in a few microseconds.
#[derive(Clone, Debug)]
pub struct FMIndex {
/// BW-transformed data
data: Vec<u8>,
/// forward frequency of each character in the BWT data
cache: Vec<u32>,
/// incremental character frequencies
occ_map: Vec<u32>,
/// LF-mapping for backward search
lf_vec: Vec<u32>,
}
impl FMIndex {
/// Generate an FM-index for the input data.
#[inline]
pub fn new(data: &[u8]) -> FMIndex {
FMIndex::new_from_bwt(bwt(data))
}
/// Get the reference to the inner BWT data.
///
/// Note that the length of BWT is one more than the length of the actual text,
/// since it has a null byte to indicate empty string.
pub fn bwt(&self) -> &[u8] {
&self.data
}
/// Generate the FM-index from the BWT data.
///
/// It's not a good idea to generate FM-index from scratch all the time, especially for large inputs.
/// This would be very useful when your data is large and remains constant for a while.
///
/// FM-index internally uses BWT, and BWT is generated from the suffix array, which takes a lot of time.
/// If your input doesn't change, then it's better to get the BWT data (using `bwt` method), write it
/// to a file and generate the index from that in the future.
pub fn new_from_bwt(bwt_data: Vec<u8>) -> FMIndex {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1;
}
let mut i = lf_vec[0] as usize;
lf_vec[0] = 0;
let mut counter = bwt_data.len() as u32 - 1;
// Only difference is that we replace the LF indices with the lengths of prefix
// from a particular position (in other words, the number of times
// it would take us to get to the start of string).
for _ in 0..(bwt_data.len() - 1) {
let next = lf_vec[i];
lf_vec[i] = counter;
i = next as usize;
counter -= 1;
}
FMIndex {
data: bwt_data,
cache: count,
occ_map: map,
lf_vec: lf_vec,
}
}
/// Get the nearest position of a character in the internal BWT data.
///
/// The `count` and `search` methods rely on this method for finding occurrences.
/// For example, we can do soemthing like this,
///
/// ``` rust
/// use nucleic_acid::FMIndex;
/// let fm = FMIndex::new(b"Hello, Hello, Hello" as &[u8]);
///
/// // initially, the range should be the length of the BWT
/// let mut top = 0;
/// let mut bottom = fm.bwt().len();
/// let query = b"llo";
///
/// // feed the characters in the reverse
/// for ch in query.iter().rev() {
/// top = fm.nearest(top, *ch);
/// bottom = fm.nearest(bottom, *ch);
/// if top >= bottom {
/// return
/// }
/// }
///
/// // If we get a valid range, then everything in that range is a valid match.
/// // This way, we can get both the count and positions...
/// assert_eq!(3, bottom - top);
/// assert_eq!(vec![17, 10, 3], (top..bottom).map(|i| fm[i]).collect::<Vec<_>>());
/// ```
///
/// This is backward searching. As you feed in the characters along with a position, `nearest` will
/// give you a new position in the index. Once the range becomes invalid (which happens when the
/// substring doesn't exist), we can bail out. On the contrary, if the range remains valid after
/// you've fed in all the characters, then every value within in that range is an occurrence.
///
/// So, this is useful when you want to cache the repeating ranges. With this, you can build your own
/// count/search functions with caching. It's also useful for making custom approximate matching functions
/// by backtracking whenever there's an invalid range.
pub fn nearest(&self, idx: usize, ch: u8) -> usize {
match self.occ_map.get(ch as usize) {
Some(res) if *res > 0 => {
*res as usize + (0..idx).rev()
.find(|&i| self.data[i] == ch)
.map(|i| self.cache[i] as usize)
.unwrap_or(0)
},
_ => 0,
}
}
fn get_range(&self, query: &str) -> Option<(usize, usize)> {
let mut top = 0;
let mut bottom = self.data.len();
for ch in query.as_bytes().iter().rev() {
top = self.nearest(top, *ch);
bottom = self.nearest(bottom, *ch);
if top >= bottom {
return None
}
}
if top >= bottom {
None
} else {
Some((top, bottom))
}
}
/// Count the occurrences of the substring in the original data.
pub fn count(&self, query: &str) -> usize {
match self.get_range(query) {
Some((top, bottom)) => bottom - top,
None => 0,
}
}
/// Get the positions of occurrences of substring in the original data.
pub fn search(&self, query: &str) -> Vec<usize> {
match self.get_range(query) {
Some((top, bottom)) => (top..bottom).map(|idx| {
let i = self.nearest(idx, self.data[idx]);
self.lf_vec[i] as usize
}).collect(),
None => Vec::new(),
}
}
}
impl Index<usize> for FMIndex {
type Output = u32;
fn index(&self, i: usize) -> &u32 {
self.lf_vec.get(i).expect("index out of range")
}
}
#[cfg(test)]
mod tests {
use super::{FMIndex, bwt, ibwt};
#[test]
fn test_bwt_and_ibwt() {
let text = String::from("ATCTAGGAGATCTGAATCTAGTTCAACTAGCTAGATCTAGAGACAGCTAA");
let bw = bwt(text.as_bytes());
let ibw = ibwt(&bw);
assert_eq!(String::from("AATCGGAGTTGCTTTG\u{0}AGTAGTGATTTTAAGAAAAAACCCCCCTAAAACG"),
String::from_utf8(bw).unwrap());
assert_eq!(text, String::from_utf8(ibw).unwrap());
}
#[test]
fn test_fm_index() {
let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
let index = FMIndex::new(text.as_bytes());
assert_eq!(0, index.count("CCCCC"));
let mut result = index.search("TG");
result.sort();
assert_eq!(result, vec![3, 15, 21]);
let mut result = index.search("GCGT");
result.sort();
assert_eq!(result, vec![0, 26, 46]);
assert_eq!(vec![1], index.search("CGTGCCC"));
}
}
| generate_occurrence_index | identifier_name |
LocaleSwitch.js | /* ************************************************************************
qooxdoo - the new era of web development
http://qooxdoo.org
Copyright:
2011 1&1 Internet AG, Germany, http://www.1und1.de
License:
LGPL: http://www.gnu.org/licenses/lgpl.html
EPL: http://www.eclipse.org/org/documents/epl-v10.php
See the LICENSE file in the project's top-level directory for details.
Authors:
* Daniel Wagner (danielwagner)
************************************************************************ */
/**
* @require(qxWeb)
* @require(qx.module.Attribute)
* @require(qx.module.Traversing)
*/
qx.Class.define("qx.test.mobile.LocaleSwitch", | include : qx.locale.MTranslation,
construct : function()
{
this.base(arguments);
var manager = this.manager = qx.locale.Manager.getInstance();
// add dummy translations
manager.addTranslation("en_QX", {
"test one": "test one",
"test two": "test two",
"test Hello %1!": "test Hello %1!",
"test Jonny": "test Jonny"
});
manager.addTranslation("de_QX", {
"test one": "Eins",
"test two": "Zwei",
"test Hello %1!": "Servus %1!",
"test Jonny": "Jonathan"
});
},
members :
{
setUp : function() {
this.base(arguments);
this.manager.setLocale("en_QX");
},
testLabel : function()
{
var manager = qx.locale.Manager.getInstance();
var label = new qx.ui.mobile.basic.Label(this.tr("test one"));
this.getRoot().add(label);
this.assertEquals("test one", label.getValue());
manager.setLocale("de_QX");
this.assertEquals("Eins", label.getValue());
manager.setLocale("en_QX");
label.setValue(this.tr("test Hello %1!", this.tr("test Jonny")));
this.assertEquals("test Hello test Jonny!", label.getValue());
manager.setLocale("de_QX");
this.assertEquals("Servus Jonathan!", label.getValue());
// de -> en
label.setValue(this.tr("test two"));
this.assertEquals("Zwei", label.getValue());
manager.setLocale("en_QX");
this.assertEquals("test two", label.getValue());
label.destroy();
},
testList : function()
{
var list = new qx.ui.mobile.list.List({
configureItem : function(item, data, row) {
item.setTitle(data.title);
item.setSubtitle(data.subTitle);
}
});
var data = [
{
title: this.tr("test one"),
subTitle: this.tr("test two")
},
{
title: this.tr("test Hello %1!", this.tr("test Jonny")),
subTitle: this.tr("test Jonny")
}
];
list.setModel(new qx.data.Array(data));
this.getRoot().add(list);
this.__testListEn();
this.manager.setLocale("de_QX");
var title0 = q(".list * .list-item-title").eq(0).getHtml();
this.assertEquals("Eins". title0);
var subtitle0 = q(".list * .list-item-subtitle").eq(0).getHtml();
this.assertEquals("Zwei", subtitle0);
var title1 = q(".list * .list-item-title").eq(1).getHtml();
this.assertEquals("Servus Jonathan!", title1);
var subtitle1 = q(".list * .list-item-subtitle").eq(1).getHtml();
this.assertEquals("Jonathan", subtitle1);
this.manager.setLocale("en_QX");
this.__testListEn();
},
__testListEn : function() {
//debugger
var title0 = q(".list * .list-item-title").eq(0).getHtml();
this.assertEquals("test one". title0);
var subtitle0 = q(".list * .list-item-subtitle").eq(0).getHtml();
this.assertEquals("test two", subtitle0);
var title1 = q(".list * .list-item-title").eq(1).getHtml();
this.assertEquals("test Hello test Jonny!", title1);
var subtitle1 = q(".list * .list-item-subtitle").eq(1).getHtml();
this.assertEquals("test Jonny", subtitle1);
},
testFormRendererSingle : function()
{
var manager = qx.locale.Manager.getInstance();
var title = new qx.ui.mobile.form.Title(this.tr("test one"));
var form = new qx.ui.mobile.form.Form();
form.add(new qx.ui.mobile.form.TextField(), this.tr("test two"));
this.getRoot().add(title);
var renderer = new qx.ui.mobile.form.renderer.Single(form);
this.getRoot().add(renderer);
this.assertEquals("test one", title.getValue());
this.assertEquals("test two", renderer._labels[0].getValue());
manager.setLocale("de_QX");
this.assertEquals("Eins", title.getValue());
this.assertEquals("Zwei", renderer._labels[0].getValue());
manager.setLocale("en_QX");
title.destroy();
}
}
}); | {
extend : qx.test.mobile.MobileTestCase, | random_line_split |
flags.js | /**
*/
function Flags(_flags) {
"use strict";
var _init, _isValidFlag,
_convertToBoolean,
_setFlagGetterAndSetter,
_getFlagValuesFromHash,
_internalFlags = {},
_api = this;
const REGEXP_HASHFLAG = /flags\[([A-Za-z0-9\-\_\&=]+)]/i;
/************************* Validate *************************/
// If: The flags was called as a function and not as a constructor.
// Then: Throw an exception and prevent the API from being returned.
if (typeof this === "undefined" || typeof this.constructor === "undefined" || this.constructor.name !== "Flags") {
throw new SyntaxError(`Flags is not a function, use it as a constructor. Usage: var flags = new Flags({})`);
}
// If: The flags list is not defined.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags === "undefined") {
throw new SyntaxError(`The flags list is not defined. Pass an object to the constructor.`);
}
// If: The flags list is not an object.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags !== "object" || Object.keys(_flags).length === 0) {
throw new SyntaxError(`The flags list must be an object.`);
}
/************************* Helper Methods *************************/
_init = function () {
var key, flagsFromHash;
flagsFromHash = _getFlagValuesFromHash();
for (key in _flags) {
_setFlagGetterAndSetter(key, flagsFromHash);
}
};
_setFlagGetterAndSetter = function (key, flags) {
var value = (typeof flags[key] === "boolean")
? flags[key]
: _flags[key];
if (_isValidFlag(key, value)) {
Object.defineProperty(_api, key, {
get: () => {
return _internalFlags[key];
},
set: val => _internalFlags[key] = _convertToBoolean(val)
});
_internalFlags[key] = _convertToBoolean(value);
}
};
_isValidFlag = function (key, value) {
return (
typeof key === "string"
&& typeof value === "boolean"
&& key.trim() !== ""
);
};
_convertToBoolean = function (value) {
var bool;
if (typeof value === "boolean") {
bool = value;
}
else if (typeof value === "number") {
bool = (value === 1)
? true
: false;
}
else |
return bool;
};
_getFlagValuesFromHash = function () {
var hashFlags = {},
regexpResult;
regexpResult = REGEXP_HASHFLAG.exec(location.hash);
if (Array.isArray(regexpResult) && regexpResult.length === 2) {
regexpResult[1]
.split("&")
.forEach(flagStr => {
var key, value, parts;
parts = flagStr.split("=");
key = parts[0];
value = (typeof parts[1] === "string")
? parts[1]
: true;
if (_isValidFlag(key, value)) {
hashFlags[key] = value;
}
});
}
return hashFlags;
};
/************************* Initialize *************************/
_init();
/************************* Return API *************************/
// Object.seal() allows for the values of the flags to be changed, but not the list itself.
return Object.seal(_api);
};
| {
throw new SyntaxError();
} | conditional_block |
flags.js | /**
*/
function | (_flags) {
"use strict";
var _init, _isValidFlag,
_convertToBoolean,
_setFlagGetterAndSetter,
_getFlagValuesFromHash,
_internalFlags = {},
_api = this;
const REGEXP_HASHFLAG = /flags\[([A-Za-z0-9\-\_\&=]+)]/i;
/************************* Validate *************************/
// If: The flags was called as a function and not as a constructor.
// Then: Throw an exception and prevent the API from being returned.
if (typeof this === "undefined" || typeof this.constructor === "undefined" || this.constructor.name !== "Flags") {
throw new SyntaxError(`Flags is not a function, use it as a constructor. Usage: var flags = new Flags({})`);
}
// If: The flags list is not defined.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags === "undefined") {
throw new SyntaxError(`The flags list is not defined. Pass an object to the constructor.`);
}
// If: The flags list is not an object.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags !== "object" || Object.keys(_flags).length === 0) {
throw new SyntaxError(`The flags list must be an object.`);
}
/************************* Helper Methods *************************/
_init = function () {
var key, flagsFromHash;
flagsFromHash = _getFlagValuesFromHash();
for (key in _flags) {
_setFlagGetterAndSetter(key, flagsFromHash);
}
};
_setFlagGetterAndSetter = function (key, flags) {
var value = (typeof flags[key] === "boolean")
? flags[key]
: _flags[key];
if (_isValidFlag(key, value)) {
Object.defineProperty(_api, key, {
get: () => {
return _internalFlags[key];
},
set: val => _internalFlags[key] = _convertToBoolean(val)
});
_internalFlags[key] = _convertToBoolean(value);
}
};
_isValidFlag = function (key, value) {
return (
typeof key === "string"
&& typeof value === "boolean"
&& key.trim() !== ""
);
};
_convertToBoolean = function (value) {
var bool;
if (typeof value === "boolean") {
bool = value;
}
else if (typeof value === "number") {
bool = (value === 1)
? true
: false;
}
else {
throw new SyntaxError();
}
return bool;
};
_getFlagValuesFromHash = function () {
var hashFlags = {},
regexpResult;
regexpResult = REGEXP_HASHFLAG.exec(location.hash);
if (Array.isArray(regexpResult) && regexpResult.length === 2) {
regexpResult[1]
.split("&")
.forEach(flagStr => {
var key, value, parts;
parts = flagStr.split("=");
key = parts[0];
value = (typeof parts[1] === "string")
? parts[1]
: true;
if (_isValidFlag(key, value)) {
hashFlags[key] = value;
}
});
}
return hashFlags;
};
/************************* Initialize *************************/
_init();
/************************* Return API *************************/
// Object.seal() allows for the values of the flags to be changed, but not the list itself.
return Object.seal(_api);
};
| Flags | identifier_name |
flags.js | /**
*/
function Flags(_flags) {
"use strict";
var _init, _isValidFlag,
_convertToBoolean,
_setFlagGetterAndSetter,
_getFlagValuesFromHash,
_internalFlags = {},
_api = this;
const REGEXP_HASHFLAG = /flags\[([A-Za-z0-9\-\_\&=]+)]/i;
/************************* Validate *************************/
// If: The flags was called as a function and not as a constructor.
// Then: Throw an exception and prevent the API from being returned.
if (typeof this === "undefined" || typeof this.constructor === "undefined" || this.constructor.name !== "Flags") { | throw new SyntaxError(`Flags is not a function, use it as a constructor. Usage: var flags = new Flags({})`);
}
// If: The flags list is not defined.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags === "undefined") {
throw new SyntaxError(`The flags list is not defined. Pass an object to the constructor.`);
}
// If: The flags list is not an object.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags !== "object" || Object.keys(_flags).length === 0) {
throw new SyntaxError(`The flags list must be an object.`);
}
/************************* Helper Methods *************************/
_init = function () {
var key, flagsFromHash;
flagsFromHash = _getFlagValuesFromHash();
for (key in _flags) {
_setFlagGetterAndSetter(key, flagsFromHash);
}
};
_setFlagGetterAndSetter = function (key, flags) {
var value = (typeof flags[key] === "boolean")
? flags[key]
: _flags[key];
if (_isValidFlag(key, value)) {
Object.defineProperty(_api, key, {
get: () => {
return _internalFlags[key];
},
set: val => _internalFlags[key] = _convertToBoolean(val)
});
_internalFlags[key] = _convertToBoolean(value);
}
};
_isValidFlag = function (key, value) {
return (
typeof key === "string"
&& typeof value === "boolean"
&& key.trim() !== ""
);
};
_convertToBoolean = function (value) {
var bool;
if (typeof value === "boolean") {
bool = value;
}
else if (typeof value === "number") {
bool = (value === 1)
? true
: false;
}
else {
throw new SyntaxError();
}
return bool;
};
_getFlagValuesFromHash = function () {
var hashFlags = {},
regexpResult;
regexpResult = REGEXP_HASHFLAG.exec(location.hash);
if (Array.isArray(regexpResult) && regexpResult.length === 2) {
regexpResult[1]
.split("&")
.forEach(flagStr => {
var key, value, parts;
parts = flagStr.split("=");
key = parts[0];
value = (typeof parts[1] === "string")
? parts[1]
: true;
if (_isValidFlag(key, value)) {
hashFlags[key] = value;
}
});
}
return hashFlags;
};
/************************* Initialize *************************/
_init();
/************************* Return API *************************/
// Object.seal() allows for the values of the flags to be changed, but not the list itself.
return Object.seal(_api);
}; | random_line_split | |
flags.js | /**
*/
function Flags(_flags) | ;
| {
"use strict";
var _init, _isValidFlag,
_convertToBoolean,
_setFlagGetterAndSetter,
_getFlagValuesFromHash,
_internalFlags = {},
_api = this;
const REGEXP_HASHFLAG = /flags\[([A-Za-z0-9\-\_\&=]+)]/i;
/************************* Validate *************************/
// If: The flags was called as a function and not as a constructor.
// Then: Throw an exception and prevent the API from being returned.
if (typeof this === "undefined" || typeof this.constructor === "undefined" || this.constructor.name !== "Flags") {
throw new SyntaxError(`Flags is not a function, use it as a constructor. Usage: var flags = new Flags({})`);
}
// If: The flags list is not defined.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags === "undefined") {
throw new SyntaxError(`The flags list is not defined. Pass an object to the constructor.`);
}
// If: The flags list is not an object.
// Then: Throw an exception and prevent the API from being returned.
if (typeof _flags !== "object" || Object.keys(_flags).length === 0) {
throw new SyntaxError(`The flags list must be an object.`);
}
/************************* Helper Methods *************************/
_init = function () {
var key, flagsFromHash;
flagsFromHash = _getFlagValuesFromHash();
for (key in _flags) {
_setFlagGetterAndSetter(key, flagsFromHash);
}
};
_setFlagGetterAndSetter = function (key, flags) {
var value = (typeof flags[key] === "boolean")
? flags[key]
: _flags[key];
if (_isValidFlag(key, value)) {
Object.defineProperty(_api, key, {
get: () => {
return _internalFlags[key];
},
set: val => _internalFlags[key] = _convertToBoolean(val)
});
_internalFlags[key] = _convertToBoolean(value);
}
};
_isValidFlag = function (key, value) {
return (
typeof key === "string"
&& typeof value === "boolean"
&& key.trim() !== ""
);
};
_convertToBoolean = function (value) {
var bool;
if (typeof value === "boolean") {
bool = value;
}
else if (typeof value === "number") {
bool = (value === 1)
? true
: false;
}
else {
throw new SyntaxError();
}
return bool;
};
_getFlagValuesFromHash = function () {
var hashFlags = {},
regexpResult;
regexpResult = REGEXP_HASHFLAG.exec(location.hash);
if (Array.isArray(regexpResult) && regexpResult.length === 2) {
regexpResult[1]
.split("&")
.forEach(flagStr => {
var key, value, parts;
parts = flagStr.split("=");
key = parts[0];
value = (typeof parts[1] === "string")
? parts[1]
: true;
if (_isValidFlag(key, value)) {
hashFlags[key] = value;
}
});
}
return hashFlags;
};
/************************* Initialize *************************/
_init();
/************************* Return API *************************/
// Object.seal() allows for the values of the flags to be changed, but not the list itself.
return Object.seal(_api);
} | identifier_body |
variable.py | import collections
import ctypes
import hashlib
import os
import platform
import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
log = CPLog(__name__)
def fnEscape(pattern):
return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateHardLinkW(six.text_type(dst), six.text_type(src), 0) == 0: raise ctypes.WinError()
else:
os.link(src, dst)
def symlink(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateSymbolicLinkW(six.text_type(dst), six.text_type(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError()
else:
os.symlink(src, dst)
def getUserDir():
try:
import pwd
if not os.environ['HOME']:
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
except:
pass
return sp(os.path.expanduser('~'))
def getDownloadDir():
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Downloads')
if os.name == 'nt':
return os.path.join(user_dir, 'Downloads')
return user_dir
def getDataDir():
# Windows
if os.name == 'nt':
return os.path.join(os.environ['APPDATA'], 'CouchPotato')
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato')
# FreeBSD
if 'freebsd' in sys.platform:
return os.path.join('/usr/local/', 'couchpotato', 'data')
# Linux
return os.path.join(user_dir, '.couchpotato')
def isDict(obj):
return isinstance(obj, dict)
def mergeDicts(a, b, prepend_list = False):
assert isDict(a), isDict(b) | for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if isDict(current_src[key]) and isDict(current_dst[key]):
stack.append((current_dst[key], current_src[key]))
elif isinstance(current_src[key], list) and isinstance(current_dst[key], list):
current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key]
current_dst[key] = removeListDuplicates(current_dst[key])
else:
current_dst[key] = current_src[key]
return dst
def removeListDuplicates(seq):
checked = []
for e in seq:
if e not in checked:
checked.append(e)
return checked
def flattenList(l):
if isinstance(l, list):
return sum(map(flattenList, l))
else:
return l
def md5(text):
return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
return hashlib.sha1(text).hexdigest()
def isLocalIP(ip):
ip = ip.lstrip('htps:/')
regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/'
return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
return os.path.splitext(filename)[1][1:]
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
"""Return a cleaned up host with given url options set
Changes protocol to https if ssl is set to True and http if ssl is set to false.
>>> cleanHost("localhost:80", ssl=True)
'https://localhost:80/'
>>> cleanHost("localhost:80", ssl=False)
'http://localhost:80/'
Username and password is managed with the username and password variables
>>> cleanHost("localhost:80", username="user", password="passwd")
'http://user:passwd@localhost:80/'
Output without scheme (protocol) can be forced with protocol=False
>>> cleanHost("localhost:80", protocol=False)
'localhost:80'
"""
if not '://' in host and protocol:
host = ('https://' if ssl else 'http://') + host
if not protocol:
host = host.split('://', 1)[-1]
if protocol and username and password:
try:
auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
if auth:
log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
else:
host = host.replace('://', '://%s:%s@' % (username, password), 1)
except:
pass
host = host.rstrip('/ ')
if protocol:
host += '/'
return host
def getImdb(txt, check_inside = False, multiple = False):
if not check_inside:
txt = simplifyString(txt)
else:
txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
txt = output.read()
output.close()
try:
ids = re.findall('(tt\d{4,7})', txt)
if multiple:
return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else []
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
def tryInt(s, default = 0):
try: return int(s)
except: return default
def tryFloat(s):
try:
if isinstance(s, str):
return float(s) if '.' in s else tryInt(s)
else:
return float(s)
except: return 0
def natsortKey(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
def toIterable(value):
if isinstance(value, collections.Iterable):
return value
return [value]
def getIdentifier(media):
return media.get('identifier') or media.get('identifiers', {}).get('imdb')
def getTitle(media_dict):
try:
try:
return media_dict['title']
except:
try:
return media_dict['titles'][0]
except:
try:
return media_dict['info']['titles'][0]
except:
try:
return media_dict['media']['info']['titles'][0]
except:
log.error('Could not get title for %s', getIdentifier(media_dict))
return None
except:
log.error('Could not get title for library item: %s', media_dict)
return None
def possibleTitles(raw_title):
titles = [
toSafeString(raw_title).lower(),
raw_title.lower(),
simplifyString(raw_title)
]
# replace some chars
new_title = raw_title.replace('&', 'and')
titles.append(simplifyString(new_title))
return removeDuplicate(titles)
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def splitString(str, split_on = ',', clean = True):
l = [x.strip() for x in str.split(split_on)] if str else []
return removeEmpty(l) if clean else l
def removeEmpty(l):
return list(filter(None, l))
def removeDuplicate(l):
seen = set()
return [x for x in l if x not in seen and not seen.add(x)]
def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
if base_folder and sub_folder:
base = sp(os.path.realpath(base_folder)) + os.path.sep
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
return os.path.commonprefix([subfolder, base]) == base
return False
# From SABNZBD
re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)]
def scanForPassword(name):
m = None
for reg in re_password:
m = reg.search(name)
if m: break
if m:
return m.group(1).strip('. '), m.group(2).strip()
under_pat = re.compile(r'_([a-z])')
def underscoreToCamel(name):
return under_pat.sub(lambda x: x.group(1).upper(), name)
def removePyc(folder, only_excess = True, show_logs = True):
folder = sp(folder)
for root, dirs, files in os.walk(folder):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if show_logs: log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
single = not isinstance(directories, (tuple, list))
if single:
directories = [directories]
free_space = {}
for folder in directories:
size = None
if os.path.isdir(folder):
if os.name == 'nt':
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(folder, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
return [total.value, free.value]
else:
s = os.statvfs(folder)
size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
if single: return size
free_space[folder] = size
return free_space
def getSize(paths):
single = not isinstance(paths, (tuple, list))
if single:
paths = [paths]
total_size = 0
for path in paths:
path = sp(path)
if os.path.isdir(path):
total_size = 0
for dirpath, _, filenames in os.walk(path):
for f in filenames:
total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
elif os.path.isfile(path):
total_size += os.path.getsize(path)
return total_size / 1048576 # MB
def find(func, iterable):
for item in iterable:
if func(item):
return item
return None
def compareVersions(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2)) | dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop() | random_line_split |
variable.py | import collections
import ctypes
import hashlib
import os
import platform
import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
log = CPLog(__name__)
def fnEscape(pattern):
return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateHardLinkW(six.text_type(dst), six.text_type(src), 0) == 0: raise ctypes.WinError()
else:
os.link(src, dst)
def symlink(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateSymbolicLinkW(six.text_type(dst), six.text_type(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError()
else:
os.symlink(src, dst)
def getUserDir():
try:
import pwd
if not os.environ['HOME']:
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
except:
pass
return sp(os.path.expanduser('~'))
def getDownloadDir():
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Downloads')
if os.name == 'nt':
return os.path.join(user_dir, 'Downloads')
return user_dir
def getDataDir():
# Windows
if os.name == 'nt':
return os.path.join(os.environ['APPDATA'], 'CouchPotato')
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato')
# FreeBSD
if 'freebsd' in sys.platform:
return os.path.join('/usr/local/', 'couchpotato', 'data')
# Linux
return os.path.join(user_dir, '.couchpotato')
def isDict(obj):
return isinstance(obj, dict)
def mergeDicts(a, b, prepend_list = False):
assert isDict(a), isDict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if isDict(current_src[key]) and isDict(current_dst[key]):
stack.append((current_dst[key], current_src[key]))
elif isinstance(current_src[key], list) and isinstance(current_dst[key], list):
current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key]
current_dst[key] = removeListDuplicates(current_dst[key])
else:
current_dst[key] = current_src[key]
return dst
def removeListDuplicates(seq):
checked = []
for e in seq:
if e not in checked:
checked.append(e)
return checked
def flattenList(l):
if isinstance(l, list):
return sum(map(flattenList, l))
else:
return l
def md5(text):
return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
return hashlib.sha1(text).hexdigest()
def isLocalIP(ip):
ip = ip.lstrip('htps:/')
regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/'
return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
return os.path.splitext(filename)[1][1:]
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
"""Return a cleaned up host with given url options set
Changes protocol to https if ssl is set to True and http if ssl is set to false.
>>> cleanHost("localhost:80", ssl=True)
'https://localhost:80/'
>>> cleanHost("localhost:80", ssl=False)
'http://localhost:80/'
Username and password is managed with the username and password variables
>>> cleanHost("localhost:80", username="user", password="passwd")
'http://user:passwd@localhost:80/'
Output without scheme (protocol) can be forced with protocol=False
>>> cleanHost("localhost:80", protocol=False)
'localhost:80'
"""
if not '://' in host and protocol:
host = ('https://' if ssl else 'http://') + host
if not protocol:
host = host.split('://', 1)[-1]
if protocol and username and password:
try:
auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
if auth:
log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
else:
host = host.replace('://', '://%s:%s@' % (username, password), 1)
except:
pass
host = host.rstrip('/ ')
if protocol:
host += '/'
return host
def getImdb(txt, check_inside = False, multiple = False):
if not check_inside:
txt = simplifyString(txt)
else:
txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
txt = output.read()
output.close()
try:
ids = re.findall('(tt\d{4,7})', txt)
if multiple:
return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else []
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
def tryInt(s, default = 0):
try: return int(s)
except: return default
def tryFloat(s):
try:
if isinstance(s, str):
return float(s) if '.' in s else tryInt(s)
else:
return float(s)
except: return 0
def natsortKey(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
def | (value):
if isinstance(value, collections.Iterable):
return value
return [value]
def getIdentifier(media):
return media.get('identifier') or media.get('identifiers', {}).get('imdb')
def getTitle(media_dict):
try:
try:
return media_dict['title']
except:
try:
return media_dict['titles'][0]
except:
try:
return media_dict['info']['titles'][0]
except:
try:
return media_dict['media']['info']['titles'][0]
except:
log.error('Could not get title for %s', getIdentifier(media_dict))
return None
except:
log.error('Could not get title for library item: %s', media_dict)
return None
def possibleTitles(raw_title):
titles = [
toSafeString(raw_title).lower(),
raw_title.lower(),
simplifyString(raw_title)
]
# replace some chars
new_title = raw_title.replace('&', 'and')
titles.append(simplifyString(new_title))
return removeDuplicate(titles)
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def splitString(str, split_on = ',', clean = True):
l = [x.strip() for x in str.split(split_on)] if str else []
return removeEmpty(l) if clean else l
def removeEmpty(l):
return list(filter(None, l))
def removeDuplicate(l):
seen = set()
return [x for x in l if x not in seen and not seen.add(x)]
def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
if base_folder and sub_folder:
base = sp(os.path.realpath(base_folder)) + os.path.sep
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
return os.path.commonprefix([subfolder, base]) == base
return False
# From SABNZBD
re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)]
def scanForPassword(name):
m = None
for reg in re_password:
m = reg.search(name)
if m: break
if m:
return m.group(1).strip('. '), m.group(2).strip()
under_pat = re.compile(r'_([a-z])')
def underscoreToCamel(name):
return under_pat.sub(lambda x: x.group(1).upper(), name)
def removePyc(folder, only_excess = True, show_logs = True):
folder = sp(folder)
for root, dirs, files in os.walk(folder):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if show_logs: log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
single = not isinstance(directories, (tuple, list))
if single:
directories = [directories]
free_space = {}
for folder in directories:
size = None
if os.path.isdir(folder):
if os.name == 'nt':
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(folder, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
return [total.value, free.value]
else:
s = os.statvfs(folder)
size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
if single: return size
free_space[folder] = size
return free_space
def getSize(paths):
single = not isinstance(paths, (tuple, list))
if single:
paths = [paths]
total_size = 0
for path in paths:
path = sp(path)
if os.path.isdir(path):
total_size = 0
for dirpath, _, filenames in os.walk(path):
for f in filenames:
total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
elif os.path.isfile(path):
total_size += os.path.getsize(path)
return total_size / 1048576 # MB
def find(func, iterable):
for item in iterable:
if func(item):
return item
return None
def compareVersions(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
| toIterable | identifier_name |
variable.py | import collections
import ctypes
import hashlib
import os
import platform
import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
log = CPLog(__name__)
def fnEscape(pattern):
return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateHardLinkW(six.text_type(dst), six.text_type(src), 0) == 0: raise ctypes.WinError()
else:
os.link(src, dst)
def symlink(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateSymbolicLinkW(six.text_type(dst), six.text_type(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError()
else:
os.symlink(src, dst)
def getUserDir():
try:
import pwd
if not os.environ['HOME']:
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
except:
pass
return sp(os.path.expanduser('~'))
def getDownloadDir():
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Downloads')
if os.name == 'nt':
return os.path.join(user_dir, 'Downloads')
return user_dir
def getDataDir():
# Windows
if os.name == 'nt':
return os.path.join(os.environ['APPDATA'], 'CouchPotato')
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato')
# FreeBSD
if 'freebsd' in sys.platform:
return os.path.join('/usr/local/', 'couchpotato', 'data')
# Linux
return os.path.join(user_dir, '.couchpotato')
def isDict(obj):
return isinstance(obj, dict)
def mergeDicts(a, b, prepend_list = False):
assert isDict(a), isDict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if isDict(current_src[key]) and isDict(current_dst[key]):
stack.append((current_dst[key], current_src[key]))
elif isinstance(current_src[key], list) and isinstance(current_dst[key], list):
current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key]
current_dst[key] = removeListDuplicates(current_dst[key])
else:
current_dst[key] = current_src[key]
return dst
def removeListDuplicates(seq):
checked = []
for e in seq:
if e not in checked:
checked.append(e)
return checked
def flattenList(l):
if isinstance(l, list):
return sum(map(flattenList, l))
else:
return l
def md5(text):
return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
return hashlib.sha1(text).hexdigest()
def isLocalIP(ip):
ip = ip.lstrip('htps:/')
regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/'
return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
return os.path.splitext(filename)[1][1:]
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
"""Return a cleaned up host with given url options set
Changes protocol to https if ssl is set to True and http if ssl is set to false.
>>> cleanHost("localhost:80", ssl=True)
'https://localhost:80/'
>>> cleanHost("localhost:80", ssl=False)
'http://localhost:80/'
Username and password is managed with the username and password variables
>>> cleanHost("localhost:80", username="user", password="passwd")
'http://user:passwd@localhost:80/'
Output without scheme (protocol) can be forced with protocol=False
>>> cleanHost("localhost:80", protocol=False)
'localhost:80'
"""
if not '://' in host and protocol:
host = ('https://' if ssl else 'http://') + host
if not protocol:
host = host.split('://', 1)[-1]
if protocol and username and password:
try:
auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
if auth:
log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
else:
host = host.replace('://', '://%s:%s@' % (username, password), 1)
except:
pass
host = host.rstrip('/ ')
if protocol:
host += '/'
return host
def getImdb(txt, check_inside = False, multiple = False):
if not check_inside:
txt = simplifyString(txt)
else:
txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
txt = output.read()
output.close()
try:
ids = re.findall('(tt\d{4,7})', txt)
if multiple:
return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else []
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
def tryInt(s, default = 0):
try: return int(s)
except: return default
def tryFloat(s):
try:
if isinstance(s, str):
return float(s) if '.' in s else tryInt(s)
else:
return float(s)
except: return 0
def natsortKey(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
def toIterable(value):
if isinstance(value, collections.Iterable):
return value
return [value]
def getIdentifier(media):
return media.get('identifier') or media.get('identifiers', {}).get('imdb')
def getTitle(media_dict):
try:
try:
return media_dict['title']
except:
try:
return media_dict['titles'][0]
except:
try:
return media_dict['info']['titles'][0]
except:
try:
return media_dict['media']['info']['titles'][0]
except:
log.error('Could not get title for %s', getIdentifier(media_dict))
return None
except:
log.error('Could not get title for library item: %s', media_dict)
return None
def possibleTitles(raw_title):
titles = [
toSafeString(raw_title).lower(),
raw_title.lower(),
simplifyString(raw_title)
]
# replace some chars
new_title = raw_title.replace('&', 'and')
titles.append(simplifyString(new_title))
return removeDuplicate(titles)
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def splitString(str, split_on = ',', clean = True):
l = [x.strip() for x in str.split(split_on)] if str else []
return removeEmpty(l) if clean else l
def removeEmpty(l):
return list(filter(None, l))
def removeDuplicate(l):
seen = set()
return [x for x in l if x not in seen and not seen.add(x)]
def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
if base_folder and sub_folder:
base = sp(os.path.realpath(base_folder)) + os.path.sep
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
return os.path.commonprefix([subfolder, base]) == base
return False
# From SABNZBD
re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)]
def scanForPassword(name):
m = None
for reg in re_password:
m = reg.search(name)
if m: break
if m:
return m.group(1).strip('. '), m.group(2).strip()
under_pat = re.compile(r'_([a-z])')
def underscoreToCamel(name):
return under_pat.sub(lambda x: x.group(1).upper(), name)
def removePyc(folder, only_excess = True, show_logs = True):
folder = sp(folder)
for root, dirs, files in os.walk(folder):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if show_logs: log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
single = not isinstance(directories, (tuple, list))
if single:
directories = [directories]
free_space = {}
for folder in directories:
size = None
if os.path.isdir(folder):
if os.name == 'nt':
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(folder, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
return [total.value, free.value]
else:
s = os.statvfs(folder)
size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
if single: return size
free_space[folder] = size
return free_space
def getSize(paths):
single = not isinstance(paths, (tuple, list))
if single:
paths = [paths]
total_size = 0
for path in paths:
path = sp(path)
if os.path.isdir(path):
total_size = 0
for dirpath, _, filenames in os.walk(path):
for f in filenames:
|
elif os.path.isfile(path):
total_size += os.path.getsize(path)
return total_size / 1048576 # MB
def find(func, iterable):
for item in iterable:
if func(item):
return item
return None
def compareVersions(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
| total_size += os.path.getsize(sp(os.path.join(dirpath, f))) | conditional_block |
variable.py | import collections
import ctypes
import hashlib
import os
import platform
import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
log = CPLog(__name__)
def fnEscape(pattern):
return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateHardLinkW(six.text_type(dst), six.text_type(src), 0) == 0: raise ctypes.WinError()
else:
os.link(src, dst)
def symlink(src, dst):
if os.name == 'nt':
import ctypes
if ctypes.windll.kernel32.CreateSymbolicLinkW(six.text_type(dst), six.text_type(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError()
else:
os.symlink(src, dst)
def getUserDir():
try:
import pwd
if not os.environ['HOME']:
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
except:
pass
return sp(os.path.expanduser('~'))
def getDownloadDir():
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Downloads')
if os.name == 'nt':
return os.path.join(user_dir, 'Downloads')
return user_dir
def getDataDir():
# Windows
if os.name == 'nt':
return os.path.join(os.environ['APPDATA'], 'CouchPotato')
user_dir = getUserDir()
# OSX
if 'darwin' in platform.platform().lower():
return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato')
# FreeBSD
if 'freebsd' in sys.platform:
return os.path.join('/usr/local/', 'couchpotato', 'data')
# Linux
return os.path.join(user_dir, '.couchpotato')
def isDict(obj):
return isinstance(obj, dict)
def mergeDicts(a, b, prepend_list = False):
assert isDict(a), isDict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if isDict(current_src[key]) and isDict(current_dst[key]):
stack.append((current_dst[key], current_src[key]))
elif isinstance(current_src[key], list) and isinstance(current_dst[key], list):
current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key]
current_dst[key] = removeListDuplicates(current_dst[key])
else:
current_dst[key] = current_src[key]
return dst
def removeListDuplicates(seq):
checked = []
for e in seq:
if e not in checked:
checked.append(e)
return checked
def flattenList(l):
if isinstance(l, list):
return sum(map(flattenList, l))
else:
return l
def md5(text):
return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
return hashlib.sha1(text).hexdigest()
def isLocalIP(ip):
ip = ip.lstrip('htps:/')
regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/'
return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
return os.path.splitext(filename)[1][1:]
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
"""Return a cleaned up host with given url options set
Changes protocol to https if ssl is set to True and http if ssl is set to false.
>>> cleanHost("localhost:80", ssl=True)
'https://localhost:80/'
>>> cleanHost("localhost:80", ssl=False)
'http://localhost:80/'
Username and password is managed with the username and password variables
>>> cleanHost("localhost:80", username="user", password="passwd")
'http://user:passwd@localhost:80/'
Output without scheme (protocol) can be forced with protocol=False
>>> cleanHost("localhost:80", protocol=False)
'localhost:80'
"""
if not '://' in host and protocol:
host = ('https://' if ssl else 'http://') + host
if not protocol:
host = host.split('://', 1)[-1]
if protocol and username and password:
try:
auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
if auth:
log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
else:
host = host.replace('://', '://%s:%s@' % (username, password), 1)
except:
pass
host = host.rstrip('/ ')
if protocol:
host += '/'
return host
def getImdb(txt, check_inside = False, multiple = False):
if not check_inside:
txt = simplifyString(txt)
else:
txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
txt = output.read()
output.close()
try:
ids = re.findall('(tt\d{4,7})', txt)
if multiple:
return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else []
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
def tryInt(s, default = 0):
try: return int(s)
except: return default
def tryFloat(s):
try:
if isinstance(s, str):
return float(s) if '.' in s else tryInt(s)
else:
return float(s)
except: return 0
def natsortKey(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
def toIterable(value):
if isinstance(value, collections.Iterable):
return value
return [value]
def getIdentifier(media):
|
def getTitle(media_dict):
try:
try:
return media_dict['title']
except:
try:
return media_dict['titles'][0]
except:
try:
return media_dict['info']['titles'][0]
except:
try:
return media_dict['media']['info']['titles'][0]
except:
log.error('Could not get title for %s', getIdentifier(media_dict))
return None
except:
log.error('Could not get title for library item: %s', media_dict)
return None
def possibleTitles(raw_title):
titles = [
toSafeString(raw_title).lower(),
raw_title.lower(),
simplifyString(raw_title)
]
# replace some chars
new_title = raw_title.replace('&', 'and')
titles.append(simplifyString(new_title))
return removeDuplicate(titles)
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def splitString(str, split_on = ',', clean = True):
l = [x.strip() for x in str.split(split_on)] if str else []
return removeEmpty(l) if clean else l
def removeEmpty(l):
return list(filter(None, l))
def removeDuplicate(l):
seen = set()
return [x for x in l if x not in seen and not seen.add(x)]
def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
if base_folder and sub_folder:
base = sp(os.path.realpath(base_folder)) + os.path.sep
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
return os.path.commonprefix([subfolder, base]) == base
return False
# From SABNZBD
re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)]
def scanForPassword(name):
m = None
for reg in re_password:
m = reg.search(name)
if m: break
if m:
return m.group(1).strip('. '), m.group(2).strip()
under_pat = re.compile(r'_([a-z])')
def underscoreToCamel(name):
return under_pat.sub(lambda x: x.group(1).upper(), name)
def removePyc(folder, only_excess = True, show_logs = True):
folder = sp(folder)
for root, dirs, files in os.walk(folder):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if show_logs: log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
single = not isinstance(directories, (tuple, list))
if single:
directories = [directories]
free_space = {}
for folder in directories:
size = None
if os.path.isdir(folder):
if os.name == 'nt':
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(folder, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
return [total.value, free.value]
else:
s = os.statvfs(folder)
size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
if single: return size
free_space[folder] = size
return free_space
def getSize(paths):
single = not isinstance(paths, (tuple, list))
if single:
paths = [paths]
total_size = 0
for path in paths:
path = sp(path)
if os.path.isdir(path):
total_size = 0
for dirpath, _, filenames in os.walk(path):
for f in filenames:
total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
elif os.path.isfile(path):
total_size += os.path.getsize(path)
return total_size / 1048576 # MB
def find(func, iterable):
for item in iterable:
if func(item):
return item
return None
def compareVersions(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
| return media.get('identifier') or media.get('identifiers', {}).get('imdb') | identifier_body |
transition.js | /*!
* SeaUI: transition.js v1.0.0
* Copyright 2013-2014 chanh
* Licensed under MIT(https://github.com/seaui/transition/blob/master/LICENSE)
*/
define("seaui/transition/1.0.0/transition",["jquery/jquery/1.10.1/jquery"],function(a){function b() | var c=a("jquery/jquery/1.10.1/jquery");c.fn.emulateTransitionEnd=function(a){var b=!1,d=this;c(this).one(c.support.transition.end,function(){b=!0});var e=function(){b||c(d).trigger(c.support.transition.end)};return setTimeout(e,a),this},c(function(){c.support.transition=b()})});
| {var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1} | identifier_body |
transition.js | /*!
* SeaUI: transition.js v1.0.0
* Copyright 2013-2014 chanh
* Licensed under MIT(https://github.com/seaui/transition/blob/master/LICENSE)
*/
define("seaui/transition/1.0.0/transition",["jquery/jquery/1.10.1/jquery"],function(a){function | (){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}var c=a("jquery/jquery/1.10.1/jquery");c.fn.emulateTransitionEnd=function(a){var b=!1,d=this;c(this).one(c.support.transition.end,function(){b=!0});var e=function(){b||c(d).trigger(c.support.transition.end)};return setTimeout(e,a),this},c(function(){c.support.transition=b()})});
| b | identifier_name |
transition.js | * Licensed under MIT(https://github.com/seaui/transition/blob/master/LICENSE)
*/
define("seaui/transition/1.0.0/transition",["jquery/jquery/1.10.1/jquery"],function(a){function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}var c=a("jquery/jquery/1.10.1/jquery");c.fn.emulateTransitionEnd=function(a){var b=!1,d=this;c(this).one(c.support.transition.end,function(){b=!0});var e=function(){b||c(d).trigger(c.support.transition.end)};return setTimeout(e,a),this},c(function(){c.support.transition=b()})}); | /*!
* SeaUI: transition.js v1.0.0
* Copyright 2013-2014 chanh | random_line_split | |
setup.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
reqs_in = []
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if not milestonever:
milestonever = ""
post_version = _get_git_post_version()
revno = post_version.split(".")[-1]
return "%s~%s.%s%s" % (milestonever, datestamp, revno_prefix, revno)
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
revno = tag_infos[-2]
return "%s.%s" % (base_version, revno)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.openstack.org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really know way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
|
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_git_branchname():
for branch in _run_shell_command("git branch --color=never").split("\n"):
if branch.startswith('*'):
_branch_name = branch.split()[1].strip()
if _branch_name == "(no":
_branch_name = "no-branch"
return _branch_name
def get_pre_version(projectname, base_version):
"""Return a version which is based"""
if os.path.isdir('.git'):
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
get_git_branchname()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version.split('~')[0]
else:
version = read_versioninfo(projectname)
return version.split('~')[0]
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname)
| if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules) | conditional_block |
setup.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
reqs_in = []
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def write_requirements():
|
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if not milestonever:
milestonever = ""
post_version = _get_git_post_version()
revno = post_version.split(".")[-1]
return "%s~%s.%s%s" % (milestonever, datestamp, revno_prefix, revno)
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
revno = tag_infos[-2]
return "%s.%s" % (base_version, revno)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.openstack.org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really know way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_git_branchname():
for branch in _run_shell_command("git branch --color=never").split("\n"):
if branch.startswith('*'):
_branch_name = branch.split()[1].strip()
if _branch_name == "(no":
_branch_name = "no-branch"
return _branch_name
def get_pre_version(projectname, base_version):
"""Return a version which is based"""
if os.path.isdir('.git'):
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
get_git_branchname()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version.split('~')[0]
else:
version = read_versioninfo(projectname)
return version.split('~')[0]
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname)
| venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements) | identifier_body |
setup.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
reqs_in = []
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if not milestonever:
milestonever = ""
post_version = _get_git_post_version()
revno = post_version.split(".")[-1]
return "%s~%s.%s%s" % (milestonever, datestamp, revno_prefix, revno)
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
revno = tag_infos[-2]
return "%s.%s" % (base_version, revno)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def | ():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.openstack.org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really know way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_git_branchname():
for branch in _run_shell_command("git branch --color=never").split("\n"):
if branch.startswith('*'):
_branch_name = branch.split()[1].strip()
if _branch_name == "(no":
_branch_name = "no-branch"
return _branch_name
def get_pre_version(projectname, base_version):
"""Return a version which is based"""
if os.path.isdir('.git'):
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
get_git_branchname()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version.split('~')[0]
else:
version = read_versioninfo(projectname)
return version.split('~')[0]
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname)
| generate_authors | identifier_name |
setup.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
reqs_in = []
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if not milestonever: |
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
revno = tag_infos[-2]
return "%s.%s" % (base_version, revno)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.openstack.org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really know way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_git_branchname():
for branch in _run_shell_command("git branch --color=never").split("\n"):
if branch.startswith('*'):
_branch_name = branch.split()[1].strip()
if _branch_name == "(no":
_branch_name = "no-branch"
return _branch_name
def get_pre_version(projectname, base_version):
"""Return a version which is based"""
if os.path.isdir('.git'):
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
get_git_branchname()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version.split('~')[0]
else:
version = read_versioninfo(projectname)
return version.split('~')[0]
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname) | milestonever = ""
post_version = _get_git_post_version()
revno = post_version.split(".")[-1]
return "%s~%s.%s%s" % (milestonever, datestamp, revno_prefix, revno) | random_line_split |
azure_cleanup.py | import argparse
import sys
import traceback as tb
from datetime import datetime
from cfme.utils.path import log_path
from cfme.utils.providers import list_provider_keys, get_mgmt
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument('--nic-template',
help='NIC Name template to be removed', default="test", type=str)
parser.add_argument('--pip-template',
help='PIP Name template to be removed', default="test", type=str)
parser.add_argument('--days-old',
help='--days-old argument to find stack items older than X days ',
default="7", type=int)
parser.add_argument("--output", dest="output", help="target file name, default "
"'cleanup_azure.log' in "
"utils.path.log_path",
default=log_path.join('cleanup_azure.log').strpath)
args = parser.parse_args()
return args
def azure_cleanup(nic_template, pip_template, days_old, output):
with open(output, 'w') as report:
report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
report.write("\nDate: {}\n".format(datetime.now()))
try:
for provider_key in list_provider_keys('azure'):
provider_mgmt = get_mgmt(provider_key)
nic_list = provider_mgmt.list_free_nics(nic_template)
report.write("----- Provider: {} -----\n".format(provider_key))
if nic_list:
report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
report.write("\n".join(str(k) for k in nic_list))
report.write("\n")
provider_mgmt.remove_nics_by_search(nic_template)
else:
report.write("No \'{}\' NICs were found\n".format(nic_template))
pip_list = provider_mgmt.list_free_pip(pip_template)
if pip_list:
report.write("Removing Public IPs with the name \'{}\':\n".
format(pip_template))
report.write("\n".join(str(k) for k in pip_list))
report.write("\n")
provider_mgmt.remove_pips_by_search(pip_template)
else:
report.write("No \'{}\' Public IPs were found\n".format(pip_template))
stack_list = provider_mgmt.list_stack(days_old=days_old)
if stack_list:
report.write(
"Removing empty Stacks:\n")
for stack in stack_list:
|
else:
report.write("No stacks older than \'{}\' days were found\n".format(
days_old))
return 0
except Exception:
report.write("Something bad happened during Azure cleanup\n")
report.write(tb.format_exc())
return 1
if __name__ == "__main__":
args = parse_cmd_line()
sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output))
| if provider_mgmt.is_stack_empty(stack):
provider_mgmt.delete_stack(stack)
report.write("Stack {} is empty - Removed\n".format(stack)) | conditional_block |
azure_cleanup.py | import argparse
import sys
import traceback as tb
from datetime import datetime
from cfme.utils.path import log_path
from cfme.utils.providers import list_provider_keys, get_mgmt
def | ():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument('--nic-template',
help='NIC Name template to be removed', default="test", type=str)
parser.add_argument('--pip-template',
help='PIP Name template to be removed', default="test", type=str)
parser.add_argument('--days-old',
help='--days-old argument to find stack items older than X days ',
default="7", type=int)
parser.add_argument("--output", dest="output", help="target file name, default "
"'cleanup_azure.log' in "
"utils.path.log_path",
default=log_path.join('cleanup_azure.log').strpath)
args = parser.parse_args()
return args
def azure_cleanup(nic_template, pip_template, days_old, output):
with open(output, 'w') as report:
report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
report.write("\nDate: {}\n".format(datetime.now()))
try:
for provider_key in list_provider_keys('azure'):
provider_mgmt = get_mgmt(provider_key)
nic_list = provider_mgmt.list_free_nics(nic_template)
report.write("----- Provider: {} -----\n".format(provider_key))
if nic_list:
report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
report.write("\n".join(str(k) for k in nic_list))
report.write("\n")
provider_mgmt.remove_nics_by_search(nic_template)
else:
report.write("No \'{}\' NICs were found\n".format(nic_template))
pip_list = provider_mgmt.list_free_pip(pip_template)
if pip_list:
report.write("Removing Public IPs with the name \'{}\':\n".
format(pip_template))
report.write("\n".join(str(k) for k in pip_list))
report.write("\n")
provider_mgmt.remove_pips_by_search(pip_template)
else:
report.write("No \'{}\' Public IPs were found\n".format(pip_template))
stack_list = provider_mgmt.list_stack(days_old=days_old)
if stack_list:
report.write(
"Removing empty Stacks:\n")
for stack in stack_list:
if provider_mgmt.is_stack_empty(stack):
provider_mgmt.delete_stack(stack)
report.write("Stack {} is empty - Removed\n".format(stack))
else:
report.write("No stacks older than \'{}\' days were found\n".format(
days_old))
return 0
except Exception:
report.write("Something bad happened during Azure cleanup\n")
report.write(tb.format_exc())
return 1
if __name__ == "__main__":
args = parse_cmd_line()
sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output))
| parse_cmd_line | identifier_name |
azure_cleanup.py | import argparse
import sys
import traceback as tb
from datetime import datetime
from cfme.utils.path import log_path
from cfme.utils.providers import list_provider_keys, get_mgmt
def parse_cmd_line():
|
def azure_cleanup(nic_template, pip_template, days_old, output):
with open(output, 'w') as report:
report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
report.write("\nDate: {}\n".format(datetime.now()))
try:
for provider_key in list_provider_keys('azure'):
provider_mgmt = get_mgmt(provider_key)
nic_list = provider_mgmt.list_free_nics(nic_template)
report.write("----- Provider: {} -----\n".format(provider_key))
if nic_list:
report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
report.write("\n".join(str(k) for k in nic_list))
report.write("\n")
provider_mgmt.remove_nics_by_search(nic_template)
else:
report.write("No \'{}\' NICs were found\n".format(nic_template))
pip_list = provider_mgmt.list_free_pip(pip_template)
if pip_list:
report.write("Removing Public IPs with the name \'{}\':\n".
format(pip_template))
report.write("\n".join(str(k) for k in pip_list))
report.write("\n")
provider_mgmt.remove_pips_by_search(pip_template)
else:
report.write("No \'{}\' Public IPs were found\n".format(pip_template))
stack_list = provider_mgmt.list_stack(days_old=days_old)
if stack_list:
report.write(
"Removing empty Stacks:\n")
for stack in stack_list:
if provider_mgmt.is_stack_empty(stack):
provider_mgmt.delete_stack(stack)
report.write("Stack {} is empty - Removed\n".format(stack))
else:
report.write("No stacks older than \'{}\' days were found\n".format(
days_old))
return 0
except Exception:
report.write("Something bad happened during Azure cleanup\n")
report.write(tb.format_exc())
return 1
if __name__ == "__main__":
args = parse_cmd_line()
sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output))
| parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument('--nic-template',
help='NIC Name template to be removed', default="test", type=str)
parser.add_argument('--pip-template',
help='PIP Name template to be removed', default="test", type=str)
parser.add_argument('--days-old',
help='--days-old argument to find stack items older than X days ',
default="7", type=int)
parser.add_argument("--output", dest="output", help="target file name, default "
"'cleanup_azure.log' in "
"utils.path.log_path",
default=log_path.join('cleanup_azure.log').strpath)
args = parser.parse_args()
return args | identifier_body |
azure_cleanup.py | import argparse
import sys
import traceback as tb
from datetime import datetime
from cfme.utils.path import log_path
from cfme.utils.providers import list_provider_keys, get_mgmt
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument('--nic-template',
help='NIC Name template to be removed', default="test", type=str)
parser.add_argument('--pip-template',
help='PIP Name template to be removed', default="test", type=str)
parser.add_argument('--days-old',
help='--days-old argument to find stack items older than X days ',
default="7", type=int)
parser.add_argument("--output", dest="output", help="target file name, default "
"'cleanup_azure.log' in "
"utils.path.log_path",
default=log_path.join('cleanup_azure.log').strpath)
args = parser.parse_args()
return args
def azure_cleanup(nic_template, pip_template, days_old, output): | provider_mgmt = get_mgmt(provider_key)
nic_list = provider_mgmt.list_free_nics(nic_template)
report.write("----- Provider: {} -----\n".format(provider_key))
if nic_list:
report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
report.write("\n".join(str(k) for k in nic_list))
report.write("\n")
provider_mgmt.remove_nics_by_search(nic_template)
else:
report.write("No \'{}\' NICs were found\n".format(nic_template))
pip_list = provider_mgmt.list_free_pip(pip_template)
if pip_list:
report.write("Removing Public IPs with the name \'{}\':\n".
format(pip_template))
report.write("\n".join(str(k) for k in pip_list))
report.write("\n")
provider_mgmt.remove_pips_by_search(pip_template)
else:
report.write("No \'{}\' Public IPs were found\n".format(pip_template))
stack_list = provider_mgmt.list_stack(days_old=days_old)
if stack_list:
report.write(
"Removing empty Stacks:\n")
for stack in stack_list:
if provider_mgmt.is_stack_empty(stack):
provider_mgmt.delete_stack(stack)
report.write("Stack {} is empty - Removed\n".format(stack))
else:
report.write("No stacks older than \'{}\' days were found\n".format(
days_old))
return 0
except Exception:
report.write("Something bad happened during Azure cleanup\n")
report.write(tb.format_exc())
return 1
if __name__ == "__main__":
args = parse_cmd_line()
sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output)) | with open(output, 'w') as report:
report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
report.write("\nDate: {}\n".format(datetime.now()))
try:
for provider_key in list_provider_keys('azure'): | random_line_split |
damagePattern.py | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import copy
import eos.db
from eos.saveddata.damagePattern import DamagePattern as es_DamagePattern
class ImportError(Exception):
pass
class DamagePattern():
instance = None
@classmethod
def getInstance(cls):
if cls.instance is None:
cls.instance = DamagePattern()
return cls.instance
def getDamagePatternList(self):
return eos.db.getDamagePatternList()
def getDamagePattern(self, name):
return eos.db.getDamagePattern(name)
def newPattern(self, name):
p = es_DamagePattern(0, 0, 0, 0)
p.name = name
eos.db.save(p)
return p
def renamePattern(self, p, newName):
p.name = newName
eos.db.save(p)
def deletePattern(self, p):
eos.db.remove(p)
def copyPattern(self, p):
newP = copy.deepcopy(p)
eos.db.save(newP)
return newP
def saveChanges(self, p):
eos.db.save(p)
def importPatterns(self, text):
lookup = {}
current = self.getDamagePatternList()
for pattern in current:
lookup[pattern.name] = pattern
imports, num = es_DamagePattern.importPatterns(text)
for pattern in imports:
if pattern.name in lookup:
match = lookup[pattern.name]
match.__dict__.update(pattern.__dict__)
else:
|
eos.db.commit()
lenImports = len(imports)
if lenImports == 0:
raise ImportError("No patterns found for import")
if lenImports != num:
raise ImportError("%d patterns imported from clipboard; %d had errors" % (num, num - lenImports))
def exportPatterns(self):
patterns = self.getDamagePatternList()
for i in xrange(len(patterns) - 1, -1, -1):
if patterns[i].name in ("Uniform", "Selected Ammo"):
del patterns[i]
patterns.sort(key=lambda p: p.name)
return es_DamagePattern.exportPatterns(*patterns)
| eos.db.save(pattern) | conditional_block |
damagePattern.py | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import copy
import eos.db
from eos.saveddata.damagePattern import DamagePattern as es_DamagePattern
class ImportError(Exception):
pass
class DamagePattern():
instance = None
@classmethod
def getInstance(cls):
if cls.instance is None:
cls.instance = DamagePattern()
return cls.instance
def getDamagePatternList(self):
return eos.db.getDamagePatternList()
def getDamagePattern(self, name):
return eos.db.getDamagePattern(name)
def newPattern(self, name):
p = es_DamagePattern(0, 0, 0, 0)
p.name = name
eos.db.save(p)
return p
def renamePattern(self, p, newName):
p.name = newName
eos.db.save(p)
def deletePattern(self, p):
eos.db.remove(p)
def copyPattern(self, p):
newP = copy.deepcopy(p)
eos.db.save(newP)
return newP
def saveChanges(self, p):
eos.db.save(p)
def importPatterns(self, text):
lookup = {}
current = self.getDamagePatternList()
for pattern in current:
lookup[pattern.name] = pattern
imports, num = es_DamagePattern.importPatterns(text)
for pattern in imports:
if pattern.name in lookup:
match = lookup[pattern.name]
match.__dict__.update(pattern.__dict__)
else:
eos.db.save(pattern)
eos.db.commit()
lenImports = len(imports)
if lenImports == 0:
raise ImportError("No patterns found for import")
if lenImports != num:
raise ImportError("%d patterns imported from clipboard; %d had errors" % (num, num - lenImports))
def exportPatterns(self):
patterns = self.getDamagePatternList()
for i in xrange(len(patterns) - 1, -1, -1):
if patterns[i].name in ("Uniform", "Selected Ammo"):
del patterns[i]
| return es_DamagePattern.exportPatterns(*patterns) | patterns.sort(key=lambda p: p.name) | random_line_split |
damagePattern.py | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import copy
import eos.db
from eos.saveddata.damagePattern import DamagePattern as es_DamagePattern
class ImportError(Exception):
pass
class DamagePattern():
instance = None
@classmethod
def getInstance(cls):
if cls.instance is None:
cls.instance = DamagePattern()
return cls.instance
def getDamagePatternList(self):
return eos.db.getDamagePatternList()
def | (self, name):
return eos.db.getDamagePattern(name)
def newPattern(self, name):
p = es_DamagePattern(0, 0, 0, 0)
p.name = name
eos.db.save(p)
return p
def renamePattern(self, p, newName):
p.name = newName
eos.db.save(p)
def deletePattern(self, p):
eos.db.remove(p)
def copyPattern(self, p):
newP = copy.deepcopy(p)
eos.db.save(newP)
return newP
def saveChanges(self, p):
eos.db.save(p)
def importPatterns(self, text):
lookup = {}
current = self.getDamagePatternList()
for pattern in current:
lookup[pattern.name] = pattern
imports, num = es_DamagePattern.importPatterns(text)
for pattern in imports:
if pattern.name in lookup:
match = lookup[pattern.name]
match.__dict__.update(pattern.__dict__)
else:
eos.db.save(pattern)
eos.db.commit()
lenImports = len(imports)
if lenImports == 0:
raise ImportError("No patterns found for import")
if lenImports != num:
raise ImportError("%d patterns imported from clipboard; %d had errors" % (num, num - lenImports))
def exportPatterns(self):
patterns = self.getDamagePatternList()
for i in xrange(len(patterns) - 1, -1, -1):
if patterns[i].name in ("Uniform", "Selected Ammo"):
del patterns[i]
patterns.sort(key=lambda p: p.name)
return es_DamagePattern.exportPatterns(*patterns)
| getDamagePattern | identifier_name |
damagePattern.py | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import copy
import eos.db
from eos.saveddata.damagePattern import DamagePattern as es_DamagePattern
class ImportError(Exception):
pass
class DamagePattern():
instance = None
@classmethod
def getInstance(cls):
|
def getDamagePatternList(self):
return eos.db.getDamagePatternList()
def getDamagePattern(self, name):
return eos.db.getDamagePattern(name)
def newPattern(self, name):
p = es_DamagePattern(0, 0, 0, 0)
p.name = name
eos.db.save(p)
return p
def renamePattern(self, p, newName):
p.name = newName
eos.db.save(p)
def deletePattern(self, p):
eos.db.remove(p)
def copyPattern(self, p):
newP = copy.deepcopy(p)
eos.db.save(newP)
return newP
def saveChanges(self, p):
eos.db.save(p)
def importPatterns(self, text):
lookup = {}
current = self.getDamagePatternList()
for pattern in current:
lookup[pattern.name] = pattern
imports, num = es_DamagePattern.importPatterns(text)
for pattern in imports:
if pattern.name in lookup:
match = lookup[pattern.name]
match.__dict__.update(pattern.__dict__)
else:
eos.db.save(pattern)
eos.db.commit()
lenImports = len(imports)
if lenImports == 0:
raise ImportError("No patterns found for import")
if lenImports != num:
raise ImportError("%d patterns imported from clipboard; %d had errors" % (num, num - lenImports))
def exportPatterns(self):
patterns = self.getDamagePatternList()
for i in xrange(len(patterns) - 1, -1, -1):
if patterns[i].name in ("Uniform", "Selected Ammo"):
del patterns[i]
patterns.sort(key=lambda p: p.name)
return es_DamagePattern.exportPatterns(*patterns)
| if cls.instance is None:
cls.instance = DamagePattern()
return cls.instance | identifier_body |
albums.rs | use std::collections::HashMap;
use postgres::GenericConnection as PostgresConnection;
use db::pg::RowsExtension;
use library::Result;
const MAX_TOP_ALBUMS: i64 = 10;
#[derive(RustcEncodable)]
struct Track {
name: String,
is_favorite: bool,
scrobbles: Vec<i32>,
}
#[derive(RustcEncodable)]
pub struct Album {
name: String,
tracks: HashMap<i32, Track>,
}
#[derive(RustcEncodable)]
pub struct TopAlbum {
label: String,
value: i32,
}
pub type TopAlbums = Vec<TopAlbum>;
pub fn load_album(conn: &PostgresConnection, album_id: i32) -> Result<Option<Album>> {
let rows = try!(conn.query("SELECT name FROM albums WHERE id = $1", &[&album_id]));
if rows.len() == 0 {
return Ok(None);
}
let name: String = rows.get(0).get("name");
let mut tracks = HashMap::<i32, Track>::new();
let query = "SELECT id, name, is_favorite FROM tracks WHERE album_id = $1";
for row in &try!(conn.query(query, &[&album_id])) {
tracks.insert(row.get("id"),
Track {
name: row.get("name"),
is_favorite: row.get("is_favorite"),
scrobbles: Vec::new(),
});
}
let mut ids = tracks.keys().fold(String::new(), |acc, id| format!("{}{},", acc, id));
ids.pop();
let query = format!("SELECT track_id, timestamp FROM scrobbles WHERE track_id IN({})",
ids);
let scrobbles = try!(conn.query(&query, &[]));
for row in &scrobbles {
let track_id: i32 = row.get("track_id");
let timestamp: i32 = row.get("timestamp");
if let Some(track) = tracks.get_mut(&track_id) {
track.scrobbles.push(timestamp)
}
}
Ok(Some(Album {
name: name,
tracks: tracks,
}))
}
pub fn load_top_albums(conn: &PostgresConnection) -> Result<TopAlbums> {
let query = r#"
SELECT
artists.name as artist,
albums.name as album,
albums.plays as plays
FROM albums
LEFT JOIN artists ON artists.id = albums.artist_id
ORDER BY albums.plays DESC
OFFSET 0 LIMIT $1
"#;
let rows = try!(conn.query(query, &[&MAX_TOP_ALBUMS]));
let mut albums = TopAlbums::with_capacity(MAX_TOP_ALBUMS as usize);
for row in &rows {
let artist: String = row.get("artist");
let album: String = row.get("album");
albums.push(TopAlbum {
label: format!("{} - {}", artist, album),
value: row.get("plays"),
});
}
Ok(albums)
} |
pub fn total_albums(conn: &PostgresConnection) -> Result<i64> {
Ok(try!(conn.query("SELECT COUNT(*) FROM albums", &[])).fetch_column())
} | random_line_split | |
albums.rs | use std::collections::HashMap;
use postgres::GenericConnection as PostgresConnection;
use db::pg::RowsExtension;
use library::Result;
const MAX_TOP_ALBUMS: i64 = 10;
#[derive(RustcEncodable)]
struct Track {
name: String,
is_favorite: bool,
scrobbles: Vec<i32>,
}
#[derive(RustcEncodable)]
pub struct Album {
name: String,
tracks: HashMap<i32, Track>,
}
#[derive(RustcEncodable)]
pub struct TopAlbum {
label: String,
value: i32,
}
pub type TopAlbums = Vec<TopAlbum>;
pub fn load_album(conn: &PostgresConnection, album_id: i32) -> Result<Option<Album>> {
let rows = try!(conn.query("SELECT name FROM albums WHERE id = $1", &[&album_id]));
if rows.len() == 0 |
let name: String = rows.get(0).get("name");
let mut tracks = HashMap::<i32, Track>::new();
let query = "SELECT id, name, is_favorite FROM tracks WHERE album_id = $1";
for row in &try!(conn.query(query, &[&album_id])) {
tracks.insert(row.get("id"),
Track {
name: row.get("name"),
is_favorite: row.get("is_favorite"),
scrobbles: Vec::new(),
});
}
let mut ids = tracks.keys().fold(String::new(), |acc, id| format!("{}{},", acc, id));
ids.pop();
let query = format!("SELECT track_id, timestamp FROM scrobbles WHERE track_id IN({})",
ids);
let scrobbles = try!(conn.query(&query, &[]));
for row in &scrobbles {
let track_id: i32 = row.get("track_id");
let timestamp: i32 = row.get("timestamp");
if let Some(track) = tracks.get_mut(&track_id) {
track.scrobbles.push(timestamp)
}
}
Ok(Some(Album {
name: name,
tracks: tracks,
}))
}
pub fn load_top_albums(conn: &PostgresConnection) -> Result<TopAlbums> {
let query = r#"
SELECT
artists.name as artist,
albums.name as album,
albums.plays as plays
FROM albums
LEFT JOIN artists ON artists.id = albums.artist_id
ORDER BY albums.plays DESC
OFFSET 0 LIMIT $1
"#;
let rows = try!(conn.query(query, &[&MAX_TOP_ALBUMS]));
let mut albums = TopAlbums::with_capacity(MAX_TOP_ALBUMS as usize);
for row in &rows {
let artist: String = row.get("artist");
let album: String = row.get("album");
albums.push(TopAlbum {
label: format!("{} - {}", artist, album),
value: row.get("plays"),
});
}
Ok(albums)
}
pub fn total_albums(conn: &PostgresConnection) -> Result<i64> {
Ok(try!(conn.query("SELECT COUNT(*) FROM albums", &[])).fetch_column())
}
| {
return Ok(None);
} | conditional_block |
albums.rs | use std::collections::HashMap;
use postgres::GenericConnection as PostgresConnection;
use db::pg::RowsExtension;
use library::Result;
const MAX_TOP_ALBUMS: i64 = 10;
#[derive(RustcEncodable)]
struct Track {
name: String,
is_favorite: bool,
scrobbles: Vec<i32>,
}
#[derive(RustcEncodable)]
pub struct Album {
name: String,
tracks: HashMap<i32, Track>,
}
#[derive(RustcEncodable)]
pub struct TopAlbum {
label: String,
value: i32,
}
pub type TopAlbums = Vec<TopAlbum>;
pub fn load_album(conn: &PostgresConnection, album_id: i32) -> Result<Option<Album>> {
let rows = try!(conn.query("SELECT name FROM albums WHERE id = $1", &[&album_id]));
if rows.len() == 0 {
return Ok(None);
}
let name: String = rows.get(0).get("name");
let mut tracks = HashMap::<i32, Track>::new();
let query = "SELECT id, name, is_favorite FROM tracks WHERE album_id = $1";
for row in &try!(conn.query(query, &[&album_id])) {
tracks.insert(row.get("id"),
Track {
name: row.get("name"),
is_favorite: row.get("is_favorite"),
scrobbles: Vec::new(),
});
}
let mut ids = tracks.keys().fold(String::new(), |acc, id| format!("{}{},", acc, id));
ids.pop();
let query = format!("SELECT track_id, timestamp FROM scrobbles WHERE track_id IN({})",
ids);
let scrobbles = try!(conn.query(&query, &[]));
for row in &scrobbles {
let track_id: i32 = row.get("track_id");
let timestamp: i32 = row.get("timestamp");
if let Some(track) = tracks.get_mut(&track_id) {
track.scrobbles.push(timestamp)
}
}
Ok(Some(Album {
name: name,
tracks: tracks,
}))
}
pub fn | (conn: &PostgresConnection) -> Result<TopAlbums> {
let query = r#"
SELECT
artists.name as artist,
albums.name as album,
albums.plays as plays
FROM albums
LEFT JOIN artists ON artists.id = albums.artist_id
ORDER BY albums.plays DESC
OFFSET 0 LIMIT $1
"#;
let rows = try!(conn.query(query, &[&MAX_TOP_ALBUMS]));
let mut albums = TopAlbums::with_capacity(MAX_TOP_ALBUMS as usize);
for row in &rows {
let artist: String = row.get("artist");
let album: String = row.get("album");
albums.push(TopAlbum {
label: format!("{} - {}", artist, album),
value: row.get("plays"),
});
}
Ok(albums)
}
pub fn total_albums(conn: &PostgresConnection) -> Result<i64> {
Ok(try!(conn.query("SELECT COUNT(*) FROM albums", &[])).fetch_column())
}
| load_top_albums | identifier_name |
base_protocol.py | from struct import pack
from google.protobuf.message import DecodeError
from twisted.internet.protocol import Protocol
from twisted.python import log
from relayserver.utility import get_hex_dump
class BaseProtocol(Protocol):
def write_message(self, message):
data = message.SerializeToString()
message_len = len(data)
prefix = pack('>I', message_len)
total_len = len(prefix) + message_len
log.msg("Message type [%s] serialized into (%d) bytes and wrapped as "
"(%d) bytes." %
(message.__class__.__name__, message_len, total_len))
self.transport.write(prefix)
self.transport.write(data)
def parse_or_raise(self, message_raw, type_):
| log.msg("Parsing [%s] in (%d) bytes." % (type_.__name__, len(message_raw)))
message = type_()
try:
message.ParseFromString(message_raw)
except DecodeError:
raise
else:
if message.IsInitialized() is False:
raise Exception("Message parse with type [%s] resulted in "
"uninitialized message." % (type_))
return message | identifier_body | |
base_protocol.py | from struct import pack
from google.protobuf.message import DecodeError
from twisted.internet.protocol import Protocol
from twisted.python import log
from relayserver.utility import get_hex_dump
class | (Protocol):
def write_message(self, message):
data = message.SerializeToString()
message_len = len(data)
prefix = pack('>I', message_len)
total_len = len(prefix) + message_len
log.msg("Message type [%s] serialized into (%d) bytes and wrapped as "
"(%d) bytes." %
(message.__class__.__name__, message_len, total_len))
self.transport.write(prefix)
self.transport.write(data)
def parse_or_raise(self, message_raw, type_):
log.msg("Parsing [%s] in (%d) bytes." % (type_.__name__, len(message_raw)))
message = type_()
try:
message.ParseFromString(message_raw)
except DecodeError:
raise
else:
if message.IsInitialized() is False:
raise Exception("Message parse with type [%s] resulted in "
"uninitialized message." % (type_))
return message
| BaseProtocol | identifier_name |
base_protocol.py | from struct import pack
from google.protobuf.message import DecodeError
from twisted.internet.protocol import Protocol
from twisted.python import log
from relayserver.utility import get_hex_dump
|
total_len = len(prefix) + message_len
log.msg("Message type [%s] serialized into (%d) bytes and wrapped as "
"(%d) bytes." %
(message.__class__.__name__, message_len, total_len))
self.transport.write(prefix)
self.transport.write(data)
def parse_or_raise(self, message_raw, type_):
log.msg("Parsing [%s] in (%d) bytes." % (type_.__name__, len(message_raw)))
message = type_()
try:
message.ParseFromString(message_raw)
except DecodeError:
raise
else:
if message.IsInitialized() is False:
raise Exception("Message parse with type [%s] resulted in "
"uninitialized message." % (type_))
return message | class BaseProtocol(Protocol):
def write_message(self, message):
data = message.SerializeToString()
message_len = len(data)
prefix = pack('>I', message_len) | random_line_split |
base_protocol.py | from struct import pack
from google.protobuf.message import DecodeError
from twisted.internet.protocol import Protocol
from twisted.python import log
from relayserver.utility import get_hex_dump
class BaseProtocol(Protocol):
def write_message(self, message):
data = message.SerializeToString()
message_len = len(data)
prefix = pack('>I', message_len)
total_len = len(prefix) + message_len
log.msg("Message type [%s] serialized into (%d) bytes and wrapped as "
"(%d) bytes." %
(message.__class__.__name__, message_len, total_len))
self.transport.write(prefix)
self.transport.write(data)
def parse_or_raise(self, message_raw, type_):
log.msg("Parsing [%s] in (%d) bytes." % (type_.__name__, len(message_raw)))
message = type_()
try:
message.ParseFromString(message_raw)
except DecodeError:
raise
else:
if message.IsInitialized() is False:
|
return message
| raise Exception("Message parse with type [%s] resulted in "
"uninitialized message." % (type_)) | conditional_block |
tasks.py | """
This file contains celery tasks related to course content gating.
"""
import logging
from celery import shared_task
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from edx_django_utils.monitoring import set_code_owner_attribute
from opaque_keys.edx.keys import CourseKey, UsageKey
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.gating import api as gating_api
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger(__name__)
@shared_task
@set_code_owner_attribute
def task_evaluate_subsection_completion_milestones(course_id, block_id, user_id):
"""
Updates users' milestones related to completion of a subsection.
Args:
course_id(str): Course id which triggered a completion event
block_id(str): Id of the completed block
user_id(int): Id of the user who completed a block
"""
store = modulestore()
course_key = CourseKey.from_string(course_id)
with store.bulk_operations(course_key):
course = store.get_course(course_key)
if not course or not course.enable_subsection_gating:
log.debug(
"Gating: ignoring evaluation of completion milestone because it disabled for course [%s]", course_id
)
else:
try:
user = User.objects.get(id=user_id)
course_structure = get_course_blocks(user, store.make_course_usage_key(course_key))
completed_block_usage_key = UsageKey.from_string(block_id).map_into_course(course.id)
subsection_block = _get_subsection_of_block(completed_block_usage_key, course_structure)
subsection = course_structure[subsection_block]
log.debug(
"Gating: Evaluating completion milestone for subsection [%s] and user [%s]",
str(subsection.location), user.id
)
gating_api.evaluate_prerequisite(course, subsection, user)
except KeyError:
log.error("Gating: Given prerequisite subsection [%s] not found in course structure", block_id)
def | (usage_key, block_structure):
"""
Finds subsection of a block by recursively iterating over its parents
:param usage_key: key of the block
:param block_structure: block structure
:return: sequential block
"""
parents = block_structure.get_parents(usage_key)
if parents:
for parent_block in parents:
if parent_block.block_type == 'sequential':
return parent_block
else:
return _get_subsection_of_block(parent_block, block_structure)
| _get_subsection_of_block | identifier_name |
tasks.py | """
This file contains celery tasks related to course content gating.
"""
import logging
from celery import shared_task
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from edx_django_utils.monitoring import set_code_owner_attribute
from opaque_keys.edx.keys import CourseKey, UsageKey
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.gating import api as gating_api
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger(__name__)
@shared_task
@set_code_owner_attribute
def task_evaluate_subsection_completion_milestones(course_id, block_id, user_id):
"""
Updates users' milestones related to completion of a subsection.
Args:
course_id(str): Course id which triggered a completion event
block_id(str): Id of the completed block
user_id(int): Id of the user who completed a block
"""
store = modulestore()
course_key = CourseKey.from_string(course_id)
with store.bulk_operations(course_key):
course = store.get_course(course_key)
if not course or not course.enable_subsection_gating:
log.debug(
"Gating: ignoring evaluation of completion milestone because it disabled for course [%s]", course_id
)
else:
|
def _get_subsection_of_block(usage_key, block_structure):
"""
Finds subsection of a block by recursively iterating over its parents
:param usage_key: key of the block
:param block_structure: block structure
:return: sequential block
"""
parents = block_structure.get_parents(usage_key)
if parents:
for parent_block in parents:
if parent_block.block_type == 'sequential':
return parent_block
else:
return _get_subsection_of_block(parent_block, block_structure)
| try:
user = User.objects.get(id=user_id)
course_structure = get_course_blocks(user, store.make_course_usage_key(course_key))
completed_block_usage_key = UsageKey.from_string(block_id).map_into_course(course.id)
subsection_block = _get_subsection_of_block(completed_block_usage_key, course_structure)
subsection = course_structure[subsection_block]
log.debug(
"Gating: Evaluating completion milestone for subsection [%s] and user [%s]",
str(subsection.location), user.id
)
gating_api.evaluate_prerequisite(course, subsection, user)
except KeyError:
log.error("Gating: Given prerequisite subsection [%s] not found in course structure", block_id) | conditional_block |
tasks.py | """
This file contains celery tasks related to course content gating.
"""
import logging
from celery import shared_task
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from edx_django_utils.monitoring import set_code_owner_attribute
from opaque_keys.edx.keys import CourseKey, UsageKey
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.gating import api as gating_api
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger(__name__)
@shared_task
@set_code_owner_attribute
def task_evaluate_subsection_completion_milestones(course_id, block_id, user_id):
"""
Updates users' milestones related to completion of a subsection.
Args:
course_id(str): Course id which triggered a completion event
block_id(str): Id of the completed block
user_id(int): Id of the user who completed a block
"""
store = modulestore()
course_key = CourseKey.from_string(course_id)
with store.bulk_operations(course_key):
course = store.get_course(course_key)
if not course or not course.enable_subsection_gating:
log.debug(
"Gating: ignoring evaluation of completion milestone because it disabled for course [%s]", course_id
)
else:
try:
user = User.objects.get(id=user_id)
course_structure = get_course_blocks(user, store.make_course_usage_key(course_key))
completed_block_usage_key = UsageKey.from_string(block_id).map_into_course(course.id)
subsection_block = _get_subsection_of_block(completed_block_usage_key, course_structure)
subsection = course_structure[subsection_block]
log.debug(
"Gating: Evaluating completion milestone for subsection [%s] and user [%s]",
str(subsection.location), user.id
)
gating_api.evaluate_prerequisite(course, subsection, user)
except KeyError:
log.error("Gating: Given prerequisite subsection [%s] not found in course structure", block_id)
def _get_subsection_of_block(usage_key, block_structure):
| """
Finds subsection of a block by recursively iterating over its parents
:param usage_key: key of the block
:param block_structure: block structure
:return: sequential block
"""
parents = block_structure.get_parents(usage_key)
if parents:
for parent_block in parents:
if parent_block.block_type == 'sequential':
return parent_block
else:
return _get_subsection_of_block(parent_block, block_structure) | identifier_body | |
tasks.py | """
This file contains celery tasks related to course content gating.
"""
import logging
from celery import shared_task
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from edx_django_utils.monitoring import set_code_owner_attribute
from opaque_keys.edx.keys import CourseKey, UsageKey
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.gating import api as gating_api
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger(__name__)
@shared_task
@set_code_owner_attribute
def task_evaluate_subsection_completion_milestones(course_id, block_id, user_id):
"""
Updates users' milestones related to completion of a subsection.
Args:
course_id(str): Course id which triggered a completion event
block_id(str): Id of the completed block
user_id(int): Id of the user who completed a block
"""
store = modulestore()
course_key = CourseKey.from_string(course_id)
with store.bulk_operations(course_key):
course = store.get_course(course_key)
if not course or not course.enable_subsection_gating:
log.debug(
"Gating: ignoring evaluation of completion milestone because it disabled for course [%s]", course_id
)
else:
try:
user = User.objects.get(id=user_id)
course_structure = get_course_blocks(user, store.make_course_usage_key(course_key))
completed_block_usage_key = UsageKey.from_string(block_id).map_into_course(course.id)
subsection_block = _get_subsection_of_block(completed_block_usage_key, course_structure)
subsection = course_structure[subsection_block]
log.debug(
"Gating: Evaluating completion milestone for subsection [%s] and user [%s]",
str(subsection.location), user.id
)
gating_api.evaluate_prerequisite(course, subsection, user)
except KeyError:
log.error("Gating: Given prerequisite subsection [%s] not found in course structure", block_id)
def _get_subsection_of_block(usage_key, block_structure):
"""
Finds subsection of a block by recursively iterating over its parents
:param usage_key: key of the block
:param block_structure: block structure
:return: sequential block
""" | if parents:
for parent_block in parents:
if parent_block.block_type == 'sequential':
return parent_block
else:
return _get_subsection_of_block(parent_block, block_structure) | parents = block_structure.get_parents(usage_key) | random_line_split |
FieldFilterComponent.tsx | import { WorkItemTypeField } from '../data/AnalyticsTypes';
import * as Q from 'q';
import * as React from 'react';
import * as FluxTypes from './FluxTypes';
import { Store } from "VSS/Flux/Store";
import { Picker } from './PickerComponent';
import { AllowedOperator, FieldFilterConfigurationState, FieldFilterRowData } from '../common/FieldFilterContracts';
/** Simple implementation of a Field Filter Component. */
export class FieldFilterComponent extends React.Component<FieldFilterConfigurationState, {}> {
public render(): JSX.Element {
let content = [];
content.push(<FieldFilterHeaderComponent />);
for (let i = 0; i < this.props.fieldFilterRowValues.length; i++) {
let rowProps = this.props.fieldFilterRowValues[i];
content.push(<FieldFilterRowComponent {...rowProps}></FieldFilterRowComponent>);
}
content.push(
<a className="add-filter-row-button" onClick={()=>{this.props.addRow();}}>
<span role="button" className="bowtie-icon bowtie-math-plus"></span>
<span className="text">Add criteria</span>
</a>
);
return <div className="field-filter">
{content}
</div>;
}
}
export class FieldFilterRowComponent extends React.Component<FieldFilterRowData, {}> {
public render(): JSX.Element {
/* Note - This example is operating with a Picker control to illustrate ability to select from a known list, which does not allow for manual editing.
VSS Combo or Fabric ComboBox controls do support hybrid models. */
return <div className="field-filter">
<Picker className="field-picker" itemValues={this.props.allowedFields}
getItemText={(field: WorkItemTypeField)=> {return field.FieldName;}}
getItemId={(field: WorkItemTypeField)=>{ return field.FieldReferenceName;}}
initialSelectionId={this.props.settings.fieldReferenceName}
onChange={(value:WorkItemTypeField)=>{this.props.updateField(value);}}></Picker>
<Picker className="operation-picker" itemValues={this.props.allowedOperators}
getItemText={(operator: AllowedOperator)=> {return operator.DisplayText;}}
getItemId={(operator: AllowedOperator)=>{ return operator.value;}}
initialSelectionId={this.props.settings.operator}
onChange={(value:AllowedOperator)=>{this.props.updateOperator(value.value);}}></Picker>
<Picker className="value-picker" itemValues={this.props.suggestedValues}
getItemText={(value: string)=> {return value;}}
getItemId={(value: string)=>{ return value;}}
initialSelectionId={this.props.settings.value}
onChange={(value:string)=>{this.props.updateValue(value);}}></Picker>
<a className="remove-filter-row-button " onClick={()=>{this.props.removeRow();}}><span role="button" className="bowtie-icon bowtie-edit-delete"></span></a>
</div>;
}
}
/** Renders a header row for Field Filter */
export class FieldFilterHeaderComponent extends React.Component<{}, {}> {
public render(): JSX.Element |
} | {
return <div className="field-filter-header">
<span className="filter-field-label">Field</span>
<span className="filter-operation-label">Operation</span>
<span className="filter-value-label">Value</span>
</div>;
} | identifier_body |
FieldFilterComponent.tsx | import { WorkItemTypeField } from '../data/AnalyticsTypes';
import * as Q from 'q';
import * as React from 'react';
import * as FluxTypes from './FluxTypes';
import { Store } from "VSS/Flux/Store";
import { Picker } from './PickerComponent';
import { AllowedOperator, FieldFilterConfigurationState, FieldFilterRowData } from '../common/FieldFilterContracts';
/** Simple implementation of a Field Filter Component. */
export class FieldFilterComponent extends React.Component<FieldFilterConfigurationState, {}> {
public render(): JSX.Element {
let content = [];
content.push(<FieldFilterHeaderComponent />);
for (let i = 0; i < this.props.fieldFilterRowValues.length; i++) {
let rowProps = this.props.fieldFilterRowValues[i];
content.push(<FieldFilterRowComponent {...rowProps}></FieldFilterRowComponent>);
}
content.push(
<a className="add-filter-row-button" onClick={()=>{this.props.addRow();}}>
<span role="button" className="bowtie-icon bowtie-math-plus"></span>
<span className="text">Add criteria</span>
</a>
);
return <div className="field-filter">
{content}
</div>;
}
}
export class FieldFilterRowComponent extends React.Component<FieldFilterRowData, {}> {
public | (): JSX.Element {
/* Note - This example is operating with a Picker control to illustrate ability to select from a known list, which does not allow for manual editing.
VSS Combo or Fabric ComboBox controls do support hybrid models. */
return <div className="field-filter">
<Picker className="field-picker" itemValues={this.props.allowedFields}
getItemText={(field: WorkItemTypeField)=> {return field.FieldName;}}
getItemId={(field: WorkItemTypeField)=>{ return field.FieldReferenceName;}}
initialSelectionId={this.props.settings.fieldReferenceName}
onChange={(value:WorkItemTypeField)=>{this.props.updateField(value);}}></Picker>
<Picker className="operation-picker" itemValues={this.props.allowedOperators}
getItemText={(operator: AllowedOperator)=> {return operator.DisplayText;}}
getItemId={(operator: AllowedOperator)=>{ return operator.value;}}
initialSelectionId={this.props.settings.operator}
onChange={(value:AllowedOperator)=>{this.props.updateOperator(value.value);}}></Picker>
<Picker className="value-picker" itemValues={this.props.suggestedValues}
getItemText={(value: string)=> {return value;}}
getItemId={(value: string)=>{ return value;}}
initialSelectionId={this.props.settings.value}
onChange={(value:string)=>{this.props.updateValue(value);}}></Picker>
<a className="remove-filter-row-button " onClick={()=>{this.props.removeRow();}}><span role="button" className="bowtie-icon bowtie-edit-delete"></span></a>
</div>;
}
}
/** Renders a header row for Field Filter */
export class FieldFilterHeaderComponent extends React.Component<{}, {}> {
public render(): JSX.Element {
return <div className="field-filter-header">
<span className="filter-field-label">Field</span>
<span className="filter-operation-label">Operation</span>
<span className="filter-value-label">Value</span>
</div>;
}
} | render | identifier_name |
FieldFilterComponent.tsx | import { WorkItemTypeField } from '../data/AnalyticsTypes';
import * as Q from 'q';
import * as React from 'react';
import * as FluxTypes from './FluxTypes';
import { Store } from "VSS/Flux/Store";
import { Picker } from './PickerComponent';
import { AllowedOperator, FieldFilterConfigurationState, FieldFilterRowData } from '../common/FieldFilterContracts';
/** Simple implementation of a Field Filter Component. */
export class FieldFilterComponent extends React.Component<FieldFilterConfigurationState, {}> {
public render(): JSX.Element {
let content = [];
content.push(<FieldFilterHeaderComponent />);
for (let i = 0; i < this.props.fieldFilterRowValues.length; i++) {
let rowProps = this.props.fieldFilterRowValues[i];
content.push(<FieldFilterRowComponent {...rowProps}></FieldFilterRowComponent>);
}
content.push(
<a className="add-filter-row-button" onClick={()=>{this.props.addRow();}}>
<span role="button" className="bowtie-icon bowtie-math-plus"></span>
<span className="text">Add criteria</span>
</a>
);
return <div className="field-filter">
{content}
</div>;
}
}
export class FieldFilterRowComponent extends React.Component<FieldFilterRowData, {}> {
public render(): JSX.Element {
/* Note - This example is operating with a Picker control to illustrate ability to select from a known list, which does not allow for manual editing.
VSS Combo or Fabric ComboBox controls do support hybrid models. */
return <div className="field-filter">
<Picker className="field-picker" itemValues={this.props.allowedFields}
getItemText={(field: WorkItemTypeField)=> {return field.FieldName;}}
getItemId={(field: WorkItemTypeField)=>{ return field.FieldReferenceName;}}
initialSelectionId={this.props.settings.fieldReferenceName}
onChange={(value:WorkItemTypeField)=>{this.props.updateField(value);}}></Picker>
<Picker className="operation-picker" itemValues={this.props.allowedOperators}
getItemText={(operator: AllowedOperator)=> {return operator.DisplayText;}}
getItemId={(operator: AllowedOperator)=>{ return operator.value;}}
initialSelectionId={this.props.settings.operator}
onChange={(value:AllowedOperator)=>{this.props.updateOperator(value.value);}}></Picker>
|
<a className="remove-filter-row-button " onClick={()=>{this.props.removeRow();}}><span role="button" className="bowtie-icon bowtie-edit-delete"></span></a>
</div>;
}
}
/** Renders a header row for Field Filter */
export class FieldFilterHeaderComponent extends React.Component<{}, {}> {
public render(): JSX.Element {
return <div className="field-filter-header">
<span className="filter-field-label">Field</span>
<span className="filter-operation-label">Operation</span>
<span className="filter-value-label">Value</span>
</div>;
}
} | <Picker className="value-picker" itemValues={this.props.suggestedValues}
getItemText={(value: string)=> {return value;}}
getItemId={(value: string)=>{ return value;}}
initialSelectionId={this.props.settings.value}
onChange={(value:string)=>{this.props.updateValue(value);}}></Picker> | random_line_split |
admin.auth.guard.spec.ts | import { AdminAuthGuard } from './admin.auth.guard';
export function main() | ;
| {
describe('Admin Auth Guard', () => {
let mockCurrentUser: any;
let mockError: any;
let mockCapabilites: any;
describe('canActivate()', () => {
let loggedIn: boolean;
let hasRoot: boolean;
beforeEach(() => {
mockCurrentUser = { loggedIn: () => loggedIn };
mockCapabilites = { viewAdmin: () => hasRoot };
mockError = { handle: jasmine.createSpy('handle') };
});
it('returns true when logged in and has root', () => {
loggedIn = true;
hasRoot = true;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(true);
expect(mockError.handle).not.toHaveBeenCalled();
});
it('returns false/unauthenticated when not logged in', () => {
loggedIn = false;
hasRoot = false;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(false);
expect(mockError.handle).toHaveBeenCalledWith({ status: 401 });
});
it('returns false/unauthorized when logged in and not root', () => {
loggedIn = true;
hasRoot = false;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(false);
expect(mockError.handle).toHaveBeenCalledWith({ status: 403 });
});
});
});
} | identifier_body |
admin.auth.guard.spec.ts | import { AdminAuthGuard } from './admin.auth.guard';
export function | () {
describe('Admin Auth Guard', () => {
let mockCurrentUser: any;
let mockError: any;
let mockCapabilites: any;
describe('canActivate()', () => {
let loggedIn: boolean;
let hasRoot: boolean;
beforeEach(() => {
mockCurrentUser = { loggedIn: () => loggedIn };
mockCapabilites = { viewAdmin: () => hasRoot };
mockError = { handle: jasmine.createSpy('handle') };
});
it('returns true when logged in and has root', () => {
loggedIn = true;
hasRoot = true;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(true);
expect(mockError.handle).not.toHaveBeenCalled();
});
it('returns false/unauthenticated when not logged in', () => {
loggedIn = false;
hasRoot = false;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(false);
expect(mockError.handle).toHaveBeenCalledWith({ status: 401 });
});
it('returns false/unauthorized when logged in and not root', () => {
loggedIn = true;
hasRoot = false;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(false);
expect(mockError.handle).toHaveBeenCalledWith({ status: 403 });
});
});
});
};
| main | identifier_name |
admin.auth.guard.spec.ts | import { AdminAuthGuard } from './admin.auth.guard';
export function main() {
describe('Admin Auth Guard', () => {
let mockCurrentUser: any;
let mockError: any;
let mockCapabilites: any;
describe('canActivate()', () => {
let loggedIn: boolean;
let hasRoot: boolean;
beforeEach(() => {
mockCurrentUser = { loggedIn: () => loggedIn };
mockCapabilites = { viewAdmin: () => hasRoot }; |
it('returns true when logged in and has root', () => {
loggedIn = true;
hasRoot = true;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(true);
expect(mockError.handle).not.toHaveBeenCalled();
});
it('returns false/unauthenticated when not logged in', () => {
loggedIn = false;
hasRoot = false;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(false);
expect(mockError.handle).toHaveBeenCalledWith({ status: 401 });
});
it('returns false/unauthorized when logged in and not root', () => {
loggedIn = true;
hasRoot = false;
expect(new AdminAuthGuard(mockCapabilites, mockError, mockCurrentUser).canActivate()).toBe(false);
expect(mockError.handle).toHaveBeenCalledWith({ status: 403 });
});
});
});
}; | mockError = { handle: jasmine.createSpy('handle') };
}); | random_line_split |
wanderingspirit.js | 'use strict';
const assert = require('./../../assert');
const common = require('./../../common');
let battle;
describe('Wandering Spirit', function () {
afterEach(() => battle.destroy());
it(`should exchange abilities with an attacker that makes contact`, function () {
battle = common.createBattle([[
{species: 'Decidueye', ability: 'overgrow', moves: ['shadowsneak']},
], [
{species: 'Runerigus', ability: 'wanderingspirit', moves: ['sleeptalk']},
]]);
battle.makeChoices();
assert(battle.p1.active[0].hasAbility('wanderingspirit'));
assert(battle.p2.active[0].hasAbility('overgrow'));
}); | {species: 'Runerigus', ability: 'wanderingspirit', moves: ['bodypress']},
]]);
battle.makeChoices('auto', 'move bodypress dynamax');
assert(battle.p1.active[0].hasAbility('overgrow'));
assert(battle.p2.active[0].hasAbility('wanderingspirit'));
});
it(`should not swap with Wonder Guard`, function () {
battle = common.createBattle([[
{species: 'Shedinja', ability: 'wonderguard', moves: ['shadowsneak']},
], [
{species: 'Runerigus', ability: 'wanderingspirit', moves: ['sleeptalk']},
]]);
battle.makeChoices();
assert(battle.p1.active[0].hasAbility('wonderguard'));
assert(battle.p2.active[0].hasAbility('wanderingspirit'));
});
}); |
it(`should not activate while Dynamaxed`, function () {
battle = common.createBattle([[
{species: 'Decidueye', ability: 'overgrow', moves: ['shadowsneak']},
], [ | random_line_split |
pep8.py | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Leo Cavaille <leo@cavaille.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.slave.wrappers.pep8 import parse_pep8
from debile.slave.utils import cd
from debile.utils.commands import run_command
def pep8(dsc, analysis):
run_command(["dpkg-source", "-x", dsc, "source-pep8"])
with cd('source-pep8'):
out, _, ret = run_command(['pep8', '.'])
failed = ret != 0
for issue in parse_pep8(out.splitlines()):
analysis.results.append(issue)
return (analysis, out, failed, None, None)
def version():
out, _, ret = run_command(['pep8', '--version'])
if ret != 0:
|
return ('pep8', out.strip())
| raise Exception("pep8 is not installed") | conditional_block |
pep8.py | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Leo Cavaille <leo@cavaille.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.slave.wrappers.pep8 import parse_pep8
from debile.slave.utils import cd
from debile.utils.commands import run_command
| failed = ret != 0
for issue in parse_pep8(out.splitlines()):
analysis.results.append(issue)
return (analysis, out, failed, None, None)
def version():
out, _, ret = run_command(['pep8', '--version'])
if ret != 0:
raise Exception("pep8 is not installed")
return ('pep8', out.strip()) | def pep8(dsc, analysis):
run_command(["dpkg-source", "-x", dsc, "source-pep8"])
with cd('source-pep8'):
out, _, ret = run_command(['pep8', '.']) | random_line_split |
pep8.py | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Leo Cavaille <leo@cavaille.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.slave.wrappers.pep8 import parse_pep8
from debile.slave.utils import cd
from debile.utils.commands import run_command
def pep8(dsc, analysis):
|
def version():
out, _, ret = run_command(['pep8', '--version'])
if ret != 0:
raise Exception("pep8 is not installed")
return ('pep8', out.strip())
| run_command(["dpkg-source", "-x", dsc, "source-pep8"])
with cd('source-pep8'):
out, _, ret = run_command(['pep8', '.'])
failed = ret != 0
for issue in parse_pep8(out.splitlines()):
analysis.results.append(issue)
return (analysis, out, failed, None, None) | identifier_body |
pep8.py | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Leo Cavaille <leo@cavaille.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.slave.wrappers.pep8 import parse_pep8
from debile.slave.utils import cd
from debile.utils.commands import run_command
def pep8(dsc, analysis):
run_command(["dpkg-source", "-x", dsc, "source-pep8"])
with cd('source-pep8'):
out, _, ret = run_command(['pep8', '.'])
failed = ret != 0
for issue in parse_pep8(out.splitlines()):
analysis.results.append(issue)
return (analysis, out, failed, None, None)
def | ():
out, _, ret = run_command(['pep8', '--version'])
if ret != 0:
raise Exception("pep8 is not installed")
return ('pep8', out.strip())
| version | identifier_name |
yahoo-request.js | var HttpRequestStack = require('../lib/http-stack').HttpRequestStack;
// Create the 'low-level' net.Stream we're going to be 'stacking' on
var conn = require('net').createConnection(80, 'www.yahoo.com');
conn.on('connect', function() {
console.log('connected!');
// Create our first "stack", the HttpRequestStack instance. This class is
// responsible for writing an HTTP request to the provided 'conn', and then
// parsing the response into a 'response' event and clean 'data' events.
var req = new HttpRequestStack(conn); | ]);
//req.end();
// 'response' is fired after the final HTTP header has been parsed. 'res'
// is a ReadStream, that also contains 'rawHeaders', 'headers' properties.
req.on('response', function(res) {
res.pipe(process.stdout);
res.on('data', function(chunk) {
//console.error(chunk.toString());
});
res.on('end', function() {
console.error('received FIN packet from "conn"');
});
});
}); |
req.get("/", [
"Host: www.yahoo.com"
//"Connection: close",
//"Accept-Encoding: gzip" | random_line_split |
list_cases.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import Option
from collections import deque
from redhat_support_lib.infrastructure.errors import RequestError, \
ConnectionError
from redhat_support_lib.infrastructure import contextmanager
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.helpers.confighelper import EmptyValueError
from redhat_support_tool.plugins import InteractivePlugin, ObjectDisplayOption
from redhat_support_tool.helpers.constants import Constants
from redhat_support_tool.helpers.launchhelper import LaunchHelper
from redhat_support_tool.plugins.get_case import GetCase
import pydoc
import redhat_support_tool.helpers.common as common
import redhat_support_tool.helpers.apihelper as apihelper
import redhat_support_tool.helpers.confighelper as confighelper
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
logger = logging.getLogger("redhat_support_tool.plugins.list_cases")
class ListCases(InteractivePlugin):
plugin_name = 'listcases'
ALL = _("Display all cases")
partial_entries = _('%s of %s cases displayed. Type \'m\' to see more.')
end_of_entries = _('No more cases to display')
_submenu_opts = None
_sections = None
casesAry = None
# Help should not print the option list
help_is_options = False
# Record the last offset value used with the API, and the maximum results
# we should display for one search query.
_nextOffset = 0
_MAX_OFFSET = confighelper.get_config_helper().get(option='max_results')
_MAX_OFFSET = 1500 if not _MAX_OFFSET else int(_MAX_OFFSET)
_limit = 50 if _MAX_OFFSET >= 50 else _MAX_OFFSET
_caseGroupNumbers = None
# for displaying cases owned by an associate as per SFDC
_associateSSOName = None
_view = None
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is a OptionParser built-in. Use it!
'''
return _('%prog')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
return _('Use the \'%s\' command to list your open support cases.\n'
'- For Red Hat employees it lists open cases in your queue.\n'
'- For other users it lists open cases in your account.\n'
% cls.plugin_name)
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
return _('Example:\n'
' - %s\n'
' - %s -g groupname -c -s status -a\n'
' - %s -o ownerSSOName -s severity\n'
' - %s -o all') % (cls.plugin_name,
cls.plugin_name,
cls.plugin_name,
cls.plugin_name)
@classmethod
def get_options(cls):
'''
Subclasses that need command line options should override this method
and return an array of optparse.Option(s) to be used by the
OptionParser.
Example:
return [Option("-f", "--file", action="store",
dest="filename", help='Some file'),
Option("-c", "--case",
action="store", dest="casenumber",
help='A case')]
Would produce the following:
Command (? for help): help mycommand
Usage: mycommand [options]
Use the 'mycommand' command to find a knowledge base solution by ID
Options:
-h, --help show this help message and exit
-f, --file Some file
-c, --case A case
Example:
- mycommand -c 12345 -f abc.txt
'''
return [Option('-c', '--includeclosed', dest='includeclosed',
action='store_true',
help=_('Show closed cases. (optional)'), default=False),
Option('-o', '--owner', dest='owner',
help=_('For Red Hat employees only. Show cases '
'for another Red Hat employee portal login ID.'
' Specify -o ALL to show cases in the Red Hat '
'account instead of your case queue. (optional)'
), default=None),
Option('-g', '--casegroup', dest='casegroup',
help=_('Show cases belonging to a particular case group'
' in your account. (optional) Note, use the '
'\'listcasegroups\' command to see the case '
'groups in your account.'), default=None),
#Option('-k', '--keyword', dest='keyword',
# help=_('Only show cases with the given keyword in '
# 'their title. (optional)'), default=None),
Option('-u', '--ungrouped', dest='ungrouped',
action='store_true',
help=_('Include ungrouped cases in results. When this '
'is set then -o owner options will be ignored.'
'(optional)'), default=False),
Option('-s', '--sortby', dest='sortfield',
help=_("Sort cases by a particular field. Available "
"fields to sort by are: 'caseNumber' (default), "
"'createdDate', 'lastModifiedDate', 'severity', "
"'status'. (optional)"), default='caseNumber'),
Option('-a', '--ascending', dest='sortorder',
action='store_const', const='ASC',
help=_('Sort results in ascending order. Default is '
'to sort in descending order (optional)'),
default='DESC')]
def _check_case_group(self):
if self._options['casegroup']:
valid_groups = []
given_groupAry = str(self._options['casegroup']).split(',')
real_groupAry = common.get_groups()
for i in given_groupAry:
match = False
for j in real_groupAry:
if i.lower() == j.get_name().lower() or \
i == str(j.get_number()):
valid_groups.append(j.get_number())
match = True
if(not match):
msg = _("Unable to find case group %s" % i)
print msg
raise Exception(msg)
if len(valid_groups) > 0:
self._caseGroupNumbers = valid_groups
logger.log(logging.INFO,
'Casegroup(%s) casegroupnumber(%s)' % (
given_groupAry,
self._caseGroupNumbers))
def _check_owner(self):
# Firstly, determine for whom listcases is being run and if they're a
# Red Hat employee (isInternal == True) or not
# If they're internal, then display the open cases they *own* in SFDC
# ...except however if the -o all, -g or -u options are specified, then
# it displays cases in the Red Hat employee's account.
# If they're not internal, then display the open cases in their account
try:
api = apihelper.get_api()
username = api.config.username
userobj = contextmanager.get('user')
if not userobj:
userobj = api.users.get(username)
contextmanager.add('user', userobj)
if self._options['owner']:
if not userobj.isInternal:
raise Exception("The -o switch is only available to Red Hat"
" employees")
elif self._options['owner'].lower() != 'all':
username = self._options['owner']
userobj = api.users.get(username)
if not userobj.isInternal:
# for some reason RH users can't display non-RH users
raise Exception("Red Hat employees are unable to list"
"cases for non-Red Hat portal users.")
if userobj.isInternal:
if not (str(self._options['owner']).lower() == 'all' or
self._caseGroupNumbers or
self._options['ungrouped']):
# this will trigger the display of cases owned as per SFDC
self._associateSSOName = username
self._view = 'internal'
except RequestError, re:
if re.status == 404:
msg = _("Unable to find user %s" % username)
else:
msg = _('Problem querying the support services API. Reason: '
'%s' % re.reason)
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services API. '
'Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception, e:
msg = _("%s" % str(e))
print msg
logger.log(logging.WARNING, msg)
raise
def validate_args(self):
self._check_case_group()
self._check_owner()
def get_intro_text(self):
return _('\nType the number of the case to view or \'e\' '
'to return to the previous menu.')
def get_prompt_text(self):
return _('Select a Case: ')
def get_sub_menu_options(self):
return self._submenu_opts
def get_more_options(self, num_options):
if (len(self.casesAry) < self._nextOffset or
len(self.casesAry) == 0 or
self._nextOffset > self._MAX_OFFSET):
# Either we did not max out on results last time, there were
# no results last time, or we have seen more than _MAX_OFFSET
# results.
# In the instance of cases, the maximum a single query can
# retrieve is 1500 cases, hence MAX_OFFSET is set to 1450
return False
# Strata introduces an issue where if the limit > 50, it will only
# return 50 results. This creates a potential issue if the terminal
# size is greater than 53.
limit = self._get_limit()
if num_options > limit:
num_options = limit
searchopts = {'count': num_options, 'start': self._nextOffset}
self._nextOffset += num_options
newresults = self._get_cases(searchopts)
if len(newresults) == 0:
return False
self.casesAry.extend(newresults)
self._parse_cases(newresults)
return True
def postinit(self):
self._submenu_opts = deque()
self._sections = {}
| searchopts = {'count': self._limit, 'start': 0}
self.casesAry = self._get_cases(searchopts)
self._nextOffset = self._limit
if not self._parse_cases(self.casesAry):
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise Exception()
if not common.is_interactive():
while self.get_more_options(self._limit):
continue
def non_interactive_action(self):
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
try:
print doc.encode("UTF-8", 'replace')
except Exception, e:
# There are some truly bizarre errors when you pipe
# the output from python's 'print' function with sys encoding
# set to ascii. These errors seem to manifes when you pipe
# to something like 'more' or 'less'. You'll get encoding errors.
# Curiously, you don't see them with 'grep' or even simply piping
# to terminal. WTF :(
logger.log(logging.WARNING, e)
import sys
print doc.encode(sys.getdefaultencoding(),
'replace')
def interactive_action(self, display_option=None):
if display_option.display_text == self.ALL:
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
pydoc.pipepager(doc.encode("UTF-8", 'replace'),
cmd='less -R')
else:
val = None
try:
val = display_option.stored_obj
lh = LaunchHelper(GetCase)
lh.run(val)
except:
raise Exception()
def _parse_cases(self, cases_ary):
'''
Use this for non-interactive display of results.
'''
if len(cases_ary) == 0:
return False
try:
for val in cases_ary:
doc = u''
doc += '%-12s %-60s\n' % ('%s:' % Constants.CASE_NUMBER,
val.get_caseNumber())
doc += '%-12s %-60s\n' % ('%s:' % Constants.TITLE,
val.get_summary())
doc += '%-12s %-60s\n' % (Constants.CASE_STATUS,
val.get_status())
doc += '%-12s %-60s\n' % (Constants.CASE_SEVERITY,
val.get_severity())
vuri = val.get_view_uri()
if vuri:
doc += '%-12s %-60s' % (Constants.URL, vuri)
else:
doc += '%-12s %-60s' % (Constants.URL, val.get_uri())
doc += '\n\n%s%s%s\n\n' % (Constants.BOLD,
str('-' * Constants.MAX_RULE),
Constants.END)
disp_opt = ObjectDisplayOption('%s [%-19s] [sev%s] %s' % (
val.get_caseNumber(),
val.get_status(),
val.get_severity()[0],
val.get_summary()),
'interactive_action',
val.get_caseNumber())
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
except:
msg = _('ERROR: problem parsing the cases.')
print msg
logger.log(logging.WARNING, msg)
return False
return True
def _get_cases(self, searchopts):
api = None
try:
api = apihelper.get_api()
filt = api.im.makeCaseFilter(
count=searchopts['count'],
start=searchopts['start'],
includeClosed=self._options['includeclosed'],
groupNumbers=self._caseGroupNumbers,
associateSSOName=self._associateSSOName,
view=self._view,
sortField=self._options['sortfield'],
sortOrder=self._options['sortorder'],
#keyword=self._options['keyword'],
onlyUngrouped=self._options['ungrouped'])
return api.cases.filter(filt)
except EmptyValueError, eve:
msg = _('ERROR: %s') % str(eve)
print msg
logger.log(logging.WARNING, msg)
raise
except RequestError, re:
msg = _('Unable to connect to support services API. '
'Reason: %s') % re.reason
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services '
'API. Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception:
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise
def _get_limit(self):
limit = self._limit
remaining = self._MAX_OFFSET - self._nextOffset
return limit if remaining >= limit else remaining % limit | random_line_split | |
list_cases.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import Option
from collections import deque
from redhat_support_lib.infrastructure.errors import RequestError, \
ConnectionError
from redhat_support_lib.infrastructure import contextmanager
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.helpers.confighelper import EmptyValueError
from redhat_support_tool.plugins import InteractivePlugin, ObjectDisplayOption
from redhat_support_tool.helpers.constants import Constants
from redhat_support_tool.helpers.launchhelper import LaunchHelper
from redhat_support_tool.plugins.get_case import GetCase
import pydoc
import redhat_support_tool.helpers.common as common
import redhat_support_tool.helpers.apihelper as apihelper
import redhat_support_tool.helpers.confighelper as confighelper
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
logger = logging.getLogger("redhat_support_tool.plugins.list_cases")
class ListCases(InteractivePlugin):
plugin_name = 'listcases'
ALL = _("Display all cases")
partial_entries = _('%s of %s cases displayed. Type \'m\' to see more.')
end_of_entries = _('No more cases to display')
_submenu_opts = None
_sections = None
casesAry = None
# Help should not print the option list
help_is_options = False
# Record the last offset value used with the API, and the maximum results
# we should display for one search query.
_nextOffset = 0
_MAX_OFFSET = confighelper.get_config_helper().get(option='max_results')
_MAX_OFFSET = 1500 if not _MAX_OFFSET else int(_MAX_OFFSET)
_limit = 50 if _MAX_OFFSET >= 50 else _MAX_OFFSET
_caseGroupNumbers = None
# for displaying cases owned by an associate as per SFDC
_associateSSOName = None
_view = None
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is a OptionParser built-in. Use it!
'''
return _('%prog')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
return _('Use the \'%s\' command to list your open support cases.\n'
'- For Red Hat employees it lists open cases in your queue.\n'
'- For other users it lists open cases in your account.\n'
% cls.plugin_name)
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
return _('Example:\n'
' - %s\n'
' - %s -g groupname -c -s status -a\n'
' - %s -o ownerSSOName -s severity\n'
' - %s -o all') % (cls.plugin_name,
cls.plugin_name,
cls.plugin_name,
cls.plugin_name)
@classmethod
def get_options(cls):
'''
Subclasses that need command line options should override this method
and return an array of optparse.Option(s) to be used by the
OptionParser.
Example:
return [Option("-f", "--file", action="store",
dest="filename", help='Some file'),
Option("-c", "--case",
action="store", dest="casenumber",
help='A case')]
Would produce the following:
Command (? for help): help mycommand
Usage: mycommand [options]
Use the 'mycommand' command to find a knowledge base solution by ID
Options:
-h, --help show this help message and exit
-f, --file Some file
-c, --case A case
Example:
- mycommand -c 12345 -f abc.txt
'''
return [Option('-c', '--includeclosed', dest='includeclosed',
action='store_true',
help=_('Show closed cases. (optional)'), default=False),
Option('-o', '--owner', dest='owner',
help=_('For Red Hat employees only. Show cases '
'for another Red Hat employee portal login ID.'
' Specify -o ALL to show cases in the Red Hat '
'account instead of your case queue. (optional)'
), default=None),
Option('-g', '--casegroup', dest='casegroup',
help=_('Show cases belonging to a particular case group'
' in your account. (optional) Note, use the '
'\'listcasegroups\' command to see the case '
'groups in your account.'), default=None),
#Option('-k', '--keyword', dest='keyword',
# help=_('Only show cases with the given keyword in '
# 'their title. (optional)'), default=None),
Option('-u', '--ungrouped', dest='ungrouped',
action='store_true',
help=_('Include ungrouped cases in results. When this '
'is set then -o owner options will be ignored.'
'(optional)'), default=False),
Option('-s', '--sortby', dest='sortfield',
help=_("Sort cases by a particular field. Available "
"fields to sort by are: 'caseNumber' (default), "
"'createdDate', 'lastModifiedDate', 'severity', "
"'status'. (optional)"), default='caseNumber'),
Option('-a', '--ascending', dest='sortorder',
action='store_const', const='ASC',
help=_('Sort results in ascending order. Default is '
'to sort in descending order (optional)'),
default='DESC')]
def | (self):
if self._options['casegroup']:
valid_groups = []
given_groupAry = str(self._options['casegroup']).split(',')
real_groupAry = common.get_groups()
for i in given_groupAry:
match = False
for j in real_groupAry:
if i.lower() == j.get_name().lower() or \
i == str(j.get_number()):
valid_groups.append(j.get_number())
match = True
if(not match):
msg = _("Unable to find case group %s" % i)
print msg
raise Exception(msg)
if len(valid_groups) > 0:
self._caseGroupNumbers = valid_groups
logger.log(logging.INFO,
'Casegroup(%s) casegroupnumber(%s)' % (
given_groupAry,
self._caseGroupNumbers))
def _check_owner(self):
# Firstly, determine for whom listcases is being run and if they're a
# Red Hat employee (isInternal == True) or not
# If they're internal, then display the open cases they *own* in SFDC
# ...except however if the -o all, -g or -u options are specified, then
# it displays cases in the Red Hat employee's account.
# If they're not internal, then display the open cases in their account
try:
api = apihelper.get_api()
username = api.config.username
userobj = contextmanager.get('user')
if not userobj:
userobj = api.users.get(username)
contextmanager.add('user', userobj)
if self._options['owner']:
if not userobj.isInternal:
raise Exception("The -o switch is only available to Red Hat"
" employees")
elif self._options['owner'].lower() != 'all':
username = self._options['owner']
userobj = api.users.get(username)
if not userobj.isInternal:
# for some reason RH users can't display non-RH users
raise Exception("Red Hat employees are unable to list"
"cases for non-Red Hat portal users.")
if userobj.isInternal:
if not (str(self._options['owner']).lower() == 'all' or
self._caseGroupNumbers or
self._options['ungrouped']):
# this will trigger the display of cases owned as per SFDC
self._associateSSOName = username
self._view = 'internal'
except RequestError, re:
if re.status == 404:
msg = _("Unable to find user %s" % username)
else:
msg = _('Problem querying the support services API. Reason: '
'%s' % re.reason)
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services API. '
'Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception, e:
msg = _("%s" % str(e))
print msg
logger.log(logging.WARNING, msg)
raise
def validate_args(self):
self._check_case_group()
self._check_owner()
def get_intro_text(self):
return _('\nType the number of the case to view or \'e\' '
'to return to the previous menu.')
def get_prompt_text(self):
return _('Select a Case: ')
def get_sub_menu_options(self):
return self._submenu_opts
def get_more_options(self, num_options):
if (len(self.casesAry) < self._nextOffset or
len(self.casesAry) == 0 or
self._nextOffset > self._MAX_OFFSET):
# Either we did not max out on results last time, there were
# no results last time, or we have seen more than _MAX_OFFSET
# results.
# In the instance of cases, the maximum a single query can
# retrieve is 1500 cases, hence MAX_OFFSET is set to 1450
return False
# Strata introduces an issue where if the limit > 50, it will only
# return 50 results. This creates a potential issue if the terminal
# size is greater than 53.
limit = self._get_limit()
if num_options > limit:
num_options = limit
searchopts = {'count': num_options, 'start': self._nextOffset}
self._nextOffset += num_options
newresults = self._get_cases(searchopts)
if len(newresults) == 0:
return False
self.casesAry.extend(newresults)
self._parse_cases(newresults)
return True
def postinit(self):
self._submenu_opts = deque()
self._sections = {}
searchopts = {'count': self._limit, 'start': 0}
self.casesAry = self._get_cases(searchopts)
self._nextOffset = self._limit
if not self._parse_cases(self.casesAry):
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise Exception()
if not common.is_interactive():
while self.get_more_options(self._limit):
continue
def non_interactive_action(self):
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
try:
print doc.encode("UTF-8", 'replace')
except Exception, e:
# There are some truly bizarre errors when you pipe
# the output from python's 'print' function with sys encoding
# set to ascii. These errors seem to manifes when you pipe
# to something like 'more' or 'less'. You'll get encoding errors.
# Curiously, you don't see them with 'grep' or even simply piping
# to terminal. WTF :(
logger.log(logging.WARNING, e)
import sys
print doc.encode(sys.getdefaultencoding(),
'replace')
def interactive_action(self, display_option=None):
if display_option.display_text == self.ALL:
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
pydoc.pipepager(doc.encode("UTF-8", 'replace'),
cmd='less -R')
else:
val = None
try:
val = display_option.stored_obj
lh = LaunchHelper(GetCase)
lh.run(val)
except:
raise Exception()
def _parse_cases(self, cases_ary):
'''
Use this for non-interactive display of results.
'''
if len(cases_ary) == 0:
return False
try:
for val in cases_ary:
doc = u''
doc += '%-12s %-60s\n' % ('%s:' % Constants.CASE_NUMBER,
val.get_caseNumber())
doc += '%-12s %-60s\n' % ('%s:' % Constants.TITLE,
val.get_summary())
doc += '%-12s %-60s\n' % (Constants.CASE_STATUS,
val.get_status())
doc += '%-12s %-60s\n' % (Constants.CASE_SEVERITY,
val.get_severity())
vuri = val.get_view_uri()
if vuri:
doc += '%-12s %-60s' % (Constants.URL, vuri)
else:
doc += '%-12s %-60s' % (Constants.URL, val.get_uri())
doc += '\n\n%s%s%s\n\n' % (Constants.BOLD,
str('-' * Constants.MAX_RULE),
Constants.END)
disp_opt = ObjectDisplayOption('%s [%-19s] [sev%s] %s' % (
val.get_caseNumber(),
val.get_status(),
val.get_severity()[0],
val.get_summary()),
'interactive_action',
val.get_caseNumber())
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
except:
msg = _('ERROR: problem parsing the cases.')
print msg
logger.log(logging.WARNING, msg)
return False
return True
def _get_cases(self, searchopts):
api = None
try:
api = apihelper.get_api()
filt = api.im.makeCaseFilter(
count=searchopts['count'],
start=searchopts['start'],
includeClosed=self._options['includeclosed'],
groupNumbers=self._caseGroupNumbers,
associateSSOName=self._associateSSOName,
view=self._view,
sortField=self._options['sortfield'],
sortOrder=self._options['sortorder'],
#keyword=self._options['keyword'],
onlyUngrouped=self._options['ungrouped'])
return api.cases.filter(filt)
except EmptyValueError, eve:
msg = _('ERROR: %s') % str(eve)
print msg
logger.log(logging.WARNING, msg)
raise
except RequestError, re:
msg = _('Unable to connect to support services API. '
'Reason: %s') % re.reason
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services '
'API. Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception:
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise
def _get_limit(self):
limit = self._limit
remaining = self._MAX_OFFSET - self._nextOffset
return limit if remaining >= limit else remaining % limit
| _check_case_group | identifier_name |
list_cases.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import Option
from collections import deque
from redhat_support_lib.infrastructure.errors import RequestError, \
ConnectionError
from redhat_support_lib.infrastructure import contextmanager
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.helpers.confighelper import EmptyValueError
from redhat_support_tool.plugins import InteractivePlugin, ObjectDisplayOption
from redhat_support_tool.helpers.constants import Constants
from redhat_support_tool.helpers.launchhelper import LaunchHelper
from redhat_support_tool.plugins.get_case import GetCase
import pydoc
import redhat_support_tool.helpers.common as common
import redhat_support_tool.helpers.apihelper as apihelper
import redhat_support_tool.helpers.confighelper as confighelper
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
logger = logging.getLogger("redhat_support_tool.plugins.list_cases")
class ListCases(InteractivePlugin):
plugin_name = 'listcases'
ALL = _("Display all cases")
partial_entries = _('%s of %s cases displayed. Type \'m\' to see more.')
end_of_entries = _('No more cases to display')
_submenu_opts = None
_sections = None
casesAry = None
# Help should not print the option list
help_is_options = False
# Record the last offset value used with the API, and the maximum results
# we should display for one search query.
_nextOffset = 0
_MAX_OFFSET = confighelper.get_config_helper().get(option='max_results')
_MAX_OFFSET = 1500 if not _MAX_OFFSET else int(_MAX_OFFSET)
_limit = 50 if _MAX_OFFSET >= 50 else _MAX_OFFSET
_caseGroupNumbers = None
# for displaying cases owned by an associate as per SFDC
_associateSSOName = None
_view = None
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is a OptionParser built-in. Use it!
'''
return _('%prog')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
return _('Use the \'%s\' command to list your open support cases.\n'
'- For Red Hat employees it lists open cases in your queue.\n'
'- For other users it lists open cases in your account.\n'
% cls.plugin_name)
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
return _('Example:\n'
' - %s\n'
' - %s -g groupname -c -s status -a\n'
' - %s -o ownerSSOName -s severity\n'
' - %s -o all') % (cls.plugin_name,
cls.plugin_name,
cls.plugin_name,
cls.plugin_name)
@classmethod
def get_options(cls):
'''
Subclasses that need command line options should override this method
and return an array of optparse.Option(s) to be used by the
OptionParser.
Example:
return [Option("-f", "--file", action="store",
dest="filename", help='Some file'),
Option("-c", "--case",
action="store", dest="casenumber",
help='A case')]
Would produce the following:
Command (? for help): help mycommand
Usage: mycommand [options]
Use the 'mycommand' command to find a knowledge base solution by ID
Options:
-h, --help show this help message and exit
-f, --file Some file
-c, --case A case
Example:
- mycommand -c 12345 -f abc.txt
'''
return [Option('-c', '--includeclosed', dest='includeclosed',
action='store_true',
help=_('Show closed cases. (optional)'), default=False),
Option('-o', '--owner', dest='owner',
help=_('For Red Hat employees only. Show cases '
'for another Red Hat employee portal login ID.'
' Specify -o ALL to show cases in the Red Hat '
'account instead of your case queue. (optional)'
), default=None),
Option('-g', '--casegroup', dest='casegroup',
help=_('Show cases belonging to a particular case group'
' in your account. (optional) Note, use the '
'\'listcasegroups\' command to see the case '
'groups in your account.'), default=None),
#Option('-k', '--keyword', dest='keyword',
# help=_('Only show cases with the given keyword in '
# 'their title. (optional)'), default=None),
Option('-u', '--ungrouped', dest='ungrouped',
action='store_true',
help=_('Include ungrouped cases in results. When this '
'is set then -o owner options will be ignored.'
'(optional)'), default=False),
Option('-s', '--sortby', dest='sortfield',
help=_("Sort cases by a particular field. Available "
"fields to sort by are: 'caseNumber' (default), "
"'createdDate', 'lastModifiedDate', 'severity', "
"'status'. (optional)"), default='caseNumber'),
Option('-a', '--ascending', dest='sortorder',
action='store_const', const='ASC',
help=_('Sort results in ascending order. Default is '
'to sort in descending order (optional)'),
default='DESC')]
def _check_case_group(self):
if self._options['casegroup']:
valid_groups = []
given_groupAry = str(self._options['casegroup']).split(',')
real_groupAry = common.get_groups()
for i in given_groupAry:
match = False
for j in real_groupAry:
if i.lower() == j.get_name().lower() or \
i == str(j.get_number()):
valid_groups.append(j.get_number())
match = True
if(not match):
msg = _("Unable to find case group %s" % i)
print msg
raise Exception(msg)
if len(valid_groups) > 0:
self._caseGroupNumbers = valid_groups
logger.log(logging.INFO,
'Casegroup(%s) casegroupnumber(%s)' % (
given_groupAry,
self._caseGroupNumbers))
def _check_owner(self):
# Firstly, determine for whom listcases is being run and if they're a
# Red Hat employee (isInternal == True) or not
# If they're internal, then display the open cases they *own* in SFDC
# ...except however if the -o all, -g or -u options are specified, then
# it displays cases in the Red Hat employee's account.
# If they're not internal, then display the open cases in their account
try:
api = apihelper.get_api()
username = api.config.username
userobj = contextmanager.get('user')
if not userobj:
userobj = api.users.get(username)
contextmanager.add('user', userobj)
if self._options['owner']:
if not userobj.isInternal:
raise Exception("The -o switch is only available to Red Hat"
" employees")
elif self._options['owner'].lower() != 'all':
username = self._options['owner']
userobj = api.users.get(username)
if not userobj.isInternal:
# for some reason RH users can't display non-RH users
raise Exception("Red Hat employees are unable to list"
"cases for non-Red Hat portal users.")
if userobj.isInternal:
|
except RequestError, re:
if re.status == 404:
msg = _("Unable to find user %s" % username)
else:
msg = _('Problem querying the support services API. Reason: '
'%s' % re.reason)
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services API. '
'Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception, e:
msg = _("%s" % str(e))
print msg
logger.log(logging.WARNING, msg)
raise
def validate_args(self):
self._check_case_group()
self._check_owner()
def get_intro_text(self):
return _('\nType the number of the case to view or \'e\' '
'to return to the previous menu.')
def get_prompt_text(self):
return _('Select a Case: ')
def get_sub_menu_options(self):
return self._submenu_opts
def get_more_options(self, num_options):
if (len(self.casesAry) < self._nextOffset or
len(self.casesAry) == 0 or
self._nextOffset > self._MAX_OFFSET):
# Either we did not max out on results last time, there were
# no results last time, or we have seen more than _MAX_OFFSET
# results.
# In the instance of cases, the maximum a single query can
# retrieve is 1500 cases, hence MAX_OFFSET is set to 1450
return False
# Strata introduces an issue where if the limit > 50, it will only
# return 50 results. This creates a potential issue if the terminal
# size is greater than 53.
limit = self._get_limit()
if num_options > limit:
num_options = limit
searchopts = {'count': num_options, 'start': self._nextOffset}
self._nextOffset += num_options
newresults = self._get_cases(searchopts)
if len(newresults) == 0:
return False
self.casesAry.extend(newresults)
self._parse_cases(newresults)
return True
def postinit(self):
self._submenu_opts = deque()
self._sections = {}
searchopts = {'count': self._limit, 'start': 0}
self.casesAry = self._get_cases(searchopts)
self._nextOffset = self._limit
if not self._parse_cases(self.casesAry):
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise Exception()
if not common.is_interactive():
while self.get_more_options(self._limit):
continue
def non_interactive_action(self):
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
try:
print doc.encode("UTF-8", 'replace')
except Exception, e:
# There are some truly bizarre errors when you pipe
# the output from python's 'print' function with sys encoding
# set to ascii. These errors seem to manifes when you pipe
# to something like 'more' or 'less'. You'll get encoding errors.
# Curiously, you don't see them with 'grep' or even simply piping
# to terminal. WTF :(
logger.log(logging.WARNING, e)
import sys
print doc.encode(sys.getdefaultencoding(),
'replace')
def interactive_action(self, display_option=None):
if display_option.display_text == self.ALL:
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
pydoc.pipepager(doc.encode("UTF-8", 'replace'),
cmd='less -R')
else:
val = None
try:
val = display_option.stored_obj
lh = LaunchHelper(GetCase)
lh.run(val)
except:
raise Exception()
def _parse_cases(self, cases_ary):
'''
Use this for non-interactive display of results.
'''
if len(cases_ary) == 0:
return False
try:
for val in cases_ary:
doc = u''
doc += '%-12s %-60s\n' % ('%s:' % Constants.CASE_NUMBER,
val.get_caseNumber())
doc += '%-12s %-60s\n' % ('%s:' % Constants.TITLE,
val.get_summary())
doc += '%-12s %-60s\n' % (Constants.CASE_STATUS,
val.get_status())
doc += '%-12s %-60s\n' % (Constants.CASE_SEVERITY,
val.get_severity())
vuri = val.get_view_uri()
if vuri:
doc += '%-12s %-60s' % (Constants.URL, vuri)
else:
doc += '%-12s %-60s' % (Constants.URL, val.get_uri())
doc += '\n\n%s%s%s\n\n' % (Constants.BOLD,
str('-' * Constants.MAX_RULE),
Constants.END)
disp_opt = ObjectDisplayOption('%s [%-19s] [sev%s] %s' % (
val.get_caseNumber(),
val.get_status(),
val.get_severity()[0],
val.get_summary()),
'interactive_action',
val.get_caseNumber())
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
except:
msg = _('ERROR: problem parsing the cases.')
print msg
logger.log(logging.WARNING, msg)
return False
return True
def _get_cases(self, searchopts):
api = None
try:
api = apihelper.get_api()
filt = api.im.makeCaseFilter(
count=searchopts['count'],
start=searchopts['start'],
includeClosed=self._options['includeclosed'],
groupNumbers=self._caseGroupNumbers,
associateSSOName=self._associateSSOName,
view=self._view,
sortField=self._options['sortfield'],
sortOrder=self._options['sortorder'],
#keyword=self._options['keyword'],
onlyUngrouped=self._options['ungrouped'])
return api.cases.filter(filt)
except EmptyValueError, eve:
msg = _('ERROR: %s') % str(eve)
print msg
logger.log(logging.WARNING, msg)
raise
except RequestError, re:
msg = _('Unable to connect to support services API. '
'Reason: %s') % re.reason
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services '
'API. Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception:
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise
def _get_limit(self):
limit = self._limit
remaining = self._MAX_OFFSET - self._nextOffset
return limit if remaining >= limit else remaining % limit
| if not (str(self._options['owner']).lower() == 'all' or
self._caseGroupNumbers or
self._options['ungrouped']):
# this will trigger the display of cases owned as per SFDC
self._associateSSOName = username
self._view = 'internal' | conditional_block |
list_cases.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import Option
from collections import deque
from redhat_support_lib.infrastructure.errors import RequestError, \
ConnectionError
from redhat_support_lib.infrastructure import contextmanager
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.helpers.confighelper import EmptyValueError
from redhat_support_tool.plugins import InteractivePlugin, ObjectDisplayOption
from redhat_support_tool.helpers.constants import Constants
from redhat_support_tool.helpers.launchhelper import LaunchHelper
from redhat_support_tool.plugins.get_case import GetCase
import pydoc
import redhat_support_tool.helpers.common as common
import redhat_support_tool.helpers.apihelper as apihelper
import redhat_support_tool.helpers.confighelper as confighelper
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
logger = logging.getLogger("redhat_support_tool.plugins.list_cases")
class ListCases(InteractivePlugin):
plugin_name = 'listcases'
ALL = _("Display all cases")
partial_entries = _('%s of %s cases displayed. Type \'m\' to see more.')
end_of_entries = _('No more cases to display')
_submenu_opts = None
_sections = None
casesAry = None
# Help should not print the option list
help_is_options = False
# Record the last offset value used with the API, and the maximum results
# we should display for one search query.
_nextOffset = 0
_MAX_OFFSET = confighelper.get_config_helper().get(option='max_results')
_MAX_OFFSET = 1500 if not _MAX_OFFSET else int(_MAX_OFFSET)
_limit = 50 if _MAX_OFFSET >= 50 else _MAX_OFFSET
_caseGroupNumbers = None
# for displaying cases owned by an associate as per SFDC
_associateSSOName = None
_view = None
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is a OptionParser built-in. Use it!
'''
return _('%prog')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
return _('Use the \'%s\' command to list your open support cases.\n'
'- For Red Hat employees it lists open cases in your queue.\n'
'- For other users it lists open cases in your account.\n'
% cls.plugin_name)
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
return _('Example:\n'
' - %s\n'
' - %s -g groupname -c -s status -a\n'
' - %s -o ownerSSOName -s severity\n'
' - %s -o all') % (cls.plugin_name,
cls.plugin_name,
cls.plugin_name,
cls.plugin_name)
@classmethod
def get_options(cls):
'''
Subclasses that need command line options should override this method
and return an array of optparse.Option(s) to be used by the
OptionParser.
Example:
return [Option("-f", "--file", action="store",
dest="filename", help='Some file'),
Option("-c", "--case",
action="store", dest="casenumber",
help='A case')]
Would produce the following:
Command (? for help): help mycommand
Usage: mycommand [options]
Use the 'mycommand' command to find a knowledge base solution by ID
Options:
-h, --help show this help message and exit
-f, --file Some file
-c, --case A case
Example:
- mycommand -c 12345 -f abc.txt
'''
return [Option('-c', '--includeclosed', dest='includeclosed',
action='store_true',
help=_('Show closed cases. (optional)'), default=False),
Option('-o', '--owner', dest='owner',
help=_('For Red Hat employees only. Show cases '
'for another Red Hat employee portal login ID.'
' Specify -o ALL to show cases in the Red Hat '
'account instead of your case queue. (optional)'
), default=None),
Option('-g', '--casegroup', dest='casegroup',
help=_('Show cases belonging to a particular case group'
' in your account. (optional) Note, use the '
'\'listcasegroups\' command to see the case '
'groups in your account.'), default=None),
#Option('-k', '--keyword', dest='keyword',
# help=_('Only show cases with the given keyword in '
# 'their title. (optional)'), default=None),
Option('-u', '--ungrouped', dest='ungrouped',
action='store_true',
help=_('Include ungrouped cases in results. When this '
'is set then -o owner options will be ignored.'
'(optional)'), default=False),
Option('-s', '--sortby', dest='sortfield',
help=_("Sort cases by a particular field. Available "
"fields to sort by are: 'caseNumber' (default), "
"'createdDate', 'lastModifiedDate', 'severity', "
"'status'. (optional)"), default='caseNumber'),
Option('-a', '--ascending', dest='sortorder',
action='store_const', const='ASC',
help=_('Sort results in ascending order. Default is '
'to sort in descending order (optional)'),
default='DESC')]
def _check_case_group(self):
if self._options['casegroup']:
valid_groups = []
given_groupAry = str(self._options['casegroup']).split(',')
real_groupAry = common.get_groups()
for i in given_groupAry:
match = False
for j in real_groupAry:
if i.lower() == j.get_name().lower() or \
i == str(j.get_number()):
valid_groups.append(j.get_number())
match = True
if(not match):
msg = _("Unable to find case group %s" % i)
print msg
raise Exception(msg)
if len(valid_groups) > 0:
self._caseGroupNumbers = valid_groups
logger.log(logging.INFO,
'Casegroup(%s) casegroupnumber(%s)' % (
given_groupAry,
self._caseGroupNumbers))
def _check_owner(self):
# Firstly, determine for whom listcases is being run and if they're a
# Red Hat employee (isInternal == True) or not
# If they're internal, then display the open cases they *own* in SFDC
# ...except however if the -o all, -g or -u options are specified, then
# it displays cases in the Red Hat employee's account.
# If they're not internal, then display the open cases in their account
try:
api = apihelper.get_api()
username = api.config.username
userobj = contextmanager.get('user')
if not userobj:
userobj = api.users.get(username)
contextmanager.add('user', userobj)
if self._options['owner']:
if not userobj.isInternal:
raise Exception("The -o switch is only available to Red Hat"
" employees")
elif self._options['owner'].lower() != 'all':
username = self._options['owner']
userobj = api.users.get(username)
if not userobj.isInternal:
# for some reason RH users can't display non-RH users
raise Exception("Red Hat employees are unable to list"
"cases for non-Red Hat portal users.")
if userobj.isInternal:
if not (str(self._options['owner']).lower() == 'all' or
self._caseGroupNumbers or
self._options['ungrouped']):
# this will trigger the display of cases owned as per SFDC
self._associateSSOName = username
self._view = 'internal'
except RequestError, re:
if re.status == 404:
msg = _("Unable to find user %s" % username)
else:
msg = _('Problem querying the support services API. Reason: '
'%s' % re.reason)
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services API. '
'Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception, e:
msg = _("%s" % str(e))
print msg
logger.log(logging.WARNING, msg)
raise
def validate_args(self):
self._check_case_group()
self._check_owner()
def get_intro_text(self):
return _('\nType the number of the case to view or \'e\' '
'to return to the previous menu.')
def get_prompt_text(self):
return _('Select a Case: ')
def get_sub_menu_options(self):
|
def get_more_options(self, num_options):
if (len(self.casesAry) < self._nextOffset or
len(self.casesAry) == 0 or
self._nextOffset > self._MAX_OFFSET):
# Either we did not max out on results last time, there were
# no results last time, or we have seen more than _MAX_OFFSET
# results.
# In the instance of cases, the maximum a single query can
# retrieve is 1500 cases, hence MAX_OFFSET is set to 1450
return False
# Strata introduces an issue where if the limit > 50, it will only
# return 50 results. This creates a potential issue if the terminal
# size is greater than 53.
limit = self._get_limit()
if num_options > limit:
num_options = limit
searchopts = {'count': num_options, 'start': self._nextOffset}
self._nextOffset += num_options
newresults = self._get_cases(searchopts)
if len(newresults) == 0:
return False
self.casesAry.extend(newresults)
self._parse_cases(newresults)
return True
def postinit(self):
self._submenu_opts = deque()
self._sections = {}
searchopts = {'count': self._limit, 'start': 0}
self.casesAry = self._get_cases(searchopts)
self._nextOffset = self._limit
if not self._parse_cases(self.casesAry):
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise Exception()
if not common.is_interactive():
while self.get_more_options(self._limit):
continue
def non_interactive_action(self):
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
try:
print doc.encode("UTF-8", 'replace')
except Exception, e:
# There are some truly bizarre errors when you pipe
# the output from python's 'print' function with sys encoding
# set to ascii. These errors seem to manifes when you pipe
# to something like 'more' or 'less'. You'll get encoding errors.
# Curiously, you don't see them with 'grep' or even simply piping
# to terminal. WTF :(
logger.log(logging.WARNING, e)
import sys
print doc.encode(sys.getdefaultencoding(),
'replace')
def interactive_action(self, display_option=None):
if display_option.display_text == self.ALL:
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
pydoc.pipepager(doc.encode("UTF-8", 'replace'),
cmd='less -R')
else:
val = None
try:
val = display_option.stored_obj
lh = LaunchHelper(GetCase)
lh.run(val)
except:
raise Exception()
def _parse_cases(self, cases_ary):
'''
Use this for non-interactive display of results.
'''
if len(cases_ary) == 0:
return False
try:
for val in cases_ary:
doc = u''
doc += '%-12s %-60s\n' % ('%s:' % Constants.CASE_NUMBER,
val.get_caseNumber())
doc += '%-12s %-60s\n' % ('%s:' % Constants.TITLE,
val.get_summary())
doc += '%-12s %-60s\n' % (Constants.CASE_STATUS,
val.get_status())
doc += '%-12s %-60s\n' % (Constants.CASE_SEVERITY,
val.get_severity())
vuri = val.get_view_uri()
if vuri:
doc += '%-12s %-60s' % (Constants.URL, vuri)
else:
doc += '%-12s %-60s' % (Constants.URL, val.get_uri())
doc += '\n\n%s%s%s\n\n' % (Constants.BOLD,
str('-' * Constants.MAX_RULE),
Constants.END)
disp_opt = ObjectDisplayOption('%s [%-19s] [sev%s] %s' % (
val.get_caseNumber(),
val.get_status(),
val.get_severity()[0],
val.get_summary()),
'interactive_action',
val.get_caseNumber())
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
except:
msg = _('ERROR: problem parsing the cases.')
print msg
logger.log(logging.WARNING, msg)
return False
return True
def _get_cases(self, searchopts):
api = None
try:
api = apihelper.get_api()
filt = api.im.makeCaseFilter(
count=searchopts['count'],
start=searchopts['start'],
includeClosed=self._options['includeclosed'],
groupNumbers=self._caseGroupNumbers,
associateSSOName=self._associateSSOName,
view=self._view,
sortField=self._options['sortfield'],
sortOrder=self._options['sortorder'],
#keyword=self._options['keyword'],
onlyUngrouped=self._options['ungrouped'])
return api.cases.filter(filt)
except EmptyValueError, eve:
msg = _('ERROR: %s') % str(eve)
print msg
logger.log(logging.WARNING, msg)
raise
except RequestError, re:
msg = _('Unable to connect to support services API. '
'Reason: %s') % re.reason
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services '
'API. Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception:
msg = _("Unable to find cases")
print msg
logger.log(logging.WARNING, msg)
raise
def _get_limit(self):
limit = self._limit
remaining = self._MAX_OFFSET - self._nextOffset
return limit if remaining >= limit else remaining % limit
| return self._submenu_opts | identifier_body |
VoiceRecordComposerTile.tsx | /*
Copyright 2021 - 2022 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React, { ReactNode } from "react";
import { Room } from "matrix-js-sdk/src/models/room";
import { MsgType } from "matrix-js-sdk/src/@types/event";
import { logger } from "matrix-js-sdk/src/logger";
import { Optional } from "matrix-events-sdk";
import AccessibleTooltipButton from "../elements/AccessibleTooltipButton";
import { _t } from "../../../languageHandler";
import { IUpload, RecordingState, VoiceRecording } from "../../../audio/VoiceRecording";
import { MatrixClientPeg } from "../../../MatrixClientPeg";
import LiveRecordingWaveform from "../audio_messages/LiveRecordingWaveform";
import { replaceableComponent } from "../../../utils/replaceableComponent";
import LiveRecordingClock from "../audio_messages/LiveRecordingClock";
import { VoiceRecordingStore } from "../../../stores/VoiceRecordingStore";
import { UPDATE_EVENT } from "../../../stores/AsyncStore";
import RecordingPlayback from "../audio_messages/RecordingPlayback";
import Modal from "../../../Modal";
import ErrorDialog from "../dialogs/ErrorDialog";
import MediaDeviceHandler, { MediaDeviceKindEnum } from "../../../MediaDeviceHandler";
import NotificationBadge from "./NotificationBadge";
import { StaticNotificationState } from "../../../stores/notifications/StaticNotificationState";
import { NotificationColor } from "../../../stores/notifications/NotificationColor";
import InlineSpinner from "../elements/InlineSpinner";
import { PlaybackManager } from "../../../audio/PlaybackManager";
interface IProps {
room: Room;
}
interface IState {
recorder?: VoiceRecording;
recordingPhase?: RecordingState;
didUploadFail?: boolean;
}
/**
* Container tile for rendering the voice message recorder in the composer.
*/
@replaceableComponent("views.rooms.VoiceRecordComposerTile")
export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
public constructor(props) {
super(props);
this.state = {
recorder: null, // no recording started by default
};
}
public componentDidMount() {
const recorder = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
if (recorder) {
if (recorder.isRecording || !recorder.hasRecording) {
logger.warn("Cached recording hasn't ended yet and might cause issues");
}
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Ended });
}
}
public async componentWillUnmount() {
// Stop recording, but keep the recording memory (don't dispose it). This is to let the user
// come back and finish working with it.
const recording = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
await recording?.stop();
// Clean up our listeners by binding a falsy recorder
this.bindNewRecorder(null);
}
// called by composer
public async send() {
if (!this.state.recorder) {
throw new Error("No recording started - cannot send anything");
}
await this.state.recorder.stop();
let upload: IUpload;
try {
upload = await this.state.recorder.upload(this.props.room.roomId);
} catch (e) {
logger.error("Error uploading voice message:", e);
// Flag error and move on. The recording phase will be reset by the upload function.
this.setState({ didUploadFail: true });
return; // don't dispose the recording: the user has a chance to re-upload
}
try {
// noinspection ES6MissingAwait - we don't care if it fails, it'll get queued.
MatrixClientPeg.get().sendMessage(this.props.room.roomId, {
"body": "Voice message",
//"msgtype": "org.matrix.msc2516.voice",
"msgtype": MsgType.Audio,
"url": upload.mxc,
"file": upload.encrypted,
"info": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
// MSC1767 + Ideals of MSC2516 as MSC3245
// https://github.com/matrix-org/matrix-doc/pull/3245
"org.matrix.msc1767.text": "Voice message",
"org.matrix.msc1767.file": {
url: upload.mxc,
file: upload.encrypted,
name: "Voice message.ogg",
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
"org.matrix.msc1767.audio": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
// https://github.com/matrix-org/matrix-doc/pull/3246
waveform: this.state.recorder.getPlayback().thumbnailWaveform.map(v => Math.round(v * 1024)),
},
"org.matrix.msc3245.voice": {}, // No content, this is a rendering hint
});
} catch (e) {
logger.error("Error sending voice message:", e);
// Voice message should be in the timeline at this point, so let other things take care
// of error handling. We also shouldn't need the recording anymore, so fall through to
// disposal.
}
await this.disposeRecording();
}
private async | () {
await VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
// Reset back to no recording, which means no phase (ie: restart component entirely)
this.setState({ recorder: null, recordingPhase: null, didUploadFail: false });
}
private onCancel = async () => {
await this.disposeRecording();
};
public onRecordStartEndClick = async () => {
if (this.state.recorder) {
await this.state.recorder.stop();
return;
}
// The "microphone access error" dialogs are used a lot, so let's functionify them
const accessError = () => {
Modal.createTrackedDialog('Microphone Access Error', '', ErrorDialog, {
title: _t("Unable to access your microphone"),
description: <>
<p>{ _t(
"We were unable to access your microphone. Please check your browser settings and try again.",
) }</p>
</>,
});
};
// Do a sanity test to ensure we're about to grab a valid microphone reference. Things might
// change between this and recording, but at least we will have tried.
try {
const devices = await MediaDeviceHandler.getDevices();
if (!devices?.[MediaDeviceKindEnum.AudioInput]?.length) {
Modal.createTrackedDialog('No Microphone Error', '', ErrorDialog, {
title: _t("No microphone found"),
description: <>
<p>{ _t(
"We didn't find a microphone on your device. Please check your settings and try again.",
) }</p>
</>,
});
return;
}
// else we probably have a device that is good enough
} catch (e) {
logger.error("Error getting devices: ", e);
accessError();
return;
}
try {
// stop any noises which might be happening
await PlaybackManager.instance.pauseAllExcept(null);
const recorder = VoiceRecordingStore.instance.startRecording(this.props.room.roomId);
await recorder.start();
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Started });
} catch (e) {
logger.error("Error starting recording: ", e);
accessError();
// noinspection ES6MissingAwait - if this goes wrong we don't want it to affect the call stack
VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
}
};
private bindNewRecorder(recorder: Optional<VoiceRecording>) {
if (this.state.recorder) {
this.state.recorder.off(UPDATE_EVENT, this.onRecordingUpdate);
}
if (recorder) {
recorder.on(UPDATE_EVENT, this.onRecordingUpdate);
}
}
private onRecordingUpdate = (ev: RecordingState) => {
if (ev === RecordingState.EndingSoon) return; // ignore this state: it has no UI purpose here
this.setState({ recordingPhase: ev });
};
private renderWaveformArea(): ReactNode {
if (!this.state.recorder) return null; // no recorder means we're not recording: no waveform
if (this.state.recordingPhase !== RecordingState.Started) {
return <RecordingPlayback playback={this.state.recorder.getPlayback()} />;
}
// only other UI is the recording-in-progress UI
return <div className="mx_MediaBody mx_VoiceMessagePrimaryContainer mx_VoiceRecordComposerTile_recording">
<LiveRecordingClock recorder={this.state.recorder} />
<LiveRecordingWaveform recorder={this.state.recorder} />
</div>;
}
public render(): ReactNode {
if (!this.state.recordingPhase) return null;
let stopBtn;
let deleteButton;
if (this.state.recordingPhase === RecordingState.Started) {
let tooltip = _t("Send voice message");
if (!!this.state.recorder) {
tooltip = _t("Stop recording");
}
stopBtn = <AccessibleTooltipButton
className="mx_VoiceRecordComposerTile_stop"
onClick={this.onRecordStartEndClick}
title={tooltip}
/>;
if (this.state.recorder && !this.state.recorder?.isRecording) {
stopBtn = null;
}
}
if (this.state.recorder && this.state.recordingPhase !== RecordingState.Uploading) {
deleteButton = <AccessibleTooltipButton
className='mx_VoiceRecordComposerTile_delete'
title={_t("Delete")}
onClick={this.onCancel}
/>;
}
let uploadIndicator;
if (this.state.recordingPhase === RecordingState.Uploading) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_uploadingState'>
<InlineSpinner w={16} h={16} />
</span>;
} else if (this.state.didUploadFail && this.state.recordingPhase === RecordingState.Ended) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_failedState'>
<span className='mx_VoiceRecordComposerTile_uploadState_badge'>
{ /* Need to stick the badge in a span to ensure it doesn't create a block component */ }
<NotificationBadge
notification={StaticNotificationState.forSymbol("!", NotificationColor.Red)}
/>
</span>
<span className='text-warning'>{ _t("Failed to send") }</span>
</span>;
}
return (<>
{ uploadIndicator }
{ deleteButton }
{ stopBtn }
{ this.renderWaveformArea() }
</>);
}
}
| disposeRecording | identifier_name |
VoiceRecordComposerTile.tsx | /*
Copyright 2021 - 2022 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React, { ReactNode } from "react";
import { Room } from "matrix-js-sdk/src/models/room";
import { MsgType } from "matrix-js-sdk/src/@types/event";
import { logger } from "matrix-js-sdk/src/logger";
import { Optional } from "matrix-events-sdk";
import AccessibleTooltipButton from "../elements/AccessibleTooltipButton";
import { _t } from "../../../languageHandler";
import { IUpload, RecordingState, VoiceRecording } from "../../../audio/VoiceRecording";
import { MatrixClientPeg } from "../../../MatrixClientPeg";
import LiveRecordingWaveform from "../audio_messages/LiveRecordingWaveform";
import { replaceableComponent } from "../../../utils/replaceableComponent";
import LiveRecordingClock from "../audio_messages/LiveRecordingClock";
import { VoiceRecordingStore } from "../../../stores/VoiceRecordingStore";
import { UPDATE_EVENT } from "../../../stores/AsyncStore";
import RecordingPlayback from "../audio_messages/RecordingPlayback";
import Modal from "../../../Modal";
import ErrorDialog from "../dialogs/ErrorDialog";
import MediaDeviceHandler, { MediaDeviceKindEnum } from "../../../MediaDeviceHandler";
import NotificationBadge from "./NotificationBadge";
import { StaticNotificationState } from "../../../stores/notifications/StaticNotificationState";
import { NotificationColor } from "../../../stores/notifications/NotificationColor";
import InlineSpinner from "../elements/InlineSpinner";
import { PlaybackManager } from "../../../audio/PlaybackManager";
interface IProps {
room: Room;
}
interface IState {
recorder?: VoiceRecording;
recordingPhase?: RecordingState;
didUploadFail?: boolean;
}
/**
* Container tile for rendering the voice message recorder in the composer.
*/
@replaceableComponent("views.rooms.VoiceRecordComposerTile")
export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
public constructor(props) {
super(props);
this.state = {
recorder: null, // no recording started by default
};
}
public componentDidMount() {
const recorder = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
if (recorder) {
if (recorder.isRecording || !recorder.hasRecording) {
logger.warn("Cached recording hasn't ended yet and might cause issues");
}
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Ended });
}
}
public async componentWillUnmount() {
// Stop recording, but keep the recording memory (don't dispose it). This is to let the user
// come back and finish working with it.
const recording = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
await recording?.stop();
// Clean up our listeners by binding a falsy recorder
this.bindNewRecorder(null);
}
// called by composer
public async send() {
if (!this.state.recorder) {
throw new Error("No recording started - cannot send anything");
}
await this.state.recorder.stop();
let upload: IUpload;
try {
upload = await this.state.recorder.upload(this.props.room.roomId);
} catch (e) {
logger.error("Error uploading voice message:", e);
// Flag error and move on. The recording phase will be reset by the upload function.
this.setState({ didUploadFail: true });
return; // don't dispose the recording: the user has a chance to re-upload
}
try {
// noinspection ES6MissingAwait - we don't care if it fails, it'll get queued.
MatrixClientPeg.get().sendMessage(this.props.room.roomId, {
"body": "Voice message",
//"msgtype": "org.matrix.msc2516.voice",
"msgtype": MsgType.Audio,
"url": upload.mxc,
"file": upload.encrypted,
"info": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
// MSC1767 + Ideals of MSC2516 as MSC3245
// https://github.com/matrix-org/matrix-doc/pull/3245
"org.matrix.msc1767.text": "Voice message",
"org.matrix.msc1767.file": {
url: upload.mxc,
file: upload.encrypted,
name: "Voice message.ogg",
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
"org.matrix.msc1767.audio": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
// https://github.com/matrix-org/matrix-doc/pull/3246
waveform: this.state.recorder.getPlayback().thumbnailWaveform.map(v => Math.round(v * 1024)),
},
"org.matrix.msc3245.voice": {}, // No content, this is a rendering hint
});
} catch (e) {
logger.error("Error sending voice message:", e);
// Voice message should be in the timeline at this point, so let other things take care
// of error handling. We also shouldn't need the recording anymore, so fall through to
// disposal.
}
await this.disposeRecording();
}
private async disposeRecording() |
private onCancel = async () => {
await this.disposeRecording();
};
public onRecordStartEndClick = async () => {
if (this.state.recorder) {
await this.state.recorder.stop();
return;
}
// The "microphone access error" dialogs are used a lot, so let's functionify them
const accessError = () => {
Modal.createTrackedDialog('Microphone Access Error', '', ErrorDialog, {
title: _t("Unable to access your microphone"),
description: <>
<p>{ _t(
"We were unable to access your microphone. Please check your browser settings and try again.",
) }</p>
</>,
});
};
// Do a sanity test to ensure we're about to grab a valid microphone reference. Things might
// change between this and recording, but at least we will have tried.
try {
const devices = await MediaDeviceHandler.getDevices();
if (!devices?.[MediaDeviceKindEnum.AudioInput]?.length) {
Modal.createTrackedDialog('No Microphone Error', '', ErrorDialog, {
title: _t("No microphone found"),
description: <>
<p>{ _t(
"We didn't find a microphone on your device. Please check your settings and try again.",
) }</p>
</>,
});
return;
}
// else we probably have a device that is good enough
} catch (e) {
logger.error("Error getting devices: ", e);
accessError();
return;
}
try {
// stop any noises which might be happening
await PlaybackManager.instance.pauseAllExcept(null);
const recorder = VoiceRecordingStore.instance.startRecording(this.props.room.roomId);
await recorder.start();
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Started });
} catch (e) {
logger.error("Error starting recording: ", e);
accessError();
// noinspection ES6MissingAwait - if this goes wrong we don't want it to affect the call stack
VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
}
};
private bindNewRecorder(recorder: Optional<VoiceRecording>) {
if (this.state.recorder) {
this.state.recorder.off(UPDATE_EVENT, this.onRecordingUpdate);
}
if (recorder) {
recorder.on(UPDATE_EVENT, this.onRecordingUpdate);
}
}
private onRecordingUpdate = (ev: RecordingState) => {
if (ev === RecordingState.EndingSoon) return; // ignore this state: it has no UI purpose here
this.setState({ recordingPhase: ev });
};
private renderWaveformArea(): ReactNode {
if (!this.state.recorder) return null; // no recorder means we're not recording: no waveform
if (this.state.recordingPhase !== RecordingState.Started) {
return <RecordingPlayback playback={this.state.recorder.getPlayback()} />;
}
// only other UI is the recording-in-progress UI
return <div className="mx_MediaBody mx_VoiceMessagePrimaryContainer mx_VoiceRecordComposerTile_recording">
<LiveRecordingClock recorder={this.state.recorder} />
<LiveRecordingWaveform recorder={this.state.recorder} />
</div>;
}
public render(): ReactNode {
if (!this.state.recordingPhase) return null;
let stopBtn;
let deleteButton;
if (this.state.recordingPhase === RecordingState.Started) {
let tooltip = _t("Send voice message");
if (!!this.state.recorder) {
tooltip = _t("Stop recording");
}
stopBtn = <AccessibleTooltipButton
className="mx_VoiceRecordComposerTile_stop"
onClick={this.onRecordStartEndClick}
title={tooltip}
/>;
if (this.state.recorder && !this.state.recorder?.isRecording) {
stopBtn = null;
}
}
if (this.state.recorder && this.state.recordingPhase !== RecordingState.Uploading) {
deleteButton = <AccessibleTooltipButton
className='mx_VoiceRecordComposerTile_delete'
title={_t("Delete")}
onClick={this.onCancel}
/>;
}
let uploadIndicator;
if (this.state.recordingPhase === RecordingState.Uploading) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_uploadingState'>
<InlineSpinner w={16} h={16} />
</span>;
} else if (this.state.didUploadFail && this.state.recordingPhase === RecordingState.Ended) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_failedState'>
<span className='mx_VoiceRecordComposerTile_uploadState_badge'>
{ /* Need to stick the badge in a span to ensure it doesn't create a block component */ }
<NotificationBadge
notification={StaticNotificationState.forSymbol("!", NotificationColor.Red)}
/>
</span>
<span className='text-warning'>{ _t("Failed to send") }</span>
</span>;
}
return (<>
{ uploadIndicator }
{ deleteButton }
{ stopBtn }
{ this.renderWaveformArea() }
</>);
}
}
| {
await VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
// Reset back to no recording, which means no phase (ie: restart component entirely)
this.setState({ recorder: null, recordingPhase: null, didUploadFail: false });
} | identifier_body |
VoiceRecordComposerTile.tsx | /*
Copyright 2021 - 2022 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React, { ReactNode } from "react";
import { Room } from "matrix-js-sdk/src/models/room";
import { MsgType } from "matrix-js-sdk/src/@types/event";
import { logger } from "matrix-js-sdk/src/logger";
import { Optional } from "matrix-events-sdk";
import AccessibleTooltipButton from "../elements/AccessibleTooltipButton";
import { _t } from "../../../languageHandler";
import { IUpload, RecordingState, VoiceRecording } from "../../../audio/VoiceRecording";
import { MatrixClientPeg } from "../../../MatrixClientPeg";
import LiveRecordingWaveform from "../audio_messages/LiveRecordingWaveform";
import { replaceableComponent } from "../../../utils/replaceableComponent";
import LiveRecordingClock from "../audio_messages/LiveRecordingClock";
import { VoiceRecordingStore } from "../../../stores/VoiceRecordingStore";
import { UPDATE_EVENT } from "../../../stores/AsyncStore";
import RecordingPlayback from "../audio_messages/RecordingPlayback";
import Modal from "../../../Modal";
import ErrorDialog from "../dialogs/ErrorDialog";
import MediaDeviceHandler, { MediaDeviceKindEnum } from "../../../MediaDeviceHandler";
import NotificationBadge from "./NotificationBadge";
import { StaticNotificationState } from "../../../stores/notifications/StaticNotificationState";
import { NotificationColor } from "../../../stores/notifications/NotificationColor";
import InlineSpinner from "../elements/InlineSpinner";
import { PlaybackManager } from "../../../audio/PlaybackManager";
interface IProps {
room: Room;
}
interface IState {
recorder?: VoiceRecording;
recordingPhase?: RecordingState;
didUploadFail?: boolean;
}
/**
* Container tile for rendering the voice message recorder in the composer.
*/
@replaceableComponent("views.rooms.VoiceRecordComposerTile")
export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
public constructor(props) {
super(props);
this.state = {
recorder: null, // no recording started by default
};
}
public componentDidMount() {
const recorder = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
if (recorder) {
if (recorder.isRecording || !recorder.hasRecording) {
logger.warn("Cached recording hasn't ended yet and might cause issues");
}
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Ended });
}
}
public async componentWillUnmount() {
// Stop recording, but keep the recording memory (don't dispose it). This is to let the user
// come back and finish working with it.
const recording = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
await recording?.stop();
// Clean up our listeners by binding a falsy recorder
this.bindNewRecorder(null);
}
// called by composer
public async send() {
if (!this.state.recorder) {
throw new Error("No recording started - cannot send anything");
}
await this.state.recorder.stop();
let upload: IUpload;
try {
upload = await this.state.recorder.upload(this.props.room.roomId);
} catch (e) {
logger.error("Error uploading voice message:", e);
// Flag error and move on. The recording phase will be reset by the upload function.
this.setState({ didUploadFail: true });
return; // don't dispose the recording: the user has a chance to re-upload
}
try {
// noinspection ES6MissingAwait - we don't care if it fails, it'll get queued.
MatrixClientPeg.get().sendMessage(this.props.room.roomId, {
"body": "Voice message",
//"msgtype": "org.matrix.msc2516.voice",
"msgtype": MsgType.Audio,
"url": upload.mxc,
"file": upload.encrypted,
"info": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
// MSC1767 + Ideals of MSC2516 as MSC3245
// https://github.com/matrix-org/matrix-doc/pull/3245
"org.matrix.msc1767.text": "Voice message",
"org.matrix.msc1767.file": {
url: upload.mxc,
file: upload.encrypted,
name: "Voice message.ogg",
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
"org.matrix.msc1767.audio": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
// https://github.com/matrix-org/matrix-doc/pull/3246
waveform: this.state.recorder.getPlayback().thumbnailWaveform.map(v => Math.round(v * 1024)),
},
"org.matrix.msc3245.voice": {}, // No content, this is a rendering hint
});
} catch (e) {
logger.error("Error sending voice message:", e);
// Voice message should be in the timeline at this point, so let other things take care
// of error handling. We also shouldn't need the recording anymore, so fall through to
// disposal.
}
await this.disposeRecording();
}
private async disposeRecording() {
await VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
// Reset back to no recording, which means no phase (ie: restart component entirely)
this.setState({ recorder: null, recordingPhase: null, didUploadFail: false });
}
private onCancel = async () => {
await this.disposeRecording();
};
public onRecordStartEndClick = async () => {
if (this.state.recorder) {
await this.state.recorder.stop();
return;
}
// The "microphone access error" dialogs are used a lot, so let's functionify them
const accessError = () => {
Modal.createTrackedDialog('Microphone Access Error', '', ErrorDialog, {
title: _t("Unable to access your microphone"),
description: <>
<p>{ _t(
"We were unable to access your microphone. Please check your browser settings and try again.",
) }</p>
</>,
});
};
// Do a sanity test to ensure we're about to grab a valid microphone reference. Things might
// change between this and recording, but at least we will have tried.
try {
const devices = await MediaDeviceHandler.getDevices();
if (!devices?.[MediaDeviceKindEnum.AudioInput]?.length) {
Modal.createTrackedDialog('No Microphone Error', '', ErrorDialog, {
title: _t("No microphone found"),
description: <>
<p>{ _t(
"We didn't find a microphone on your device. Please check your settings and try again.",
) }</p>
</>,
});
return;
}
// else we probably have a device that is good enough
} catch (e) {
logger.error("Error getting devices: ", e);
accessError();
return;
}
try {
// stop any noises which might be happening
await PlaybackManager.instance.pauseAllExcept(null);
const recorder = VoiceRecordingStore.instance.startRecording(this.props.room.roomId);
await recorder.start();
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Started });
} catch (e) {
logger.error("Error starting recording: ", e);
accessError();
// noinspection ES6MissingAwait - if this goes wrong we don't want it to affect the call stack
VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
}
};
private bindNewRecorder(recorder: Optional<VoiceRecording>) {
if (this.state.recorder) {
this.state.recorder.off(UPDATE_EVENT, this.onRecordingUpdate);
}
if (recorder) {
recorder.on(UPDATE_EVENT, this.onRecordingUpdate);
}
}
private onRecordingUpdate = (ev: RecordingState) => {
if (ev === RecordingState.EndingSoon) return; // ignore this state: it has no UI purpose here
this.setState({ recordingPhase: ev });
};
private renderWaveformArea(): ReactNode {
if (!this.state.recorder) return null; // no recorder means we're not recording: no waveform
if (this.state.recordingPhase !== RecordingState.Started) {
return <RecordingPlayback playback={this.state.recorder.getPlayback()} />;
}
// only other UI is the recording-in-progress UI
return <div className="mx_MediaBody mx_VoiceMessagePrimaryContainer mx_VoiceRecordComposerTile_recording">
<LiveRecordingClock recorder={this.state.recorder} />
<LiveRecordingWaveform recorder={this.state.recorder} />
</div>;
}
public render(): ReactNode {
if (!this.state.recordingPhase) return null;
let stopBtn;
let deleteButton;
if (this.state.recordingPhase === RecordingState.Started) {
let tooltip = _t("Send voice message");
if (!!this.state.recorder) {
tooltip = _t("Stop recording");
}
stopBtn = <AccessibleTooltipButton
className="mx_VoiceRecordComposerTile_stop"
onClick={this.onRecordStartEndClick}
title={tooltip}
/>;
if (this.state.recorder && !this.state.recorder?.isRecording) {
stopBtn = null;
}
}
if (this.state.recorder && this.state.recordingPhase !== RecordingState.Uploading) {
deleteButton = <AccessibleTooltipButton
className='mx_VoiceRecordComposerTile_delete'
title={_t("Delete")}
onClick={this.onCancel}
/>;
}
let uploadIndicator;
if (this.state.recordingPhase === RecordingState.Uploading) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_uploadingState'>
<InlineSpinner w={16} h={16} />
</span>;
} else if (this.state.didUploadFail && this.state.recordingPhase === RecordingState.Ended) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_failedState'>
<span className='mx_VoiceRecordComposerTile_uploadState_badge'>
{ /* Need to stick the badge in a span to ensure it doesn't create a block component */ }
<NotificationBadge
notification={StaticNotificationState.forSymbol("!", NotificationColor.Red)}
/>
</span>
<span className='text-warning'>{ _t("Failed to send") }</span>
</span>;
}
return (<>
{ uploadIndicator }
{ deleteButton }
{ stopBtn }
{ this.renderWaveformArea() }
</>); | } | } | random_line_split |
VoiceRecordComposerTile.tsx | /*
Copyright 2021 - 2022 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React, { ReactNode } from "react";
import { Room } from "matrix-js-sdk/src/models/room";
import { MsgType } from "matrix-js-sdk/src/@types/event";
import { logger } from "matrix-js-sdk/src/logger";
import { Optional } from "matrix-events-sdk";
import AccessibleTooltipButton from "../elements/AccessibleTooltipButton";
import { _t } from "../../../languageHandler";
import { IUpload, RecordingState, VoiceRecording } from "../../../audio/VoiceRecording";
import { MatrixClientPeg } from "../../../MatrixClientPeg";
import LiveRecordingWaveform from "../audio_messages/LiveRecordingWaveform";
import { replaceableComponent } from "../../../utils/replaceableComponent";
import LiveRecordingClock from "../audio_messages/LiveRecordingClock";
import { VoiceRecordingStore } from "../../../stores/VoiceRecordingStore";
import { UPDATE_EVENT } from "../../../stores/AsyncStore";
import RecordingPlayback from "../audio_messages/RecordingPlayback";
import Modal from "../../../Modal";
import ErrorDialog from "../dialogs/ErrorDialog";
import MediaDeviceHandler, { MediaDeviceKindEnum } from "../../../MediaDeviceHandler";
import NotificationBadge from "./NotificationBadge";
import { StaticNotificationState } from "../../../stores/notifications/StaticNotificationState";
import { NotificationColor } from "../../../stores/notifications/NotificationColor";
import InlineSpinner from "../elements/InlineSpinner";
import { PlaybackManager } from "../../../audio/PlaybackManager";
interface IProps {
room: Room;
}
interface IState {
recorder?: VoiceRecording;
recordingPhase?: RecordingState;
didUploadFail?: boolean;
}
/**
* Container tile for rendering the voice message recorder in the composer.
*/
@replaceableComponent("views.rooms.VoiceRecordComposerTile")
export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
public constructor(props) {
super(props);
this.state = {
recorder: null, // no recording started by default
};
}
public componentDidMount() {
const recorder = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
if (recorder) {
if (recorder.isRecording || !recorder.hasRecording) {
logger.warn("Cached recording hasn't ended yet and might cause issues");
}
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Ended });
}
}
public async componentWillUnmount() {
// Stop recording, but keep the recording memory (don't dispose it). This is to let the user
// come back and finish working with it.
const recording = VoiceRecordingStore.instance.getActiveRecording(this.props.room.roomId);
await recording?.stop();
// Clean up our listeners by binding a falsy recorder
this.bindNewRecorder(null);
}
// called by composer
public async send() {
if (!this.state.recorder) {
throw new Error("No recording started - cannot send anything");
}
await this.state.recorder.stop();
let upload: IUpload;
try {
upload = await this.state.recorder.upload(this.props.room.roomId);
} catch (e) {
logger.error("Error uploading voice message:", e);
// Flag error and move on. The recording phase will be reset by the upload function.
this.setState({ didUploadFail: true });
return; // don't dispose the recording: the user has a chance to re-upload
}
try {
// noinspection ES6MissingAwait - we don't care if it fails, it'll get queued.
MatrixClientPeg.get().sendMessage(this.props.room.roomId, {
"body": "Voice message",
//"msgtype": "org.matrix.msc2516.voice",
"msgtype": MsgType.Audio,
"url": upload.mxc,
"file": upload.encrypted,
"info": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
// MSC1767 + Ideals of MSC2516 as MSC3245
// https://github.com/matrix-org/matrix-doc/pull/3245
"org.matrix.msc1767.text": "Voice message",
"org.matrix.msc1767.file": {
url: upload.mxc,
file: upload.encrypted,
name: "Voice message.ogg",
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
"org.matrix.msc1767.audio": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
// https://github.com/matrix-org/matrix-doc/pull/3246
waveform: this.state.recorder.getPlayback().thumbnailWaveform.map(v => Math.round(v * 1024)),
},
"org.matrix.msc3245.voice": {}, // No content, this is a rendering hint
});
} catch (e) {
logger.error("Error sending voice message:", e);
// Voice message should be in the timeline at this point, so let other things take care
// of error handling. We also shouldn't need the recording anymore, so fall through to
// disposal.
}
await this.disposeRecording();
}
private async disposeRecording() {
await VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
// Reset back to no recording, which means no phase (ie: restart component entirely)
this.setState({ recorder: null, recordingPhase: null, didUploadFail: false });
}
private onCancel = async () => {
await this.disposeRecording();
};
public onRecordStartEndClick = async () => {
if (this.state.recorder) {
await this.state.recorder.stop();
return;
}
// The "microphone access error" dialogs are used a lot, so let's functionify them
const accessError = () => {
Modal.createTrackedDialog('Microphone Access Error', '', ErrorDialog, {
title: _t("Unable to access your microphone"),
description: <>
<p>{ _t(
"We were unable to access your microphone. Please check your browser settings and try again.",
) }</p>
</>,
});
};
// Do a sanity test to ensure we're about to grab a valid microphone reference. Things might
// change between this and recording, but at least we will have tried.
try {
const devices = await MediaDeviceHandler.getDevices();
if (!devices?.[MediaDeviceKindEnum.AudioInput]?.length) {
Modal.createTrackedDialog('No Microphone Error', '', ErrorDialog, {
title: _t("No microphone found"),
description: <>
<p>{ _t(
"We didn't find a microphone on your device. Please check your settings and try again.",
) }</p>
</>,
});
return;
}
// else we probably have a device that is good enough
} catch (e) {
logger.error("Error getting devices: ", e);
accessError();
return;
}
try {
// stop any noises which might be happening
await PlaybackManager.instance.pauseAllExcept(null);
const recorder = VoiceRecordingStore.instance.startRecording(this.props.room.roomId);
await recorder.start();
this.bindNewRecorder(recorder);
this.setState({ recorder, recordingPhase: RecordingState.Started });
} catch (e) {
logger.error("Error starting recording: ", e);
accessError();
// noinspection ES6MissingAwait - if this goes wrong we don't want it to affect the call stack
VoiceRecordingStore.instance.disposeRecording(this.props.room.roomId);
}
};
private bindNewRecorder(recorder: Optional<VoiceRecording>) {
if (this.state.recorder) {
this.state.recorder.off(UPDATE_EVENT, this.onRecordingUpdate);
}
if (recorder) {
recorder.on(UPDATE_EVENT, this.onRecordingUpdate);
}
}
private onRecordingUpdate = (ev: RecordingState) => {
if (ev === RecordingState.EndingSoon) return; // ignore this state: it has no UI purpose here
this.setState({ recordingPhase: ev });
};
private renderWaveformArea(): ReactNode {
if (!this.state.recorder) return null; // no recorder means we're not recording: no waveform
if (this.state.recordingPhase !== RecordingState.Started) |
// only other UI is the recording-in-progress UI
return <div className="mx_MediaBody mx_VoiceMessagePrimaryContainer mx_VoiceRecordComposerTile_recording">
<LiveRecordingClock recorder={this.state.recorder} />
<LiveRecordingWaveform recorder={this.state.recorder} />
</div>;
}
public render(): ReactNode {
if (!this.state.recordingPhase) return null;
let stopBtn;
let deleteButton;
if (this.state.recordingPhase === RecordingState.Started) {
let tooltip = _t("Send voice message");
if (!!this.state.recorder) {
tooltip = _t("Stop recording");
}
stopBtn = <AccessibleTooltipButton
className="mx_VoiceRecordComposerTile_stop"
onClick={this.onRecordStartEndClick}
title={tooltip}
/>;
if (this.state.recorder && !this.state.recorder?.isRecording) {
stopBtn = null;
}
}
if (this.state.recorder && this.state.recordingPhase !== RecordingState.Uploading) {
deleteButton = <AccessibleTooltipButton
className='mx_VoiceRecordComposerTile_delete'
title={_t("Delete")}
onClick={this.onCancel}
/>;
}
let uploadIndicator;
if (this.state.recordingPhase === RecordingState.Uploading) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_uploadingState'>
<InlineSpinner w={16} h={16} />
</span>;
} else if (this.state.didUploadFail && this.state.recordingPhase === RecordingState.Ended) {
uploadIndicator = <span className='mx_VoiceRecordComposerTile_failedState'>
<span className='mx_VoiceRecordComposerTile_uploadState_badge'>
{ /* Need to stick the badge in a span to ensure it doesn't create a block component */ }
<NotificationBadge
notification={StaticNotificationState.forSymbol("!", NotificationColor.Red)}
/>
</span>
<span className='text-warning'>{ _t("Failed to send") }</span>
</span>;
}
return (<>
{ uploadIndicator }
{ deleteButton }
{ stopBtn }
{ this.renderWaveformArea() }
</>);
}
}
| {
return <RecordingPlayback playback={this.state.recorder.getPlayback()} />;
} | conditional_block |
index.ts | import AssertAgainstNamedBlocks from './assert-against-named-blocks';
import AssertIfHelperWithoutArguments from './assert-if-helper-without-arguments'; | import TransformActionSyntax from './transform-action-syntax';
import TransformAttrsIntoArgs from './transform-attrs-into-args';
import TransformComponentInvocation from './transform-component-invocation';
import TransformEachInIntoEach from './transform-each-in-into-each';
import TransformEachTrackArray from './transform-each-track-array';
import TransformHasBlockSyntax from './transform-has-block-syntax';
import TransformInElement from './transform-in-element';
import TransformLinkTo from './transform-link-to';
import TransformOldClassBindingSyntax from './transform-old-class-binding-syntax';
import TransformQuotedBindingsIntoJustBindings from './transform-quoted-bindings-into-just-bindings';
import TransformWrapMountAndOutlet from './transform-wrap-mount-and-outlet';
import { EMBER_NAMED_BLOCKS } from '@ember/canary-features';
import { SEND_ACTION } from '@ember/deprecated-features';
// order of plugins is important
const transforms = [
TransformComponentInvocation,
TransformOldClassBindingSyntax,
TransformQuotedBindingsIntoJustBindings,
AssertReservedNamedArguments,
TransformActionSyntax,
TransformAttrsIntoArgs,
TransformEachInIntoEach,
TransformHasBlockSyntax,
AssertLocalVariableShadowingHelperInvocation,
TransformLinkTo,
AssertInputHelperWithoutBlock,
TransformInElement,
AssertIfHelperWithoutArguments,
AssertSplattributeExpressions,
TransformEachTrackArray,
TransformWrapMountAndOutlet,
];
if (SEND_ACTION) {
transforms.push(DeprecateSendAction);
}
if (!EMBER_NAMED_BLOCKS) {
transforms.push(AssertAgainstNamedBlocks);
}
export default Object.freeze(transforms); | import AssertInputHelperWithoutBlock from './assert-input-helper-without-block';
import AssertLocalVariableShadowingHelperInvocation from './assert-local-variable-shadowing-helper-invocation';
import AssertReservedNamedArguments from './assert-reserved-named-arguments';
import AssertSplattributeExpressions from './assert-splattribute-expression';
import DeprecateSendAction from './deprecate-send-action'; | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.