Column     Type          Stats
file_name  large_string  lengths 4–140
prefix     large_string  lengths 0–12.1k
suffix     large_string  lengths 0–12k
middle     large_string  lengths 0–7.51k
fim_type   large_string  4 classes
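This schema matches a Hugging Face `datasets` dump of a fill-in-the-middle (FIM) corpus: each row stores one source file split into `prefix`, `middle`, and `suffix`, plus a `fim_type` label naming how the split was made. A minimal sketch of loading and reassembling such a dataset follows; the repo id is a placeholder, since the dump does not name the dataset:

```python
from datasets import load_dataset

# "user/fim-code-dataset" is a placeholder id; the dump does not name the repo.
ds = load_dataset("user/fim-code-dataset", split="train")

row = ds[0]
# Concatenating the three spans reconstructs the original source file.
original = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"], len(original))
```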
gulpfile.js
var gulp = require('gulp');
var babel = require('gulp-babel');
var concat = require('gulp-concat');
var merge = require('merge-stream');
var stylus = require('gulp-stylus');
var rename = require("gulp-rename");
var uglify = require("gulp-uglify");
var cssmin = require("gulp-cssmin");
var ngAnnotate = require('gulp-ng-annotate');
var nib = require("nib");
var watch = require('gulp-watch');

function compileJs(devOnly) {
    var othersUmd = gulp.src(['src/**/*.js', '!src/main.js'])
            .pipe(babel({
                modules: 'umdStrict',
                moduleRoot: 'angular-chatbar',
                moduleIds: true
            })),
        mainUmd = gulp.src('src/main.js')
            .pipe(babel({
                modules: 'umdStrict',
                moduleIds: true,
                moduleId: 'angular-chatbar'
            })),
        stream = merge(othersUmd, mainUmd)
            .pipe(concat('angular-chatbar.umd.js'))
            .pipe(gulp.dest('dist'));

    if (!devOnly) {
        stream = stream
            .pipe(ngAnnotate())
            .pipe(uglify())
            .pipe(rename('angular-chatbar.umd.min.js'))
            .pipe(gulp.dest('dist'));
    }

    return stream;
}

function compileCss(name, devOnly) {
    var stream = gulp.src('styles/' + name + '.styl')
        .pipe(stylus({use: nib()}))
        .pipe(rename('angular-' + name + '.css'))
        .pipe(gulp.dest('dist'));
    if (!devOnly) {
        stream = stream.pipe(cssmin())
            .pipe(rename('angular-' + name + '.min.css'))
            .pipe(gulp.dest('dist'));
    }

    return stream;
}

function compileAllCss(devOnly) {
    var streams = [];
    ['chatbar', 'chatbar.default-theme', 'chatbar.default-animations'].forEach(function (name) {
        streams.push(compileCss(name, devOnly));
    });
    return merge.apply(null, streams);
}

gulp.task('default', function () {
    // merge-stream takes streams as plain arguments; calling
    // merge.apply(compileJs(), compileAllCss()) would misbind `this`.
    return merge(compileJs(), compileAllCss());
});

gulp.task('_watch', function () {
    watch('styles/**/*.styl', function () {
        compileAllCss(true);
    });
    watch('src/**/*.js', function () {
        compileJs(true);
    });
});

gulp.task('watch', ['default', '_watch']);
random_line_split
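`random_line_split` rows, like the one above, cut the held-out `middle` at arbitrary line boundaries. A minimal Python sketch of such a splitter; the function name and the single-span policy are assumptions, not the dataset's actual generation code:

```python
import random

def random_line_split(source: str) -> tuple[str, str, str]:
    # Hypothetical splitter: hold out a contiguous run of whole lines
    # and return (prefix, middle, suffix).
    lines = source.splitlines(keepends=True)
    if not lines:
        return "", "", ""
    i = random.randrange(len(lines))         # first held-out line
    j = random.randrange(i, len(lines)) + 1  # one past the last held-out line
    return "".join(lines[:i]), "".join(lines[i:j]), "".join(lines[j:])
```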
gulpfile.js
var gulp = require('gulp'); var babel = require('gulp-babel'); var concat = require('gulp-concat'); var merge = require('merge-stream'); var stylus = require('gulp-stylus'); var rename = require("gulp-rename"); var uglify = require("gulp-uglify"); var cssmin = require("gulp-cssmin"); var ngAnnotate = require('gulp-ng-annotate'); var nib = require("nib"); var watch = require('gulp-watch'); function compileJs(devOnly) { var othersUmd = gulp.src(['src/**/*.js', '!src/main.js']) .pipe(babel({ modules: 'umdStrict', moduleRoot: 'angular-chatbar', moduleIds: true })), mainUmd = gulp.src('src/main.js') .pipe(babel({ modules: 'umdStrict', moduleIds: true, moduleId: 'angular-chatbar' })), stream = merge(othersUmd, mainUmd) .pipe(concat('angular-chatbar.umd.js')) .pipe(gulp.dest('dist')) ; if (!devOnly) { stream = stream .pipe(ngAnnotate()) .pipe(uglify()) .pipe(rename('angular-chatbar.umd.min.js')) .pipe(gulp.dest('dist')); } return stream; } function compileCss(name, devOnly)
function compileAllCss(devOnly) { var streams = []; ['chatbar', 'chatbar.default-theme', 'chatbar.default-animations'].forEach(function (name) { streams.push(compileCss(name, devOnly)); }); return merge.apply(null, streams); } gulp.task('default', function() { return merge.apply(compileJs(), compileAllCss()); }); gulp.task('_watch', function() { watch('styles/**/*.styl', function () { compileAllCss(true); }); watch('src/**/*.js', function () { compileJs(true); }); }); gulp.task('watch', ['default', '_watch']);
{
    var stream = gulp.src('styles/' + name + '.styl')
        .pipe(stylus({use: nib()}))
        .pipe(rename('angular-' + name + '.css'))
        .pipe(gulp.dest('dist'));

    if (!devOnly) {
        stream = stream.pipe(cssmin())
            .pipe(rename('angular-' + name + '.min.css'))
            .pipe(gulp.dest('dist'));
    }

    return stream;
}
identifier_body
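`identifier_body` rows hold out the full body of a declaration: in the row above, `compileCss`'s `{ ... }` block is the `middle` while its signature stays in the prefix. Producing such splits requires a parser per language; below is a hedged, Python-only analogue using the standard `ast` module (the JS/TS/Rust rows would need language-specific tooling instead):

```python
import ast
import random

def identifier_body_split(source: str):
    # Hypothetical splitter: hold out the body of one Python function.
    funcs = [n for n in ast.walk(ast.parse(source))
             if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))]
    if not funcs:
        return None
    fn = random.choice(funcs)
    lines = source.splitlines(keepends=True)
    start = fn.body[0].lineno - 1  # first statement of the body (0-based)
    end = fn.body[-1].end_lineno   # last line of the body (1-based, inclusive)
    return "".join(lines[:start]), "".join(lines[start:end]), "".join(lines[end:])
```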
gulpfile.js
var gulp = require('gulp'); var babel = require('gulp-babel'); var concat = require('gulp-concat'); var merge = require('merge-stream'); var stylus = require('gulp-stylus'); var rename = require("gulp-rename"); var uglify = require("gulp-uglify"); var cssmin = require("gulp-cssmin"); var ngAnnotate = require('gulp-ng-annotate'); var nib = require("nib"); var watch = require('gulp-watch'); function compileJs(devOnly) { var othersUmd = gulp.src(['src/**/*.js', '!src/main.js']) .pipe(babel({ modules: 'umdStrict', moduleRoot: 'angular-chatbar', moduleIds: true })), mainUmd = gulp.src('src/main.js') .pipe(babel({ modules: 'umdStrict', moduleIds: true, moduleId: 'angular-chatbar' })), stream = merge(othersUmd, mainUmd) .pipe(concat('angular-chatbar.umd.js')) .pipe(gulp.dest('dist')) ; if (!devOnly) { stream = stream .pipe(ngAnnotate()) .pipe(uglify()) .pipe(rename('angular-chatbar.umd.min.js')) .pipe(gulp.dest('dist')); } return stream; } function compileCss(name, devOnly) { var stream = gulp.src('styles/' + name + '.styl') .pipe(stylus({use: nib()})) .pipe(rename('angular-' + name + '.css')) .pipe(gulp.dest('dist')) ; if (!devOnly)
return stream; } function compileAllCss(devOnly) { var streams = []; ['chatbar', 'chatbar.default-theme', 'chatbar.default-animations'].forEach(function (name) { streams.push(compileCss(name, devOnly)); }); return merge.apply(null, streams); } gulp.task('default', function() { return merge.apply(compileJs(), compileAllCss()); }); gulp.task('_watch', function() { watch('styles/**/*.styl', function () { compileAllCss(true); }); watch('src/**/*.js', function () { compileJs(true); }); }); gulp.task('watch', ['default', '_watch']);
{
    stream = stream.pipe(cssmin())
        .pipe(rename('angular-' + name + '.min.css'))
        .pipe(gulp.dest('dist'));
}
conditional_block
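`conditional_block` rows mask the statement block guarded by a condition: above, the body of `if (!devOnly)` is the `middle` and the prefix ends at the condition itself. A comparable Python-only sketch, again hypothetical rather than the dataset's real pipeline:

```python
import ast
import random

def conditional_block_split(source: str):
    # Hypothetical splitter: hold out the suite of one `if` statement.
    ifs = [n for n in ast.walk(ast.parse(source)) if isinstance(n, ast.If)]
    if not ifs:
        return None
    node = random.choice(ifs)
    lines = source.splitlines(keepends=True)
    start = node.body[0].lineno - 1
    end = node.body[-1].end_lineno
    return "".join(lines[:start]), "".join(lines[start:end]), "".join(lines[end:])
```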
category.component.ts
/**
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { Component, Input, OnInit } from '@angular/core';
import { CategoryDetails } from '../store.service';

@Component({
  selector: 'app-category',
  templateUrl: './category.component.html',
  styleUrls: ['./category.component.scss'],
})
export class CategoryComponent implements OnInit {
  @Input() category!: CategoryDetails;
() {}

  ngOnInit(): void {}
}
constructor
identifier_name
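`identifier_name` rows mask a single name token: in the row above the `middle` is just `constructor`, and the suffix resumes at the parameter list. A regex-based Python sketch for the Python files in this dump (hypothetical; a real pipeline would use a tokenizer rather than a regex):

```python
import random
import re

def identifier_name_split(source: str):
    # Hypothetical splitter: hold out the name of one `def` as the middle.
    matches = list(re.finditer(r"(?<=\bdef )\w+", source))
    if not matches:
        return None
    m = random.choice(matches)
    return source[:m.start()], m.group(0), source[m.end():]
```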
category.component.ts
/** * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,
* limitations under the License. */ import { Component, Input, OnInit } from '@angular/core'; import { CategoryDetails } from '../store.service'; @Component({ selector: 'app-category', templateUrl: './category.component.html', styleUrls: ['./category.component.scss'], }) export class CategoryComponent implements OnInit { @Input() category!: CategoryDetails; constructor() {} ngOnInit(): void {} }
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and
random_line_split
category.component.ts
/** * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { Component, Input, OnInit } from '@angular/core'; import { CategoryDetails } from '../store.service'; @Component({ selector: 'app-category', templateUrl: './category.component.html', styleUrls: ['./category.component.scss'], }) export class CategoryComponent implements OnInit { @Input() category!: CategoryDetails; constructor() {} ngOnInit(): void
}
{}
identifier_body
test_GLM2_syn_2659x1049.py
import unittest, time, sys sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_glm, h2o_import as h2i params = { 'response': 1049, 'family': 'binomial', 'beta_epsilon': 0.0001, 'alpha': 1.0, 'lambda': 1e-05, 'n_folds': 1, 'max_iter': 20, } class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(1) @classmethod def
(cls): h2o.tear_down_cloud() def test_GLM2_syn_2659x1049(self): csvFilename = "syn_2659x1049.csv" csvPathname = 'logreg' + '/' + csvFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put') kwargs = params glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs) h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) def test_GLM2_syn_2659x1049x2enum(self): csvFilename = "syn_2659x1049x2enum.csv" csvPathname = 'logreg' + '/' + csvFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put') kwargs = params glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs) h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) if __name__ == '__main__': h2o.unit_main()
tearDownClass
identifier_name
test_GLM2_syn_2659x1049.py
import unittest, time, sys
sys.path.extend(['.', '..', '../..', 'py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i

params = {
    'response': 1049,
    'family': 'binomial',
    'beta_epsilon': 0.0001,
    'alpha': 1.0,
    'lambda': 1e-05,
    'n_folds': 1,
    'max_iter': 20,
}

class Basic(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        h2o.init(1)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_GLM2_syn_2659x1049(self):
        csvFilename = "syn_2659x1049.csv"
        csvPathname = 'logreg' + '/' + csvFilename
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname,
                                       hex_key=csvFilename + ".hex", schema='put')
        kwargs = params
        glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs)
        h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)

    def test_GLM2_syn_2659x1049x2enum(self):
        csvFilename = "syn_2659x1049x2enum.csv"
        csvPathname = 'logreg' + '/' + csvFilename
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname,
                                       hex_key=csvFilename + ".hex", schema='put')
        kwargs = params
        glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs)
        h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)

if __name__ == '__main__':
    h2o.unit_main()
conditional_block
test_GLM2_syn_2659x1049.py
import unittest, time, sys sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_glm, h2o_import as h2i params = { 'response': 1049, 'family': 'binomial', 'beta_epsilon': 0.0001, 'alpha': 1.0, 'lambda': 1e-05, 'n_folds': 1, 'max_iter': 20, } class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(1) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_GLM2_syn_2659x1049(self): csvFilename = "syn_2659x1049.csv" csvPathname = 'logreg' + '/' + csvFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put') kwargs = params glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs) h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) def test_GLM2_syn_2659x1049x2enum(self):
if __name__ == '__main__': h2o.unit_main()
csvFilename = "syn_2659x1049x2enum.csv" csvPathname = 'logreg' + '/' + csvFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put') kwargs = params glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs) h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
identifier_body
test_GLM2_syn_2659x1049.py
import unittest, time, sys sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_glm, h2o_import as h2i params = { 'response': 1049, 'family': 'binomial', 'beta_epsilon': 0.0001, 'alpha': 1.0, 'lambda': 1e-05, 'n_folds': 1, 'max_iter': 20, } class Basic(unittest.TestCase):
@classmethod def setUpClass(cls): h2o.init(1) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_GLM2_syn_2659x1049(self): csvFilename = "syn_2659x1049.csv" csvPathname = 'logreg' + '/' + csvFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put') kwargs = params glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs) h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) def test_GLM2_syn_2659x1049x2enum(self): csvFilename = "syn_2659x1049x2enum.csv" csvPathname = 'logreg' + '/' + csvFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put') kwargs = params glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs) h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) if __name__ == '__main__': h2o.unit_main()
def tearDown(self): h2o.check_sandbox_for_errors()
random_line_split
settings.py
""" Django settings for busquecursos project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'hb&=!izzysndvyjd_i@2pdx^d&px8ty%1g3#&%l$k))lpo(dvf' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'website', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'busquecursos.urls' WSGI_APPLICATION = 'busquecursos.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
random_line_split
shootout-spectralnorm.rs
use core::from_str::FromStr;
use core::iter::ExtendedMutableIter;

#[inline]
fn A(i: i32, j: i32) -> i32 {
    (i+j) * (i+j+1) / 2 + i + 1
}

fn dot(v: &[f64], u: &[f64]) -> f64 {
    let mut sum = 0.0;
    for v.eachi |i, &v_i| {
        sum += v_i * u[i];
    }
    sum
}

fn mult_Av(v: &mut [f64], out: &mut [f64]) {
    for vec::eachi_mut(out) |i, out_i| {
        let mut sum = 0.0;
        for vec::eachi_mut(v) |j, &v_j| {
            sum += v_j / (A(i as i32, j as i32) as f64);
        }
        *out_i = sum;
    }
}

fn mult_Atv(v: &mut [f64], out: &mut [f64]) {
    for vec::eachi_mut(out) |i, out_i| {
        let mut sum = 0.0;
        for vec::eachi_mut(v) |j, &v_j| {
            sum += v_j / (A(j as i32, i as i32) as f64);
        }
        *out_i = sum;
    }
}

fn mult_AtAv(v: &mut [f64], out: &mut [f64], tmp: &mut [f64]) {
    mult_Av(v, tmp);
    mult_Atv(tmp, out);
}

#[fixed_stack_segment]
fn main() {
    let n: uint = FromStr::from_str(os::args()[1]).get();
    let mut u = vec::from_elem(n, 1f64), v = u.clone(), tmp = u.clone();
    }

    println(fmt!("%.9f", f64::sqrt(dot(u,v) / dot(v,v)) as float));
}
    for 8.times {
        mult_AtAv(u, v, tmp);
        mult_AtAv(v, u, tmp);
random_line_split
shootout-spectralnorm.rs
use core::from_str::FromStr; use core::iter::ExtendedMutableIter; #[inline] fn A(i: i32, j: i32) -> i32 { (i+j) * (i+j+1) / 2 + i + 1 } fn dot(v: &[f64], u: &[f64]) -> f64 { let mut sum = 0.0; for v.eachi |i, &v_i| { sum += v_i * u[i]; } sum } fn mult_Av(v: &mut [f64], out: &mut [f64]) { for vec::eachi_mut(out) |i, out_i| { let mut sum = 0.0; for vec::eachi_mut(v) |j, &v_j| { sum += v_j / (A(i as i32, j as i32) as f64); } *out_i = sum; } } fn mult_Atv(v: &mut [f64], out: &mut [f64]) { for vec::eachi_mut(out) |i, out_i| { let mut sum = 0.0; for vec::eachi_mut(v) |j, &v_j| { sum += v_j / (A(j as i32, i as i32) as f64); } *out_i = sum; } } fn
(v: &mut [f64], out: &mut [f64], tmp: &mut [f64]) { mult_Av(v, tmp); mult_Atv(tmp, out); } #[fixed_stack_segment] fn main() { let n: uint = FromStr::from_str(os::args()[1]).get(); let mut u = vec::from_elem(n, 1f64), v = u.clone(), tmp = u.clone(); for 8.times { mult_AtAv(u, v, tmp); mult_AtAv(v, u, tmp); } println(fmt!("%.9f", f64::sqrt(dot(u,v) / dot(v,v)) as float)); }
mult_AtAv
identifier_name
shootout-spectralnorm.rs
use core::from_str::FromStr; use core::iter::ExtendedMutableIter; #[inline] fn A(i: i32, j: i32) -> i32
fn dot(v: &[f64], u: &[f64]) -> f64 { let mut sum = 0.0; for v.eachi |i, &v_i| { sum += v_i * u[i]; } sum } fn mult_Av(v: &mut [f64], out: &mut [f64]) { for vec::eachi_mut(out) |i, out_i| { let mut sum = 0.0; for vec::eachi_mut(v) |j, &v_j| { sum += v_j / (A(i as i32, j as i32) as f64); } *out_i = sum; } } fn mult_Atv(v: &mut [f64], out: &mut [f64]) { for vec::eachi_mut(out) |i, out_i| { let mut sum = 0.0; for vec::eachi_mut(v) |j, &v_j| { sum += v_j / (A(j as i32, i as i32) as f64); } *out_i = sum; } } fn mult_AtAv(v: &mut [f64], out: &mut [f64], tmp: &mut [f64]) { mult_Av(v, tmp); mult_Atv(tmp, out); } #[fixed_stack_segment] fn main() { let n: uint = FromStr::from_str(os::args()[1]).get(); let mut u = vec::from_elem(n, 1f64), v = u.clone(), tmp = u.clone(); for 8.times { mult_AtAv(u, v, tmp); mult_AtAv(v, u, tmp); } println(fmt!("%.9f", f64::sqrt(dot(u,v) / dot(v,v)) as float)); }
{ (i+j) * (i+j+1) / 2 + i + 1 }
identifier_body
withLatestFrom.d.ts
import { Observable, ObservableInput } from '../Observable';
/**
 * Combines the source Observable with other Observables to create an Observable
 * whose values are calculated from the latest values of each, only when the
 * source emits.
 *
 * <span class="informal">Whenever the source Observable emits a value, it
 * `withLatestFrom` combines each value from the source Observable (the
 * instance) with the latest values from the other input Observables only when
 * the source emits a value, optionally using a `project` function to determine
 * the value to be emitted on the output Observable. All input Observables must
 * emit at least one value before the output Observable will emit a value.
 *
 * @example <caption>On every click event, emit an array with the latest timer event plus the click event</caption>
 * var clicks = Rx.Observable.fromEvent(document, 'click');
 * var timer = Rx.Observable.interval(1000);
 * var result = clicks.withLatestFrom(timer);
 * result.subscribe(x => console.log(x));
 *
 * @see {@link combineLatest}
 *
 * @param {Observable} other An input Observable to combine with the source
 * Observable. More than one input Observables may be given as argument.
 * @param {Function} [project] Projection function for combining values
 * together. Receives all values in order of the Observables passed, where the
 * first parameter is a value from the source Observable. (e.g.
 * `a.withLatestFrom(b, c, (a1, b1, c1) => a1 + b1 + c1)`). If this is not
 * passed, arrays will be emitted on the output Observable.
 * @return {Observable} An Observable of projected values from the most recent
 * values from each input Observable, or an array of the most recent values from
 * each input Observable.
 * @method withLatestFrom
 * @owner Observable
 */
export declare function withLatestFrom<T, R>(...args: Array<ObservableInput<any> | ((...values: Array<any>) => R)>): Observable<R>;
export interface WithLatestFromSignature<T> {
    <R>(project: (v1: T) => R): Observable<R>;
    <T2, R>(v2: ObservableInput<T2>, project: (v1: T, v2: T2) => R): Observable<R>;
    <T2, T3, R>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, project: (v1: T, v2: T2, v3: T3) => R): Observable<R>;
    <T2, T3, T4, R>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, v4: ObservableInput<T4>, project: (v1: T, v2: T2, v3: T3, v4: T4) => R): Observable<R>;
    <T2, T3, T4, T5, R>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, v4: ObservableInput<T4>, v5: ObservableInput<T5>, project: (v1: T, v2: T2, v3: T3, v4: T4, v5: T5) => R): Observable<R>;
    <T2, T3, T4, T5, T6, R>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, v4: ObservableInput<T4>, v5: ObservableInput<T5>, v6: ObservableInput<T6>, project: (v1: T, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6) => R): Observable<R>;
    <T2>(v2: ObservableInput<T2>): Observable<[T, T2]>;
    <T2, T3>(v2: ObservableInput<T2>, v3: ObservableInput<T3>): Observable<[T, T2, T3]>;
    <T2, T3, T4>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, v4: ObservableInput<T4>): Observable<[T, T2, T3, T4]>;
    <T2, T3, T4, T5>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, v4: ObservableInput<T4>, v5: ObservableInput<T5>): Observable<[T, T2, T3, T4, T5]>;
    <T2, T3, T4, T5, T6>(v2: ObservableInput<T2>, v3: ObservableInput<T3>, v4: ObservableInput<T4>, v5: ObservableInput<T5>, v6: ObservableInput<T6>): Observable<[T, T2, T3, T4, T5, T6]>;
    <R>(...observables: Array<ObservableInput<any> | ((...values: Array<any>) => R)>): Observable<R>;
    <R>(array: ObservableInput<any>[]): Observable<R>;
    <R>(array: ObservableInput<any>[], project: (...values: Array<any>) => R): Observable<R>;
}
 * computes a formula using that value plus the latest values from other input
 * Observables, then emits the output of that formula.</span>
 *
 * <img src="./img/withLatestFrom.png" width="100%">
 *
random_line_split
run_finetuning.py
from finetune import preprocessing from finetune import task_builder from model import modeling from model import optimization from util import training_utils from util import utils class FinetuningModel(object): """Finetuning model with support for multi-task training.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks, is_training, features, num_train_steps): # Create a shared transformer encoder bert_config = training_utils.get_bert_config(config) self.bert_config = bert_config if config.debug: bert_config.num_hidden_layers = 3 bert_config.hidden_size = 144 bert_config.intermediate_size = 144 * 4 bert_config.num_attention_heads = 4 assert config.max_seq_length <= bert_config.max_position_embeddings bert_model = modeling.BertModel( bert_config=bert_config, is_training=is_training, input_ids=features["input_ids"], input_mask=features["input_mask"], token_type_ids=features["segment_ids"], use_one_hot_embeddings=config.use_tpu, embedding_size=config.embedding_size) percent_done = (tf.cast(tf.train.get_or_create_global_step(), tf.float32) / tf.cast(num_train_steps, tf.float32)) # Add specific tasks self.outputs = {"task_id": features["task_id"]} losses = [] for task in tasks: with tf.variable_scope("task_specific/" + task.name): task_losses, task_outputs = task.get_prediction_module( bert_model, features, is_training, percent_done) losses.append(task_losses) self.outputs[task.name] = task_outputs self.loss = tf.reduce_sum( tf.stack(losses, -1) * tf.one_hot(features["task_id"], len(config.task_names))) def model_fn_builder(config: configure_finetuning.FinetuningConfig, tasks, num_train_steps, pretraining_config=None): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" utils.log("Building model...") is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = FinetuningModel( config, tasks, is_training, features, num_train_steps) # Load pre-trained weights from checkpoint init_checkpoint = config.init_checkpoint if pretraining_config is not None: init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir) utils.log("Using checkpoint", init_checkpoint) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: assignment_map, _ = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint) if config.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) # Build model for training or prediction if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( model.loss, config.learning_rate, num_train_steps, weight_decay_rate=config.weight_decay_rate, use_tpu=config.use_tpu, warmup_proportion=config.warmup_proportion, layerwise_lr_decay_power=config.layerwise_lr_decay, n_transformer_layers=model.bert_config.num_hidden_layers ) output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, loss=model.loss, train_op=train_op, scaffold_fn=scaffold_fn, training_hooks=[training_utils.ETAHook( {} if config.use_tpu else dict(loss=model.loss), num_train_steps, config.iterations_per_loop, config.use_tpu, 10)]) else: assert mode == tf.estimator.ModeKeys.PREDICT output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, predictions=utils.flatten_dict(model.outputs), scaffold_fn=scaffold_fn) utils.log("Building complete") return output_spec return model_fn class ModelRunner(object): 
"""Fine-tunes a model on a supervised task.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks, pretraining_config=None): self._config = config self._tasks = tasks self._preprocessor = preprocessing.Preprocessor(config, self._tasks) is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2 tpu_cluster_resolver = None if config.use_tpu and config.tpu_name: tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( config.tpu_name, zone=config.tpu_zone, project=config.gcp_project) tpu_config = tf.estimator.tpu.TPUConfig( iterations_per_loop=config.iterations_per_loop, num_shards=config.num_tpu_cores, per_host_input_for_training=is_per_host, tpu_job_name=config.tpu_job_name) run_config = tf.estimator.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=config.model_dir, save_checkpoints_steps=config.save_checkpoints_steps, save_checkpoints_secs=None, tpu_config=tpu_config) if self._config.do_train: (self._train_input_fn, self.train_steps) = self._preprocessor.prepare_train() else: self._train_input_fn, self.train_steps = None, 0 model_fn = model_fn_builder( config=config, tasks=self._tasks, num_train_steps=self.train_steps, pretraining_config=pretraining_config) self._estimator = tf.estimator.tpu.TPUEstimator( use_tpu=config.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=config.train_batch_size, eval_batch_size=config.eval_batch_size, predict_batch_size=config.predict_batch_size) def train(self): utils.log("Training for {:} steps".format(self.train_steps)) self._estimator.train( input_fn=self._train_input_fn, max_steps=self.train_steps) def evaluate(self): return {task.name: self.evaluate_task(task) for task in self._tasks} def evaluate_task(self, task, split="dev", return_results=True): """Evaluate the current model.""" utils.log("Evaluating", task.name) eval_input_fn, _ = self._preprocessor.prepare_predict([task], split) results = self._estimator.predict(input_fn=eval_input_fn, yield_single_examples=True) scorer = task.get_scorer() for r in results: if r["task_id"] != len(self._tasks): # ignore padding examples r = utils.nest_dict(r, self._config.task_names) scorer.update(r[task.name]) if return_results: utils.log(task.name + ": " + scorer.results_str()) utils.log() return dict(scorer.get_results()) else: return scorer def write_classification_outputs(self, tasks, trial, split): """Write classification predictions to disk.""" utils.log("Writing out predictions for", tasks, split) predict_input_fn, _ = self._preprocessor.prepare_predict(tasks, split) results = self._estimator.predict(input_fn=predict_input_fn, yield_single_examples=True) # task name -> eid -> model-logits logits = collections.defaultdict(dict) for r in results: if r["task_id"] != len(self._tasks): r = utils.nest_dict(r, self._config.task_names) task_name = self._config.task_names[r["task_id"]] logits[task_name][r[task_name]["eid"]] = ( r[task_name]["logits"] if "logits" in r[task_name] else r[task_name]["predictions"]) for task_name in logits: utils.log("Pickling predictions for {:} {:} examples ({:})".format( len(logits[task_name]), task_name, split)) if trial <= self._config.n_writes_test: utils.write_pickle(logits[task_name], self._config.test_predictions( task_name, split, trial)) def write_results(config: configure_finetuning.FinetuningConfig, results): """Write evaluation metrics to disk.""" utils.log("Writing results to", config.results_txt) utils.mkdir(config.results_txt.rsplit("/", 1)[0]) utils.write_pickle(results, config.results_pkl) with 
tf.io.gfile.GFile(config.results_txt, "w") as f: results_str = "" for trial_results in results: for task_name, task_results in trial_results.items(): if task_name == "time" or task_name == "global_step": continue results_str += task_name + ": " + " - ".join( ["{:}: {:.2f}".format(k, v) for k, v in task_results.items()]) + "\n" f.write(results_str) utils.write_pickle(results, config.results_pkl) def run_finetuning(config: configure_finetuning.FinetuningConfig): """Run finetuning.""" # Setup for training results = [] trial = 1 heading_info = "model={:}, trial {:}/{:}".format( config.model_name, trial, config.num_trials) heading = lambda
import configure_finetuning
random_line_split
run_finetuning.py
(losses, -1) * tf.one_hot(features["task_id"], len(config.task_names))) def model_fn_builder(config: configure_finetuning.FinetuningConfig, tasks, num_train_steps, pretraining_config=None): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" utils.log("Building model...") is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = FinetuningModel( config, tasks, is_training, features, num_train_steps) # Load pre-trained weights from checkpoint init_checkpoint = config.init_checkpoint if pretraining_config is not None: init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir) utils.log("Using checkpoint", init_checkpoint) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: assignment_map, _ = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint) if config.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) # Build model for training or prediction if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( model.loss, config.learning_rate, num_train_steps, weight_decay_rate=config.weight_decay_rate, use_tpu=config.use_tpu, warmup_proportion=config.warmup_proportion, layerwise_lr_decay_power=config.layerwise_lr_decay, n_transformer_layers=model.bert_config.num_hidden_layers ) output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, loss=model.loss, train_op=train_op, scaffold_fn=scaffold_fn, training_hooks=[training_utils.ETAHook( {} if config.use_tpu else dict(loss=model.loss), num_train_steps, config.iterations_per_loop, config.use_tpu, 10)]) else: assert mode == tf.estimator.ModeKeys.PREDICT output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, predictions=utils.flatten_dict(model.outputs), scaffold_fn=scaffold_fn) utils.log("Building complete") return output_spec return model_fn class ModelRunner(object): """Fine-tunes a model on a supervised task.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks, pretraining_config=None): self._config = config self._tasks = tasks self._preprocessor = preprocessing.Preprocessor(config, self._tasks) is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2 tpu_cluster_resolver = None if config.use_tpu and config.tpu_name: tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( config.tpu_name, zone=config.tpu_zone, project=config.gcp_project) tpu_config = tf.estimator.tpu.TPUConfig( iterations_per_loop=config.iterations_per_loop, num_shards=config.num_tpu_cores, per_host_input_for_training=is_per_host, tpu_job_name=config.tpu_job_name) run_config = tf.estimator.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=config.model_dir, save_checkpoints_steps=config.save_checkpoints_steps, save_checkpoints_secs=None, tpu_config=tpu_config) if self._config.do_train: (self._train_input_fn, self.train_steps) = self._preprocessor.prepare_train() else: self._train_input_fn, self.train_steps = None, 0 model_fn = model_fn_builder( config=config, tasks=self._tasks, num_train_steps=self.train_steps, pretraining_config=pretraining_config) self._estimator = tf.estimator.tpu.TPUEstimator( use_tpu=config.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=config.train_batch_size, eval_batch_size=config.eval_batch_size, predict_batch_size=config.predict_batch_size) def train(self): 
utils.log("Training for {:} steps".format(self.train_steps)) self._estimator.train( input_fn=self._train_input_fn, max_steps=self.train_steps) def evaluate(self): return {task.name: self.evaluate_task(task) for task in self._tasks} def evaluate_task(self, task, split="dev", return_results=True): """Evaluate the current model.""" utils.log("Evaluating", task.name) eval_input_fn, _ = self._preprocessor.prepare_predict([task], split) results = self._estimator.predict(input_fn=eval_input_fn, yield_single_examples=True) scorer = task.get_scorer() for r in results: if r["task_id"] != len(self._tasks): # ignore padding examples r = utils.nest_dict(r, self._config.task_names) scorer.update(r[task.name]) if return_results: utils.log(task.name + ": " + scorer.results_str()) utils.log() return dict(scorer.get_results()) else: return scorer def write_classification_outputs(self, tasks, trial, split): """Write classification predictions to disk.""" utils.log("Writing out predictions for", tasks, split) predict_input_fn, _ = self._preprocessor.prepare_predict(tasks, split) results = self._estimator.predict(input_fn=predict_input_fn, yield_single_examples=True) # task name -> eid -> model-logits logits = collections.defaultdict(dict) for r in results: if r["task_id"] != len(self._tasks): r = utils.nest_dict(r, self._config.task_names) task_name = self._config.task_names[r["task_id"]] logits[task_name][r[task_name]["eid"]] = ( r[task_name]["logits"] if "logits" in r[task_name] else r[task_name]["predictions"]) for task_name in logits: utils.log("Pickling predictions for {:} {:} examples ({:})".format( len(logits[task_name]), task_name, split)) if trial <= self._config.n_writes_test: utils.write_pickle(logits[task_name], self._config.test_predictions( task_name, split, trial)) def write_results(config: configure_finetuning.FinetuningConfig, results): """Write evaluation metrics to disk.""" utils.log("Writing results to", config.results_txt) utils.mkdir(config.results_txt.rsplit("/", 1)[0]) utils.write_pickle(results, config.results_pkl) with tf.io.gfile.GFile(config.results_txt, "w") as f: results_str = "" for trial_results in results: for task_name, task_results in trial_results.items(): if task_name == "time" or task_name == "global_step": continue results_str += task_name + ": " + " - ".join( ["{:}: {:.2f}".format(k, v) for k, v in task_results.items()]) + "\n" f.write(results_str) utils.write_pickle(results, config.results_pkl) def run_finetuning(config: configure_finetuning.FinetuningConfig): """Run finetuning.""" # Setup for training results = [] trial = 1 heading_info = "model={:}, trial {:}/{:}".format( config.model_name, trial, config.num_trials) heading = lambda msg: utils.heading(msg + ": " + heading_info) heading("Config") utils.log_config(config) generic_model_dir = config.model_dir tasks = task_builder.get_tasks(config) # Train and evaluate num_trials models with different random seeds while config.num_trials < 0 or trial <= config.num_trials: config.model_dir = generic_model_dir + "_" + str(trial) if config.do_train: utils.rmkdir(config.model_dir) model_runner = ModelRunner(config, tasks) if config.do_train: heading("Start training") model_runner.train() utils.log() if config.do_eval: heading("Run dev set evaluation") results.append(model_runner.evaluate()) write_results(config, results) if config.write_test_outputs and trial <= config.n_writes_test: heading("Running on the test set and writing the predictions") for task in tasks: # Currently only writing preds for GLUE and SQuAD 2.0 is 
supported if task.name in ["cola", "mrpc", "mnli", "sst", "rte", "qnli", "qqp", "sts"]: for split in task.get_test_splits(): model_runner.write_classification_outputs([task], trial, split) elif task.name == "squad":
scorer = model_runner.evaluate_task(task, "test", False) scorer.write_predictions() preds = utils.load_json(config.qa_preds_file("squad")) null_odds = utils.load_json(config.qa_na_file("squad")) for q, _ in preds.items(): if null_odds[q] > config.qa_na_threshold: preds[q] = "" utils.write_json(preds, config.test_predictions( task.name, "test", trial))
conditional_block
run_finetuning.py
_config(config) self.bert_config = bert_config if config.debug: bert_config.num_hidden_layers = 3 bert_config.hidden_size = 144 bert_config.intermediate_size = 144 * 4 bert_config.num_attention_heads = 4 assert config.max_seq_length <= bert_config.max_position_embeddings bert_model = modeling.BertModel( bert_config=bert_config, is_training=is_training, input_ids=features["input_ids"], input_mask=features["input_mask"], token_type_ids=features["segment_ids"], use_one_hot_embeddings=config.use_tpu, embedding_size=config.embedding_size) percent_done = (tf.cast(tf.train.get_or_create_global_step(), tf.float32) / tf.cast(num_train_steps, tf.float32)) # Add specific tasks self.outputs = {"task_id": features["task_id"]} losses = [] for task in tasks: with tf.variable_scope("task_specific/" + task.name): task_losses, task_outputs = task.get_prediction_module( bert_model, features, is_training, percent_done) losses.append(task_losses) self.outputs[task.name] = task_outputs self.loss = tf.reduce_sum( tf.stack(losses, -1) * tf.one_hot(features["task_id"], len(config.task_names))) def model_fn_builder(config: configure_finetuning.FinetuningConfig, tasks, num_train_steps, pretraining_config=None): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" utils.log("Building model...") is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = FinetuningModel( config, tasks, is_training, features, num_train_steps) # Load pre-trained weights from checkpoint init_checkpoint = config.init_checkpoint if pretraining_config is not None: init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir) utils.log("Using checkpoint", init_checkpoint) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: assignment_map, _ = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint) if config.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) # Build model for training or prediction if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( model.loss, config.learning_rate, num_train_steps, weight_decay_rate=config.weight_decay_rate, use_tpu=config.use_tpu, warmup_proportion=config.warmup_proportion, layerwise_lr_decay_power=config.layerwise_lr_decay, n_transformer_layers=model.bert_config.num_hidden_layers ) output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, loss=model.loss, train_op=train_op, scaffold_fn=scaffold_fn, training_hooks=[training_utils.ETAHook( {} if config.use_tpu else dict(loss=model.loss), num_train_steps, config.iterations_per_loop, config.use_tpu, 10)]) else: assert mode == tf.estimator.ModeKeys.PREDICT output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, predictions=utils.flatten_dict(model.outputs), scaffold_fn=scaffold_fn) utils.log("Building complete") return output_spec return model_fn class ModelRunner(object): """Fine-tunes a model on a supervised task.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks, pretraining_config=None): self._config = config self._tasks = tasks self._preprocessor = preprocessing.Preprocessor(config, self._tasks) is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2 tpu_cluster_resolver = None if config.use_tpu and config.tpu_name: tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( 
config.tpu_name, zone=config.tpu_zone, project=config.gcp_project) tpu_config = tf.estimator.tpu.TPUConfig( iterations_per_loop=config.iterations_per_loop, num_shards=config.num_tpu_cores, per_host_input_for_training=is_per_host, tpu_job_name=config.tpu_job_name) run_config = tf.estimator.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=config.model_dir, save_checkpoints_steps=config.save_checkpoints_steps, save_checkpoints_secs=None, tpu_config=tpu_config) if self._config.do_train: (self._train_input_fn, self.train_steps) = self._preprocessor.prepare_train() else: self._train_input_fn, self.train_steps = None, 0 model_fn = model_fn_builder( config=config, tasks=self._tasks, num_train_steps=self.train_steps, pretraining_config=pretraining_config) self._estimator = tf.estimator.tpu.TPUEstimator( use_tpu=config.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=config.train_batch_size, eval_batch_size=config.eval_batch_size, predict_batch_size=config.predict_batch_size) def train(self): utils.log("Training for {:} steps".format(self.train_steps)) self._estimator.train( input_fn=self._train_input_fn, max_steps=self.train_steps) def evaluate(self): return {task.name: self.evaluate_task(task) for task in self._tasks} def evaluate_task(self, task, split="dev", return_results=True): """Evaluate the current model.""" utils.log("Evaluating", task.name) eval_input_fn, _ = self._preprocessor.prepare_predict([task], split) results = self._estimator.predict(input_fn=eval_input_fn, yield_single_examples=True) scorer = task.get_scorer() for r in results: if r["task_id"] != len(self._tasks): # ignore padding examples r = utils.nest_dict(r, self._config.task_names) scorer.update(r[task.name]) if return_results: utils.log(task.name + ": " + scorer.results_str()) utils.log() return dict(scorer.get_results()) else: return scorer def write_classification_outputs(self, tasks, trial, split): """Write classification predictions to disk.""" utils.log("Writing out predictions for", tasks, split) predict_input_fn, _ = self._preprocessor.prepare_predict(tasks, split) results = self._estimator.predict(input_fn=predict_input_fn, yield_single_examples=True) # task name -> eid -> model-logits logits = collections.defaultdict(dict) for r in results: if r["task_id"] != len(self._tasks): r = utils.nest_dict(r, self._config.task_names) task_name = self._config.task_names[r["task_id"]] logits[task_name][r[task_name]["eid"]] = ( r[task_name]["logits"] if "logits" in r[task_name] else r[task_name]["predictions"]) for task_name in logits: utils.log("Pickling predictions for {:} {:} examples ({:})".format( len(logits[task_name]), task_name, split)) if trial <= self._config.n_writes_test: utils.write_pickle(logits[task_name], self._config.test_predictions( task_name, split, trial)) def write_results(config: configure_finetuning.FinetuningConfig, results): """Write evaluation metrics to disk.""" utils.log("Writing results to", config.results_txt) utils.mkdir(config.results_txt.rsplit("/", 1)[0]) utils.write_pickle(results, config.results_pkl) with tf.io.gfile.GFile(config.results_txt, "w") as f: results_str = "" for trial_results in results: for task_name, task_results in trial_results.items(): if task_name == "time" or task_name == "global_step": continue results_str += task_name + ": " + " - ".join( ["{:}: {:.2f}".format(k, v) for k, v in task_results.items()]) + "\n" f.write(results_str) utils.write_pickle(results, config.results_pkl) def run_finetuning(config: configure_finetuning.FinetuningConfig):
"""Run finetuning.""" # Setup for training results = [] trial = 1 heading_info = "model={:}, trial {:}/{:}".format( config.model_name, trial, config.num_trials) heading = lambda msg: utils.heading(msg + ": " + heading_info) heading("Config") utils.log_config(config) generic_model_dir = config.model_dir tasks = task_builder.get_tasks(config) # Train and evaluate num_trials models with different random seeds while config.num_trials < 0 or trial <= config.num_trials: config.model_dir = generic_model_dir + "_" + str(trial) if config.do_train: utils.rmkdir(config.model_dir) model_runner = ModelRunner(config, tasks)
identifier_body
run_finetuning.py
(object): """Finetuning model with support for multi-task training.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks, is_training, features, num_train_steps): # Create a shared transformer encoder bert_config = training_utils.get_bert_config(config) self.bert_config = bert_config if config.debug: bert_config.num_hidden_layers = 3 bert_config.hidden_size = 144 bert_config.intermediate_size = 144 * 4 bert_config.num_attention_heads = 4 assert config.max_seq_length <= bert_config.max_position_embeddings bert_model = modeling.BertModel( bert_config=bert_config, is_training=is_training, input_ids=features["input_ids"], input_mask=features["input_mask"], token_type_ids=features["segment_ids"], use_one_hot_embeddings=config.use_tpu, embedding_size=config.embedding_size) percent_done = (tf.cast(tf.train.get_or_create_global_step(), tf.float32) / tf.cast(num_train_steps, tf.float32)) # Add specific tasks self.outputs = {"task_id": features["task_id"]} losses = [] for task in tasks: with tf.variable_scope("task_specific/" + task.name): task_losses, task_outputs = task.get_prediction_module( bert_model, features, is_training, percent_done) losses.append(task_losses) self.outputs[task.name] = task_outputs self.loss = tf.reduce_sum( tf.stack(losses, -1) * tf.one_hot(features["task_id"], len(config.task_names))) def model_fn_builder(config: configure_finetuning.FinetuningConfig, tasks, num_train_steps, pretraining_config=None): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" utils.log("Building model...") is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = FinetuningModel( config, tasks, is_training, features, num_train_steps) # Load pre-trained weights from checkpoint init_checkpoint = config.init_checkpoint if pretraining_config is not None: init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir) utils.log("Using checkpoint", init_checkpoint) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: assignment_map, _ = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint) if config.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) # Build model for training or prediction if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( model.loss, config.learning_rate, num_train_steps, weight_decay_rate=config.weight_decay_rate, use_tpu=config.use_tpu, warmup_proportion=config.warmup_proportion, layerwise_lr_decay_power=config.layerwise_lr_decay, n_transformer_layers=model.bert_config.num_hidden_layers ) output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, loss=model.loss, train_op=train_op, scaffold_fn=scaffold_fn, training_hooks=[training_utils.ETAHook( {} if config.use_tpu else dict(loss=model.loss), num_train_steps, config.iterations_per_loop, config.use_tpu, 10)]) else: assert mode == tf.estimator.ModeKeys.PREDICT output_spec = tf.estimator.tpu.TPUEstimatorSpec( mode=mode, predictions=utils.flatten_dict(model.outputs), scaffold_fn=scaffold_fn) utils.log("Building complete") return output_spec return model_fn class ModelRunner(object): """Fine-tunes a model on a supervised task.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks, pretraining_config=None): self._config = config self._tasks = tasks self._preprocessor = 
preprocessing.Preprocessor(config, self._tasks) is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2 tpu_cluster_resolver = None if config.use_tpu and config.tpu_name: tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( config.tpu_name, zone=config.tpu_zone, project=config.gcp_project) tpu_config = tf.estimator.tpu.TPUConfig( iterations_per_loop=config.iterations_per_loop, num_shards=config.num_tpu_cores, per_host_input_for_training=is_per_host, tpu_job_name=config.tpu_job_name) run_config = tf.estimator.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=config.model_dir, save_checkpoints_steps=config.save_checkpoints_steps, save_checkpoints_secs=None, tpu_config=tpu_config) if self._config.do_train: (self._train_input_fn, self.train_steps) = self._preprocessor.prepare_train() else: self._train_input_fn, self.train_steps = None, 0 model_fn = model_fn_builder( config=config, tasks=self._tasks, num_train_steps=self.train_steps, pretraining_config=pretraining_config) self._estimator = tf.estimator.tpu.TPUEstimator( use_tpu=config.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=config.train_batch_size, eval_batch_size=config.eval_batch_size, predict_batch_size=config.predict_batch_size) def train(self): utils.log("Training for {:} steps".format(self.train_steps)) self._estimator.train( input_fn=self._train_input_fn, max_steps=self.train_steps) def
(self): return {task.name: self.evaluate_task(task) for task in self._tasks} def evaluate_task(self, task, split="dev", return_results=True): """Evaluate the current model.""" utils.log("Evaluating", task.name) eval_input_fn, _ = self._preprocessor.prepare_predict([task], split) results = self._estimator.predict(input_fn=eval_input_fn, yield_single_examples=True) scorer = task.get_scorer() for r in results: if r["task_id"] != len(self._tasks): # ignore padding examples r = utils.nest_dict(r, self._config.task_names) scorer.update(r[task.name]) if return_results: utils.log(task.name + ": " + scorer.results_str()) utils.log() return dict(scorer.get_results()) else: return scorer def write_classification_outputs(self, tasks, trial, split): """Write classification predictions to disk.""" utils.log("Writing out predictions for", tasks, split) predict_input_fn, _ = self._preprocessor.prepare_predict(tasks, split) results = self._estimator.predict(input_fn=predict_input_fn, yield_single_examples=True) # task name -> eid -> model-logits logits = collections.defaultdict(dict) for r in results: if r["task_id"] != len(self._tasks): r = utils.nest_dict(r, self._config.task_names) task_name = self._config.task_names[r["task_id"]] logits[task_name][r[task_name]["eid"]] = ( r[task_name]["logits"] if "logits" in r[task_name] else r[task_name]["predictions"]) for task_name in logits: utils.log("Pickling predictions for {:} {:} examples ({:})".format( len(logits[task_name]), task_name, split)) if trial <= self._config.n_writes_test: utils.write_pickle(logits[task_name], self._config.test_predictions( task_name, split, trial)) def write_results(config: configure_finetuning.FinetuningConfig, results): """Write evaluation metrics to disk.""" utils.log("Writing results to", config.results_txt) utils.mkdir(config.results_txt.rsplit("/", 1)[0]) utils.write_pickle(results, config.results_pkl) with tf.io.gfile.GFile(config.results_txt, "w") as f: results_str = "" for trial_results in results: for task_name, task_results in trial_results.items(): if task_name == "time" or task_name == "global_step": continue results_str += task_name + ": " + " - ".join( ["{:}: {:.2f}".format(k, v) for k, v in task_results.items()]) + "\n" f.write(results_str) utils.write_pickle(results, config.results_pkl) def run_finetuning(config: configure_finetuning.FinetuningConfig): """Run finetuning.""" # Setup for training results = [] trial = 1 heading_info = "model={:}, trial {:}/{:}".format( config.model_name, trial, config.num_trials) heading = lambda msg: utils.heading(msg + ": " + heading_info) heading("Config") utils.log_config(config) generic_model_dir = config.model_dir tasks = task_builder.get_tasks(config) # Train and
evaluate
identifier_name
tehgladiators.py
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase

class ComicData(ComicDataBase):
    name = "Teh Gladiators"
    language = "en"
    url = "http://www.tehgladiators.com/"
    start_date = "2008-03-18"
    rights = "Uros Jojic & Borislav Grabovic"

class Crawler(CrawlerBase):
    history_capable_days = 90
    schedule = "We"
    time_zone = "Europe/Belgrade"

    def crawl(self, pub_date):
        feed = self.parse_feed("http://www.tehgladiators.com/rss.xml")
        for entry in feed.for_date(pub_date):
            page = self.parse_page(entry.link)
            url = page.src('img[alt^="Teh Gladiators Webcomic"]')
            title = entry.title
            return CrawlerImage(url, title)
identifier_body
tehgladiators.py
from comics.aggregator.crawler import CrawlerBase, CrawlerImage from comics.core.comic_data import ComicDataBase class ComicData(ComicDataBase): name = "Teh Gladiators" language = "en" url = "http://www.tehgladiators.com/" start_date = "2008-03-18" rights = "Uros Jojic & Borislav Grabovic" class Crawler(CrawlerBase): history_capable_days = 90 schedule = "We" time_zone = "Europe/Belgrade" def crawl(self, pub_date): feed = self.parse_feed("http://www.tehgladiators.com/rss.xml") for entry in feed.for_date(pub_date):
            title = entry.title
            return CrawlerImage(url, title)
            page = self.parse_page(entry.link)
            url = page.src('img[alt^="Teh Gladiators Webcomic"]')
random_line_split
tehgladiators.py
from comics.aggregator.crawler import CrawlerBase, CrawlerImage from comics.core.comic_data import ComicDataBase class ComicData(ComicDataBase): name = "Teh Gladiators" language = "en" url = "http://www.tehgladiators.com/" start_date = "2008-03-18" rights = "Uros Jojic & Borislav Grabovic" class Crawler(CrawlerBase): history_capable_days = 90 schedule = "We" time_zone = "Europe/Belgrade" def crawl(self, pub_date): feed = self.parse_feed("http://www.tehgladiators.com/rss.xml") for entry in feed.for_date(pub_date):
page = self.parse_page(entry.link) url = page.src('img[alt^="Teh Gladiators Webcomic"]') title = entry.title return CrawlerImage(url, title)
conditional_block
tehgladiators.py
from comics.aggregator.crawler import CrawlerBase, CrawlerImage from comics.core.comic_data import ComicDataBase class
(ComicDataBase): name = "Teh Gladiators" language = "en" url = "http://www.tehgladiators.com/" start_date = "2008-03-18" rights = "Uros Jojic & Borislav Grabovic" class Crawler(CrawlerBase): history_capable_days = 90 schedule = "We" time_zone = "Europe/Belgrade" def crawl(self, pub_date): feed = self.parse_feed("http://www.tehgladiators.com/rss.xml") for entry in feed.for_date(pub_date): page = self.parse_page(entry.link) url = page.src('img[alt^="Teh Gladiators Webcomic"]') title = entry.title return CrawlerImage(url, title)
ComicData
identifier_name
request_proxy.rs
use std::io::Read;
use std::net::SocketAddr;

use conduit;
#[allow(missing_debug_implementations)]
pub struct RequestProxy<'a> {
    pub other: &'a mut (Request + 'a),
    pub path: Option<&'a str>,
    pub method: Option<conduit::Method>,
}

impl<'a> Request for RequestProxy<'a> {
    fn http_version(&self) -> semver::Version {
        self.other.http_version()
    }
    fn conduit_version(&self) -> semver::Version {
        self.other.conduit_version()
    }
    fn method(&self) -> conduit::Method {
        self.method.clone().unwrap_or_else(
            || self.other.method().clone(),
        )
    }
    fn scheme(&self) -> conduit::Scheme {
        self.other.scheme()
    }
    fn host(&self) -> conduit::Host {
        self.other.host()
    }
    fn virtual_root(&self) -> Option<&str> {
        self.other.virtual_root()
    }
    fn path(&self) -> &str {
        self.path.map(|s| &*s).unwrap_or_else(|| self.other.path())
    }
    fn query_string(&self) -> Option<&str> {
        self.other.query_string()
    }
    fn remote_addr(&self) -> SocketAddr {
        self.other.remote_addr()
    }
    fn content_length(&self) -> Option<u64> {
        self.other.content_length()
    }
    fn headers(&self) -> &conduit::Headers {
        self.other.headers()
    }
    fn body(&mut self) -> &mut Read {
        self.other.body()
    }
    fn extensions(&self) -> &conduit::Extensions {
        self.other.extensions()
    }
    fn mut_extensions(&mut self) -> &mut conduit::Extensions {
        self.other.mut_extensions()
    }
}
use conduit::Request;
use semver;

// Can't derive Debug because of Request.
random_line_split
request_proxy.rs
use std::io::Read;
use std::net::SocketAddr;

use conduit;
use conduit::Request;
use semver;

// Can't derive Debug because of Request.
#[allow(missing_debug_implementations)]
pub struct RequestProxy<'a> {
    pub other: &'a mut (Request + 'a),
    pub path: Option<&'a str>,
    pub method: Option<conduit::Method>,
}

impl<'a> Request for RequestProxy<'a> {
    fn http_version(&self) -> semver::Version {
        self.other.http_version()
    }
    fn conduit_version(&self) -> semver::Version {
        self.other.conduit_version()
    }
    fn method(&self) -> conduit::Method {
        self.method.clone().unwrap_or_else(
            || self.other.method().clone(),
        )
    }
    fn scheme(&self) -> conduit::Scheme
    fn host(&self) -> conduit::Host {
        self.other.host()
    }
    fn virtual_root(&self) -> Option<&str> {
        self.other.virtual_root()
    }
    fn path(&self) -> &str {
        self.path.map(|s| &*s).unwrap_or_else(|| self.other.path())
    }
    fn query_string(&self) -> Option<&str> {
        self.other.query_string()
    }
    fn remote_addr(&self) -> SocketAddr {
        self.other.remote_addr()
    }
    fn content_length(&self) -> Option<u64> {
        self.other.content_length()
    }
    fn headers(&self) -> &conduit::Headers {
        self.other.headers()
    }
    fn body(&mut self) -> &mut Read {
        self.other.body()
    }
    fn extensions(&self) -> &conduit::Extensions {
        self.other.extensions()
    }
    fn mut_extensions(&mut self) -> &mut conduit::Extensions {
        self.other.mut_extensions()
    }
}
{
        self.other.scheme()
    }
identifier_body
request_proxy.rs
use std::io::Read;
use std::net::SocketAddr;

use conduit;
use conduit::Request;
use semver;

// Can't derive Debug because of Request.
#[allow(missing_debug_implementations)]
pub struct RequestProxy<'a> {
    pub other: &'a mut (Request + 'a),
    pub path: Option<&'a str>,
    pub method: Option<conduit::Method>,
}

impl<'a> Request for RequestProxy<'a> {
    fn http_version(&self) -> semver::Version {
        self.other.http_version()
    }
    fn conduit_version(&self) -> semver::Version {
        self.other.conduit_version()
    }
    fn method(&self) -> conduit::Method {
        self.method.clone().unwrap_or_else(
            || self.other.method().clone(),
        )
    }
    fn scheme(&self) -> conduit::Scheme {
        self.other.scheme()
    }
    fn host(&self) -> conduit::Host {
        self.other.host()
    }
    fn virtual_root(&self) -> Option<&str> {
        self.other.virtual_root()
    }
    fn path(&self) -> &str {
        self.path.map(|s| &*s).unwrap_or_else(|| self.other.path())
    }
    fn query_string(&self) -> Option<&str> {
        self.other.query_string()
    }
    fn remote_addr(&self) -> SocketAddr {
        self.other.remote_addr()
    }
    fn
(&self) -> Option<u64> {
        self.other.content_length()
    }
    fn headers(&self) -> &conduit::Headers {
        self.other.headers()
    }
    fn body(&mut self) -> &mut Read {
        self.other.body()
    }
    fn extensions(&self) -> &conduit::Extensions {
        self.other.extensions()
    }
    fn mut_extensions(&mut self) -> &mut conduit::Extensions {
        self.other.mut_extensions()
    }
}
content_length
identifier_name
ControlButton.js
import React from 'react'
import {TouchableOpacity, Image} from 'react-native'
import {ResponsiveStyleSheet} from 'react-native-responsive-stylesheet'
import {resource} from '../utils/image'
  return (
    <TouchableOpacity onPress={onPress} style={[s.button, {backgroundColor}]} activeOpacity={0.8}>
      <Image source={resource(`icons/${type}.png`)} style={s.image} />
    </TouchableOpacity>
  )
}

const makeStyles = ResponsiveStyleSheet.create(({controlButtonSize}) => {
  const padding = 20
  return {
    button: {
      padding: padding,
      width: controlButtonSize,
      height: controlButtonSize,
      borderRadius: controlButtonSize / 2,
    },
    image: {
      width: controlButtonSize - 2 * padding,
      height: controlButtonSize - 2 * padding,
    },
  }
})
export const ControlButton = ({onPress, type, backgroundColor}) => {
  const s = makeStyles()
random_line_split
zepto.js
/*!
 * CanJS - 1.1.4 (2013-02-05)
 * http://canjs.us/
 * Copyright (c) 2013 Bitovi
 * Licensed MIT
 */
define(['can/util/can', 'zepto', 'can/util/object/isplain', 'can/util/event', 'can/util/fragment', 'can/util/deferred', 'can/util/array/each'], function (can) {
    var $ = Zepto;

    // data.js
    // ---------
    // _jQuery-like data methods._

    var data = {},
        dataAttr = $.fn.data,
        uuid = $.uuid = +new Date(),
        exp = $.expando = 'Zepto' + uuid;

    function getData(node, name) {
        var id = node[exp],
            store = id && data[id];
        return name === undefined ? store || setData(node) : (store && store[name]) || dataAttr.call($(node), name);
    }

    function
(node, name, value) { var id = node[exp] || (node[exp] = ++uuid), store = data[id] || (data[id] = {}); if (name !== undefined) store[name] = value; return store; }; $.fn.data = function (name, value) { return value === undefined ? this.length == 0 ? undefined : getData(this[0], name) : this.each(function (idx) { setData(this, name, $.isFunction(value) ? value.call(this, idx, getData(this, name)) : value); }); }; $.cleanData = function (elems) { for (var i = 0, elem; (elem = elems[i]) !== undefined; i++) { can.trigger(elem, "destroyed", [], false) var id = elem[exp] delete data[id]; } } // zepto.js // --------- // _Zepto node list._ var oldEach = can.each; // Extend what you can out of Zepto. $.extend(can, Zepto); can.each = oldEach; var arrHas = function (obj, name) { return obj[0] && obj[0][name] || obj[name] } // Do what's similar for jQuery. can.trigger = function (obj, event, args, bubble) { if (obj.trigger) { obj.trigger(event, args) } else if (arrHas(obj, "dispatchEvent")) { if (bubble === false) { $([obj]).triggerHandler(event, args) } else { $([obj]).trigger(event, args) } } else { if (typeof event == "string") { event = { type: event } } event.target = event.target || obj; event.data = args; can.dispatch.call(obj, event) } } can.$ = Zepto; can.bind = function (ev, cb) { // If we can bind to it... if (this.bind) { this.bind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).bind(ev, cb) } else { can.addEvent.call(this, ev, cb) } return this; } can.unbind = function (ev, cb) { // If we can bind to it... if (this.unbind) { this.unbind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).unbind(ev, cb) } else { can.removeEvent.call(this, ev, cb) } return this; } can.delegate = function (selector, ev, cb) { if (this.delegate) { this.delegate(selector, ev, cb) } else { $([this]).delegate(selector, ev, cb) } } can.undelegate = function (selector, ev, cb) { if (this.undelegate) { this.undelegate(selector, ev, cb) } else { $([this]).undelegate(selector, ev, cb) } } $.each(["append", "filter", "addClass", "remove", "data"], function (i, name) { can[name] = function (wrapped) { return wrapped[name].apply(wrapped, can.makeArray(arguments).slice(1)) } }) can.makeArray = function (arr) { var ret = [] can.each(arr, function (a, i) { ret[i] = a }) return ret; }; can.proxy = function (f, ctx) { return function () { return f.apply(ctx, arguments) } } // Make ajax. var XHR = $.ajaxSettings.xhr; $.ajaxSettings.xhr = function () { var xhr = XHR() var open = xhr.open; xhr.open = function (type, url, async) { open.call(this, type, url, ASYNC === undefined ? true : ASYNC) } return xhr; } var ASYNC; var AJAX = $.ajax; var updateDeferred = function (xhr, d) { for (var prop in xhr) { if (typeof d[prop] == 'function') { d[prop] = function () { xhr[prop].apply(xhr, arguments) } } else { d[prop] = prop[xhr] } } } can.ajax = function (options) { var success = options.success, error = options.error; var d = can.Deferred(); options.success = function (data) { updateDeferred(xhr, d); d.resolve.call(d, data); success && success.apply(this, arguments); } options.error = function () { updateDeferred(xhr, d); d.reject.apply(d, arguments); error && error.apply(this, arguments); } if (options.async === false) { ASYNC = false } var xhr = AJAX(options); ASYNC = undefined; updateDeferred(xhr, d); return d; }; // Make destroyed and empty work. 
$.fn.empty = function () { return this.each(function () { $.cleanData(this.getElementsByTagName('*')) this.innerHTML = '' }) } $.fn.remove = function () { $.cleanData(this); this.each(function () { if (this.parentNode != null) { // might be a text node this.getElementsByTagName && $.cleanData(this.getElementsByTagName('*')) this.parentNode.removeChild(this); } }); return this; } can.trim = function (str) { return str.trim(); } can.isEmptyObject = function (object) { var name; for (name in object) {}; return name === undefined; } // Make extend handle `true` for deep. can.extend = function (first) { if (first === true) { var args = can.makeArray(arguments); args.shift(); return $.extend.apply($, args) } return $.extend.apply($, arguments) } can.get = function (wrapped, index) { return wrapped[index]; } return can; });
setData
identifier_name
zepto.js
/*! * CanJS - 1.1.4 (2013-02-05) * http://canjs.us/ * Copyright (c) 2013 Bitovi * Licensed MIT */ define(['can/util/can', 'zepto', 'can/util/object/isplain', 'can/util/event', 'can/util/fragment', 'can/util/deferred', 'can/util/array/each'], function (can) { var $ = Zepto; // data.js // --------- // _jQuery-like data methods._ var data = {}, dataAttr = $.fn.data, uuid = $.uuid = +new Date(), exp = $.expando = 'Zepto' + uuid; function getData(node, name) { var id = node[exp], store = id && data[id]; return name === undefined ? store || setData(node) : (store && store[name]) || dataAttr.call($(node), name); } function setData(node, name, value) { var id = node[exp] || (node[exp] = ++uuid), store = data[id] || (data[id] = {}); if (name !== undefined) store[name] = value; return store; }; $.fn.data = function (name, value) { return value === undefined ? this.length == 0 ? undefined : getData(this[0], name) : this.each(function (idx) { setData(this, name, $.isFunction(value) ? value.call(this, idx, getData(this, name)) : value); }); }; $.cleanData = function (elems) { for (var i = 0, elem; (elem = elems[i]) !== undefined; i++) { can.trigger(elem, "destroyed", [], false) var id = elem[exp] delete data[id]; } } // zepto.js // --------- // _Zepto node list._ var oldEach = can.each; // Extend what you can out of Zepto. $.extend(can, Zepto); can.each = oldEach; var arrHas = function (obj, name) { return obj[0] && obj[0][name] || obj[name] } // Do what's similar for jQuery. can.trigger = function (obj, event, args, bubble) { if (obj.trigger) { obj.trigger(event, args) } else if (arrHas(obj, "dispatchEvent")) { if (bubble === false) { $([obj]).triggerHandler(event, args) } else { $([obj]).trigger(event, args) } } else { if (typeof event == "string") { event = { type: event } } event.target = event.target || obj; event.data = args; can.dispatch.call(obj, event) } } can.$ = Zepto; can.bind = function (ev, cb) { // If we can bind to it... if (this.bind) { this.bind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).bind(ev, cb) } else { can.addEvent.call(this, ev, cb) } return this; } can.unbind = function (ev, cb) { // If we can bind to it... if (this.unbind) { this.unbind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).unbind(ev, cb) } else { can.removeEvent.call(this, ev, cb) } return this; } can.delegate = function (selector, ev, cb) { if (this.delegate) { this.delegate(selector, ev, cb) } else { $([this]).delegate(selector, ev, cb) } } can.undelegate = function (selector, ev, cb) { if (this.undelegate) { this.undelegate(selector, ev, cb) } else { $([this]).undelegate(selector, ev, cb) } } $.each(["append", "filter", "addClass", "remove", "data"], function (i, name) { can[name] = function (wrapped) { return wrapped[name].apply(wrapped, can.makeArray(arguments).slice(1)) } }) can.makeArray = function (arr) { var ret = [] can.each(arr, function (a, i) { ret[i] = a }) return ret; };
} // Make ajax. var XHR = $.ajaxSettings.xhr; $.ajaxSettings.xhr = function () { var xhr = XHR() var open = xhr.open; xhr.open = function (type, url, async) { open.call(this, type, url, ASYNC === undefined ? true : ASYNC) } return xhr; } var ASYNC; var AJAX = $.ajax; var updateDeferred = function (xhr, d) { for (var prop in xhr) { if (typeof d[prop] == 'function') { d[prop] = function () { xhr[prop].apply(xhr, arguments) } } else { d[prop] = prop[xhr] } } } can.ajax = function (options) { var success = options.success, error = options.error; var d = can.Deferred(); options.success = function (data) { updateDeferred(xhr, d); d.resolve.call(d, data); success && success.apply(this, arguments); } options.error = function () { updateDeferred(xhr, d); d.reject.apply(d, arguments); error && error.apply(this, arguments); } if (options.async === false) { ASYNC = false } var xhr = AJAX(options); ASYNC = undefined; updateDeferred(xhr, d); return d; }; // Make destroyed and empty work. $.fn.empty = function () { return this.each(function () { $.cleanData(this.getElementsByTagName('*')) this.innerHTML = '' }) } $.fn.remove = function () { $.cleanData(this); this.each(function () { if (this.parentNode != null) { // might be a text node this.getElementsByTagName && $.cleanData(this.getElementsByTagName('*')) this.parentNode.removeChild(this); } }); return this; } can.trim = function (str) { return str.trim(); } can.isEmptyObject = function (object) { var name; for (name in object) {}; return name === undefined; } // Make extend handle `true` for deep. can.extend = function (first) { if (first === true) { var args = can.makeArray(arguments); args.shift(); return $.extend.apply($, args) } return $.extend.apply($, arguments) } can.get = function (wrapped, index) { return wrapped[index]; } return can; });
can.proxy = function (f, ctx) { return function () { return f.apply(ctx, arguments) }
random_line_split
zepto.js
/*! * CanJS - 1.1.4 (2013-02-05) * http://canjs.us/ * Copyright (c) 2013 Bitovi * Licensed MIT */ define(['can/util/can', 'zepto', 'can/util/object/isplain', 'can/util/event', 'can/util/fragment', 'can/util/deferred', 'can/util/array/each'], function (can) { var $ = Zepto; // data.js // --------- // _jQuery-like data methods._ var data = {}, dataAttr = $.fn.data, uuid = $.uuid = +new Date(), exp = $.expando = 'Zepto' + uuid; function getData(node, name) { var id = node[exp], store = id && data[id]; return name === undefined ? store || setData(node) : (store && store[name]) || dataAttr.call($(node), name); } function setData(node, name, value) { var id = node[exp] || (node[exp] = ++uuid), store = data[id] || (data[id] = {}); if (name !== undefined) store[name] = value; return store; }; $.fn.data = function (name, value) { return value === undefined ? this.length == 0 ? undefined : getData(this[0], name) : this.each(function (idx) { setData(this, name, $.isFunction(value) ? value.call(this, idx, getData(this, name)) : value); }); }; $.cleanData = function (elems) { for (var i = 0, elem; (elem = elems[i]) !== undefined; i++) { can.trigger(elem, "destroyed", [], false) var id = elem[exp] delete data[id]; } } // zepto.js // --------- // _Zepto node list._ var oldEach = can.each; // Extend what you can out of Zepto. $.extend(can, Zepto); can.each = oldEach; var arrHas = function (obj, name) { return obj[0] && obj[0][name] || obj[name] } // Do what's similar for jQuery. can.trigger = function (obj, event, args, bubble) { if (obj.trigger) { obj.trigger(event, args) } else if (arrHas(obj, "dispatchEvent")) { if (bubble === false) { $([obj]).triggerHandler(event, args) } else { $([obj]).trigger(event, args) } } else { if (typeof event == "string") { event = { type: event } } event.target = event.target || obj; event.data = args; can.dispatch.call(obj, event) } } can.$ = Zepto; can.bind = function (ev, cb) { // If we can bind to it... if (this.bind) { this.bind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).bind(ev, cb) } else { can.addEvent.call(this, ev, cb) } return this; } can.unbind = function (ev, cb) { // If we can bind to it... if (this.unbind) { this.unbind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).unbind(ev, cb) } else { can.removeEvent.call(this, ev, cb) } return this; } can.delegate = function (selector, ev, cb) { if (this.delegate) { this.delegate(selector, ev, cb) } else { $([this]).delegate(selector, ev, cb) } } can.undelegate = function (selector, ev, cb) { if (this.undelegate) { this.undelegate(selector, ev, cb) } else { $([this]).undelegate(selector, ev, cb) } } $.each(["append", "filter", "addClass", "remove", "data"], function (i, name) { can[name] = function (wrapped) { return wrapped[name].apply(wrapped, can.makeArray(arguments).slice(1)) } }) can.makeArray = function (arr) { var ret = [] can.each(arr, function (a, i) { ret[i] = a }) return ret; }; can.proxy = function (f, ctx) { return function () { return f.apply(ctx, arguments) } } // Make ajax. var XHR = $.ajaxSettings.xhr; $.ajaxSettings.xhr = function () { var xhr = XHR() var open = xhr.open; xhr.open = function (type, url, async) { open.call(this, type, url, ASYNC === undefined ? true : ASYNC) } return xhr; } var ASYNC; var AJAX = $.ajax; var updateDeferred = function (xhr, d) { for (var prop in xhr) { if (typeof d[prop] == 'function') { d[prop] = function () { xhr[prop].apply(xhr, arguments) } } else
} } can.ajax = function (options) { var success = options.success, error = options.error; var d = can.Deferred(); options.success = function (data) { updateDeferred(xhr, d); d.resolve.call(d, data); success && success.apply(this, arguments); } options.error = function () { updateDeferred(xhr, d); d.reject.apply(d, arguments); error && error.apply(this, arguments); } if (options.async === false) { ASYNC = false } var xhr = AJAX(options); ASYNC = undefined; updateDeferred(xhr, d); return d; }; // Make destroyed and empty work. $.fn.empty = function () { return this.each(function () { $.cleanData(this.getElementsByTagName('*')) this.innerHTML = '' }) } $.fn.remove = function () { $.cleanData(this); this.each(function () { if (this.parentNode != null) { // might be a text node this.getElementsByTagName && $.cleanData(this.getElementsByTagName('*')) this.parentNode.removeChild(this); } }); return this; } can.trim = function (str) { return str.trim(); } can.isEmptyObject = function (object) { var name; for (name in object) {}; return name === undefined; } // Make extend handle `true` for deep. can.extend = function (first) { if (first === true) { var args = can.makeArray(arguments); args.shift(); return $.extend.apply($, args) } return $.extend.apply($, arguments) } can.get = function (wrapped, index) { return wrapped[index]; } return can; });
{ d[prop] = prop[xhr] }
conditional_block
zepto.js
/*!
 * CanJS - 1.1.4 (2013-02-05)
 * http://canjs.us/
 * Copyright (c) 2013 Bitovi
 * Licensed MIT
 */
define(['can/util/can', 'zepto', 'can/util/object/isplain', 'can/util/event', 'can/util/fragment', 'can/util/deferred', 'can/util/array/each'], function (can) {
    var $ = Zepto;

    // data.js
    // ---------
    // _jQuery-like data methods._

    var data = {},
        dataAttr = $.fn.data,
        uuid = $.uuid = +new Date(),
        exp = $.expando = 'Zepto' + uuid;

    function getData(node, name) {
        var id = node[exp],
            store = id && data[id];
        return name === undefined ? store || setData(node) : (store && store[name]) || dataAttr.call($(node), name);
    }

    function setData(node, name, value)
; $.fn.data = function (name, value) { return value === undefined ? this.length == 0 ? undefined : getData(this[0], name) : this.each(function (idx) { setData(this, name, $.isFunction(value) ? value.call(this, idx, getData(this, name)) : value); }); }; $.cleanData = function (elems) { for (var i = 0, elem; (elem = elems[i]) !== undefined; i++) { can.trigger(elem, "destroyed", [], false) var id = elem[exp] delete data[id]; } } // zepto.js // --------- // _Zepto node list._ var oldEach = can.each; // Extend what you can out of Zepto. $.extend(can, Zepto); can.each = oldEach; var arrHas = function (obj, name) { return obj[0] && obj[0][name] || obj[name] } // Do what's similar for jQuery. can.trigger = function (obj, event, args, bubble) { if (obj.trigger) { obj.trigger(event, args) } else if (arrHas(obj, "dispatchEvent")) { if (bubble === false) { $([obj]).triggerHandler(event, args) } else { $([obj]).trigger(event, args) } } else { if (typeof event == "string") { event = { type: event } } event.target = event.target || obj; event.data = args; can.dispatch.call(obj, event) } } can.$ = Zepto; can.bind = function (ev, cb) { // If we can bind to it... if (this.bind) { this.bind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).bind(ev, cb) } else { can.addEvent.call(this, ev, cb) } return this; } can.unbind = function (ev, cb) { // If we can bind to it... if (this.unbind) { this.unbind(ev, cb) } else if (arrHas(this, "addEventListener")) { $([this]).unbind(ev, cb) } else { can.removeEvent.call(this, ev, cb) } return this; } can.delegate = function (selector, ev, cb) { if (this.delegate) { this.delegate(selector, ev, cb) } else { $([this]).delegate(selector, ev, cb) } } can.undelegate = function (selector, ev, cb) { if (this.undelegate) { this.undelegate(selector, ev, cb) } else { $([this]).undelegate(selector, ev, cb) } } $.each(["append", "filter", "addClass", "remove", "data"], function (i, name) { can[name] = function (wrapped) { return wrapped[name].apply(wrapped, can.makeArray(arguments).slice(1)) } }) can.makeArray = function (arr) { var ret = [] can.each(arr, function (a, i) { ret[i] = a }) return ret; }; can.proxy = function (f, ctx) { return function () { return f.apply(ctx, arguments) } } // Make ajax. var XHR = $.ajaxSettings.xhr; $.ajaxSettings.xhr = function () { var xhr = XHR() var open = xhr.open; xhr.open = function (type, url, async) { open.call(this, type, url, ASYNC === undefined ? true : ASYNC) } return xhr; } var ASYNC; var AJAX = $.ajax; var updateDeferred = function (xhr, d) { for (var prop in xhr) { if (typeof d[prop] == 'function') { d[prop] = function () { xhr[prop].apply(xhr, arguments) } } else { d[prop] = prop[xhr] } } } can.ajax = function (options) { var success = options.success, error = options.error; var d = can.Deferred(); options.success = function (data) { updateDeferred(xhr, d); d.resolve.call(d, data); success && success.apply(this, arguments); } options.error = function () { updateDeferred(xhr, d); d.reject.apply(d, arguments); error && error.apply(this, arguments); } if (options.async === false) { ASYNC = false } var xhr = AJAX(options); ASYNC = undefined; updateDeferred(xhr, d); return d; }; // Make destroyed and empty work. 
$.fn.empty = function () { return this.each(function () { $.cleanData(this.getElementsByTagName('*')) this.innerHTML = '' }) } $.fn.remove = function () { $.cleanData(this); this.each(function () { if (this.parentNode != null) { // might be a text node this.getElementsByTagName && $.cleanData(this.getElementsByTagName('*')) this.parentNode.removeChild(this); } }); return this; } can.trim = function (str) { return str.trim(); } can.isEmptyObject = function (object) { var name; for (name in object) {}; return name === undefined; } // Make extend handle `true` for deep. can.extend = function (first) { if (first === true) { var args = can.makeArray(arguments); args.shift(); return $.extend.apply($, args) } return $.extend.apply($, arguments) } can.get = function (wrapped, index) { return wrapped[index]; } return can; });
{ var id = node[exp] || (node[exp] = ++uuid), store = data[id] || (data[id] = {}); if (name !== undefined) store[name] = value; return store; }
identifier_body
auth.js
import * as Cookies from 'js-cookie';
import axios from 'axios';
import { route } from 'preact-router';

function storeUserCredentials({ user, accessToken, expires }) {
  Cookies.set('crendentials', { user, accessToken }, { expires: new Date(expires) });
}

export function
() {
  return Cookies.getJSON('crendentials');
}

export function isAuthenticated() {
  const crendentials = getUserCredentials();
  return !(crendentials === null || crendentials === undefined);
}

export function onLogin(response) {
  storeUserCredentials(response);
  route('/', true);
}

export function logout() {
  Cookies.remove('crendentials');
  route('/login', true);
}

export function authenticate(params = {}) {
  axios
    .post(`/auth/${params.network}`, { access_token: params.access_token })
    .then(response => response.data)
    .then(onLogin)
    .catch((error) => {
      alert('User could not login');
      console.error(error);
    });
}
getUserCredentials
identifier_name
auth.js
import * as Cookies from 'js-cookie';
import axios from 'axios';
import { route } from 'preact-router';

function storeUserCredentials({ user, accessToken, expires }) {
  Cookies.set('crendentials', { user, accessToken }, { expires: new Date(expires) });
}

export function getUserCredentials() {
  return Cookies.getJSON('crendentials');
}

export function isAuthenticated() {
  const crendentials = getUserCredentials();
  return !(crendentials === null || crendentials === undefined);
}

export function onLogin(response) {
  storeUserCredentials(response);
  route('/', true);
}

export function logout() {
  Cookies.remove('crendentials');
  route('/login', true);
}

export function authenticate(params = {}) {
  axios
    .post(`/auth/${params.network}`, { access_token: params.access_token })
    .then(response => response.data)
    .then(onLogin)
    .catch((error) => {
    });
}
      alert('User could not login');
      console.error(error);
random_line_split
auth.js
import * as Cookies from 'js-cookie';
import axios from 'axios';
import { route } from 'preact-router';

function storeUserCredentials({ user, accessToken, expires }) {
  Cookies.set('crendentials', { user, accessToken }, { expires: new Date(expires) });
}

export function getUserCredentials() {
  return Cookies.getJSON('crendentials');
}

export function isAuthenticated()
export function onLogin(response) {
  storeUserCredentials(response);
  route('/', true);
}

export function logout() {
  Cookies.remove('crendentials');
  route('/login', true);
}

export function authenticate(params = {}) {
  axios
    .post(`/auth/${params.network}`, { access_token: params.access_token })
    .then(response => response.data)
    .then(onLogin)
    .catch((error) => {
      alert('User could not login');
      console.error(error);
    });
}
{
  const crendentials = getUserCredentials();
  return !(crendentials === null || crendentials === undefined);
}
identifier_body
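Across the rows in this dump, fim_type takes exactly four values. A short sketch, assuming rows are plain dicts carrying the schema fields, for tallying that class balance:

from collections import Counter

def fim_type_histogram(rows):
    # Expected classes: random_line_split, identifier_name,
    # identifier_body, conditional_block.
    return Counter(row["fim_type"] for row in rows)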
example_grid_time.py
#!/usr/bin/env python

from datetime import timedelta

import numpy as np

from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift

o = OceanDrift(loglevel=0)  # Set loglevel to 0 for debug information

reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
    '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')

# Landmask (Basemap)
reader_basemap = reader_basemap_landmask.Reader(
    llcrnrlon=4.0, llcrnrlat=59.9,
    urcrnrlon=5.5, urcrnrlat=61.2,
    resolution='h', projection='merc')

o.add_reader([reader_basemap, reader_norkyst])

# Seeding some particles
lons = np.linspace(4.4, 4.6, 10)
lats = np.linspace(60.0, 60.1, 10)
lons, lats = np.meshgrid(lons, lats)
lons = lons.ravel()
lats = lats.ravel()

# Seed oil elements on a grid at regular time interval
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for i in range(num_steps+1):
# Running model (until end of driver data)
o.run(steps=66*4, time_step=900)

# Print and plot results
print(o)
o.animation()
    o.seed_elements(lons, lats, radius=0, number=100,
                    time=start_time + i*time_step)
random_line_split
example_grid_time.py
#!/usr/bin/env python

from datetime import timedelta

import numpy as np

from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift

o = OceanDrift(loglevel=0)  # Set loglevel to 0 for debug information

reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
    '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')

# Landmask (Basemap)
reader_basemap = reader_basemap_landmask.Reader(
    llcrnrlon=4.0, llcrnrlat=59.9,
    urcrnrlon=5.5, urcrnrlat=61.2,
    resolution='h', projection='merc')

o.add_reader([reader_basemap, reader_norkyst])

# Seeding some particles
lons = np.linspace(4.4, 4.6, 10)
lats = np.linspace(60.0, 60.1, 10)
lons, lats = np.meshgrid(lons, lats)
lons = lons.ravel()
lats = lats.ravel()

# Seed oil elements on a grid at regular time interval
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for i in range(num_steps+1):
# Running model (until end of driver data)
o.run(steps=66*4, time_step=900)

# Print and plot results
print(o)
o.animation()
    o.seed_elements(lons, lats, radius=0, number=100,
                    time=start_time + i*time_step)
conditional_block
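A random_line_split row, like the two example_grid_time.py records above, cuts a file only at line boundaries. One plausible way such a split could be generated from a source file; the dataset's actual tooling is not shown in this dump, so treat this as an assumption:

import random

def random_line_split(source):
    # Pick a contiguous run of whole lines as the middle; everything before
    # it is the prefix and everything after it is the suffix. Assumes a
    # non-empty file.
    lines = source.splitlines(keepends=True)
    i = random.randrange(len(lines))
    j = random.randrange(i, len(lines))
    prefix = "".join(lines[:i])
    middle = "".join(lines[i:j + 1])
    suffix = "".join(lines[j + 1:])
    return prefix, middle, suffix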
views.py
""" Views for PubSite app. """ from django.conf import settings from django.contrib.auth.views import ( PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView, ) from django.shortcuts import render import requests import logging logger = logging.getLogger(__name__) def _get_context(page_name): return { "pages": settings.PUBLIC_PAGES, "current_page_name": page_name, } # Regular index # def index(request): # """ # View for the static index page # """ # return render(request, 'public/home.html', _get_context('Home')) def index(request): """ View for the static index page """ return render(request, "public/home.html", _get_context("Home")) def about(request): """ View for the static chapter history page. """ return render(request, "public/about.html", _get_context("About")) def activities(request): """ View for the static chapter service page. """ return render( request, "public/activities.html", _get_context("Service & Activities"), ) def rush(request): """ View for the static chapter service page. """ return render( request, "public/rush.html", _get_context("Rush"), ) def campaign(request): """ View for the campaign service page. """ # Overrride requests Session authentication handling class NoRebuildAuthSession(requests.Session): def rebuild_auth(self, prepared_request, response): """ No code here means requests will always preserve the Authorization header when redirected. Be careful not to leak your credentials to untrusted hosts! """ url = "https://api.givebutter.com/v1/transactions/" headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"} response = None # Create custom requests session session = NoRebuildAuthSession() # Make GET request to server, timeout in seconds try: r = session.get(url, headers=headers, timeout=0.75) if r.status_code == 200: response = r.json() else: logger.error(f"ERROR in request: {r.status_code}") except requests.exceptions.Timeout: logger.warning("Connection to GiveButter API Timed out") except requests.ConnectionError: logger.warning("Connection to GiveButter API could not be resolved") except requests.exceptions.RequestException: logger.error( "An unknown issue occurred while trying to retrieve GiveButter Donor List" ) # Grab context object to use later ctx = _get_context("Campaign") # Check for successful response, if so - filter, sort, and format data if response and "data" in response: response = response["data"] # Pull data from GET response object logger.debug(f"GiveButter API Response: {response}") # Filter by only successful transactions, then sort by amount descending successful_txs = [tx for tx in response if tx["status"] == "succeeded"] sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True) # Clean data to a list of dictionaries & remove unnecessary data transactions = [ { "name": tx["giving_space"]["name"], "amount": tx["giving_space"]["amount"], "message": tx["giving_space"]["message"], } for tx in sorted_txs[:20] ] # Attach transaction dictionary & length to context object ctx["transactions"] = transactions ctx["num_txs"] = len(successful_txs) return render( request, "public/campaign.html", ctx, ) def permission_denied(request): """
    View for 403 (Permission Denied) error.
    """
    return render(
        request,
        "common/403.html",
        _get_context("Permission Denied"),
    )


def handler404(request, exception):
    """ """
    return render(request, "common/404.html", _get_context("Page Not Found"))


class ResetPassword(PasswordResetView):
    template_name = "password_reset/password_reset_form.html"


class ResetPasswordDone(PasswordResetDoneView):
    template_name = "password_reset/password_reset_done.html"


class ResetPasswordConfirm(PasswordResetConfirmView):
    template_name = "password_reset/password_reset_confirm.html"


class ResetPasswordComplete(PasswordResetCompleteView):
    template_name = "password_reset/password_reset_complete.html"
random_line_split
views.py
""" Views for PubSite app. """ from django.conf import settings from django.contrib.auth.views import ( PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView, ) from django.shortcuts import render import requests import logging logger = logging.getLogger(__name__) def _get_context(page_name): return { "pages": settings.PUBLIC_PAGES, "current_page_name": page_name, } # Regular index # def index(request): # """ # View for the static index page # """ # return render(request, 'public/home.html', _get_context('Home')) def index(request): """ View for the static index page """ return render(request, "public/home.html", _get_context("Home")) def about(request): """ View for the static chapter history page. """ return render(request, "public/about.html", _get_context("About")) def activities(request): """ View for the static chapter service page. """ return render( request, "public/activities.html", _get_context("Service & Activities"), ) def rush(request): """ View for the static chapter service page. """ return render( request, "public/rush.html", _get_context("Rush"), ) def campaign(request): """ View for the campaign service page. """ # Overrride requests Session authentication handling class NoRebuildAuthSession(requests.Session): def rebuild_auth(self, prepared_request, response): """ No code here means requests will always preserve the Authorization header when redirected. Be careful not to leak your credentials to untrusted hosts! """ url = "https://api.givebutter.com/v1/transactions/" headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"} response = None # Create custom requests session session = NoRebuildAuthSession() # Make GET request to server, timeout in seconds try: r = session.get(url, headers=headers, timeout=0.75) if r.status_code == 200: response = r.json() else:
except requests.exceptions.Timeout: logger.warning("Connection to GiveButter API Timed out") except requests.ConnectionError: logger.warning("Connection to GiveButter API could not be resolved") except requests.exceptions.RequestException: logger.error( "An unknown issue occurred while trying to retrieve GiveButter Donor List" ) # Grab context object to use later ctx = _get_context("Campaign") # Check for successful response, if so - filter, sort, and format data if response and "data" in response: response = response["data"] # Pull data from GET response object logger.debug(f"GiveButter API Response: {response}") # Filter by only successful transactions, then sort by amount descending successful_txs = [tx for tx in response if tx["status"] == "succeeded"] sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True) # Clean data to a list of dictionaries & remove unnecessary data transactions = [ { "name": tx["giving_space"]["name"], "amount": tx["giving_space"]["amount"], "message": tx["giving_space"]["message"], } for tx in sorted_txs[:20] ] # Attach transaction dictionary & length to context object ctx["transactions"] = transactions ctx["num_txs"] = len(successful_txs) return render( request, "public/campaign.html", ctx, ) def permission_denied(request): """ View for 403 (Permission Denied) error. """ return render( request, "common/403.html", _get_context("Permission Denied"), ) def handler404(request, exception): """ """ return render(request, "common/404.html", _get_context("Page Not Found")) class ResetPassword(PasswordResetView): template_name = "password_reset/password_reset_form.html" class ResetPasswordDone(PasswordResetDoneView): template_name = "password_reset/password_reset_done.html" class ResetPasswordConfirm(PasswordResetConfirmView): template_name = "password_reset/password_reset_confirm.html" class ResetPasswordComplete(PasswordResetCompleteView): template_name = "password_reset/password_reset_complete.html"
logger.error(f"ERROR in request: {r.status_code}")
conditional_block
views.py
""" Views for PubSite app. """ from django.conf import settings from django.contrib.auth.views import ( PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView, ) from django.shortcuts import render import requests import logging logger = logging.getLogger(__name__) def _get_context(page_name): return { "pages": settings.PUBLIC_PAGES, "current_page_name": page_name, } # Regular index # def index(request): # """ # View for the static index page # """ # return render(request, 'public/home.html', _get_context('Home')) def index(request): """ View for the static index page """ return render(request, "public/home.html", _get_context("Home")) def about(request): """ View for the static chapter history page. """ return render(request, "public/about.html", _get_context("About")) def
(request): """ View for the static chapter service page. """ return render( request, "public/activities.html", _get_context("Service & Activities"), ) def rush(request): """ View for the static chapter service page. """ return render( request, "public/rush.html", _get_context("Rush"), ) def campaign(request): """ View for the campaign service page. """ # Overrride requests Session authentication handling class NoRebuildAuthSession(requests.Session): def rebuild_auth(self, prepared_request, response): """ No code here means requests will always preserve the Authorization header when redirected. Be careful not to leak your credentials to untrusted hosts! """ url = "https://api.givebutter.com/v1/transactions/" headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"} response = None # Create custom requests session session = NoRebuildAuthSession() # Make GET request to server, timeout in seconds try: r = session.get(url, headers=headers, timeout=0.75) if r.status_code == 200: response = r.json() else: logger.error(f"ERROR in request: {r.status_code}") except requests.exceptions.Timeout: logger.warning("Connection to GiveButter API Timed out") except requests.ConnectionError: logger.warning("Connection to GiveButter API could not be resolved") except requests.exceptions.RequestException: logger.error( "An unknown issue occurred while trying to retrieve GiveButter Donor List" ) # Grab context object to use later ctx = _get_context("Campaign") # Check for successful response, if so - filter, sort, and format data if response and "data" in response: response = response["data"] # Pull data from GET response object logger.debug(f"GiveButter API Response: {response}") # Filter by only successful transactions, then sort by amount descending successful_txs = [tx for tx in response if tx["status"] == "succeeded"] sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True) # Clean data to a list of dictionaries & remove unnecessary data transactions = [ { "name": tx["giving_space"]["name"], "amount": tx["giving_space"]["amount"], "message": tx["giving_space"]["message"], } for tx in sorted_txs[:20] ] # Attach transaction dictionary & length to context object ctx["transactions"] = transactions ctx["num_txs"] = len(successful_txs) return render( request, "public/campaign.html", ctx, ) def permission_denied(request): """ View for 403 (Permission Denied) error. """ return render( request, "common/403.html", _get_context("Permission Denied"), ) def handler404(request, exception): """ """ return render(request, "common/404.html", _get_context("Page Not Found")) class ResetPassword(PasswordResetView): template_name = "password_reset/password_reset_form.html" class ResetPasswordDone(PasswordResetDoneView): template_name = "password_reset/password_reset_done.html" class ResetPasswordConfirm(PasswordResetConfirmView): template_name = "password_reset/password_reset_confirm.html" class ResetPasswordComplete(PasswordResetCompleteView): template_name = "password_reset/password_reset_complete.html"
activities
identifier_name
views.py
""" Views for PubSite app. """ from django.conf import settings from django.contrib.auth.views import ( PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView, ) from django.shortcuts import render import requests import logging logger = logging.getLogger(__name__) def _get_context(page_name): return { "pages": settings.PUBLIC_PAGES, "current_page_name": page_name, } # Regular index # def index(request): # """ # View for the static index page # """ # return render(request, 'public/home.html', _get_context('Home')) def index(request): """ View for the static index page """ return render(request, "public/home.html", _get_context("Home")) def about(request): """ View for the static chapter history page. """ return render(request, "public/about.html", _get_context("About")) def activities(request): """ View for the static chapter service page. """ return render( request, "public/activities.html", _get_context("Service & Activities"), ) def rush(request): """ View for the static chapter service page. """ return render( request, "public/rush.html", _get_context("Rush"), ) def campaign(request): """ View for the campaign service page. """ # Overrride requests Session authentication handling class NoRebuildAuthSession(requests.Session): def rebuild_auth(self, prepared_request, response): """ No code here means requests will always preserve the Authorization header when redirected. Be careful not to leak your credentials to untrusted hosts! """ url = "https://api.givebutter.com/v1/transactions/" headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"} response = None # Create custom requests session session = NoRebuildAuthSession() # Make GET request to server, timeout in seconds try: r = session.get(url, headers=headers, timeout=0.75) if r.status_code == 200: response = r.json() else: logger.error(f"ERROR in request: {r.status_code}") except requests.exceptions.Timeout: logger.warning("Connection to GiveButter API Timed out") except requests.ConnectionError: logger.warning("Connection to GiveButter API could not be resolved") except requests.exceptions.RequestException: logger.error( "An unknown issue occurred while trying to retrieve GiveButter Donor List" ) # Grab context object to use later ctx = _get_context("Campaign") # Check for successful response, if so - filter, sort, and format data if response and "data" in response: response = response["data"] # Pull data from GET response object logger.debug(f"GiveButter API Response: {response}") # Filter by only successful transactions, then sort by amount descending successful_txs = [tx for tx in response if tx["status"] == "succeeded"] sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True) # Clean data to a list of dictionaries & remove unnecessary data transactions = [ { "name": tx["giving_space"]["name"], "amount": tx["giving_space"]["amount"], "message": tx["giving_space"]["message"], } for tx in sorted_txs[:20] ] # Attach transaction dictionary & length to context object ctx["transactions"] = transactions ctx["num_txs"] = len(successful_txs) return render( request, "public/campaign.html", ctx, ) def permission_denied(request): """ View for 403 (Permission Denied) error. 
""" return render( request, "common/403.html", _get_context("Permission Denied"), ) def handler404(request, exception): """ """ return render(request, "common/404.html", _get_context("Page Not Found")) class ResetPassword(PasswordResetView): template_name = "password_reset/password_reset_form.html" class ResetPasswordDone(PasswordResetDoneView):
class ResetPasswordConfirm(PasswordResetConfirmView):
    template_name = "password_reset/password_reset_confirm.html"


class ResetPasswordComplete(PasswordResetCompleteView):
    template_name = "password_reset/password_reset_complete.html"
template_name = "password_reset/password_reset_done.html"
identifier_body
main.rs
// Task : Guess number game
// Date : 10 Sept 2016
// Author : Vigneshwer

extern crate rand;

use std::io;
use std::cmp::Ordering;
use rand::Rng;

fn main()
        match guess.cmp(&secret_number) {
            Ordering::Less => println!("Too Small"),
            Ordering::Greater => println!("Too big"),
            Ordering::Equal => {
                println!("Win");
                break;
            }
        }
    }
}
{
    println!("Guess the Number game!");
    let secret_number = rand::thread_rng().gen_range(1,101);
    // println!("You secret number is {}", secret_number);
    loop {
        println!("Enter the number in your mind");
        let mut guess = String::new();
        io::stdin().read_line(&mut guess).expect("Failed to read line");
        let guess: u32 = match guess.trim().parse() {
            Ok(num) => num,
            Err(_) => continue,
        };
        print!("You guessed: {}", guess);
identifier_body
main.rs
// Task : Guess number game
// Date : 10 Sept 2016
// Author : Vigneshwer

extern crate rand;

use std::io;
use std::cmp::Ordering;
use rand::Rng;

fn
() {
    println!("Guess the Number game!");
    let secret_number = rand::thread_rng().gen_range(1,101);
    // println!("You secret number is {}", secret_number);
    loop {
        println!("Enter the number in your mind");
        let mut guess = String::new();
        io::stdin().read_line(&mut guess).expect("Failed to read line");
        let guess: u32 = match guess.trim().parse() {
            Ok(num) => num,
            Err(_) => continue,
        };
        print!("You guessed: {}", guess);
        match guess.cmp(&secret_number) {
            Ordering::Less => println!("Too Small"),
            Ordering::Greater => println!("Too big"),
            Ordering::Equal => {
                println!("Win");
                break;
            }
        }
    }
}
main
identifier_name
main.rs
// Task : Guess number game
use std::io;
use std::cmp::Ordering;
use rand::Rng;

fn main() {
    println!("Guess the Number game!");
    let secret_number = rand::thread_rng().gen_range(1,101);
    // println!("You secret number is {}", secret_number);
    loop {
        println!("Enter the number in your mind");
        let mut guess = String::new();
        io::stdin().read_line(&mut guess).expect("Failed to read line");
        let guess: u32 = match guess.trim().parse() {
            Ok(num) => num,
            Err(_) => continue,
        };
        print!("You guessed: {}", guess);
        match guess.cmp(&secret_number) {
            Ordering::Less => println!("Too Small"),
            Ordering::Greater => println!("Too big"),
            Ordering::Equal => {
                println!("Win");
                break;
            }
        }
    }
}
// Date : 10 Sept 2016
// Author : Vigneshwer

extern crate rand;
random_line_split
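The identifier_name rows above (for example, the middles "main", "ComicData", and "getUserCredentials") hide just the name being defined. A rough illustration of how such holes could be located with Python's standard ast module; the dump does not show the real extraction code, so this sketch is an assumption and only handles Python sources:

import ast

def identifier_name_holes(source):
    # Yields (prefix, middle, suffix) triples where the middle is the name
    # in a def/class statement; illustrative only.
    lines = source.splitlines(keepends=True)
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, (ast.FunctionDef, ast.ClassDef)):
            keyword = "def " if isinstance(node, ast.FunctionDef) else "class "
            # Convert the node's (line, column) position to a flat offset.
            offset = sum(len(l) for l in lines[:node.lineno - 1]) + node.col_offset
            start = offset + len(keyword)
            end = start + len(node.name)
            yield source[:start], source[start:end], source[end:]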
propeditor.py
from gui.component import InitSpec, StyleSpec, Spec, EventSpec, DimensionSpec from gui.font import Font DEBUG = False class PropertyEditorPanel(wx.Panel): def __init__( self, parent, log ): wx.Panel.__init__(self, parent, wx.ID_ANY) self.log = log self.callback = None self.panel = panel = wx.Panel(self, wx.ID_ANY) topsizer = wx.BoxSizer(wx.VERTICAL) # Difference between using PropertyGridManager vs PropertyGrid is that # the manager supports multiple pages and a description box. self.pg = pg = wxpg.PropertyGrid(panel, style=wxpg.PG_SPLITTER_AUTO_CENTER | wxpg.PG_AUTO_SORT | wxpg.PG_TOOLBAR) # Show help as tooltips pg.SetExtraStyle(wxpg.PG_EX_HELP_AS_TOOLTIPS) pg.Bind( wxpg.EVT_PG_CHANGED, self.OnPropGridChange ) pg.Bind( wxpg.EVT_PG_PAGE_CHANGED, self.OnPropGridPageChange ) pg.Bind( wxpg.EVT_PG_SELECTED, self.OnPropGridSelect ) pg.Bind( wxpg.EVT_PG_RIGHT_CLICK, self.OnPropGridRightClick ) ##pg.AddPage( "Page 1 - Testing All" ) # store the property grid for future reference self.pg = pg # load empty object (just draws categories) self.load_object(None) # sizing stuff: topsizer.Add(pg, 1, wx.EXPAND) panel.SetSizer(topsizer) topsizer.SetSizeHints(panel) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(panel, 1, wx.EXPAND) self.SetSizer(sizer) self.SetAutoLayout(True) def load_object(self, obj, callback=None): pg = self.pg # get the property grid reference self.callback = callback # store the update method # delete all properties pg.Clear() # clean references and aux structures appended = set() self.obj = obj self.groups = {} # loop on specs and append each property (categorized): for i, cat, class_ in ((1, 'Init Specs', InitSpec), (2, 'Dimension Specs', DimensionSpec), (3, 'Style Specs', StyleSpec), (5, 'Events', EventSpec), (4, 'Basic Specs', Spec), ): pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat))) if obj is None: continue specs = sorted(obj._meta.specs.items(), key=lambda it: it[0]) for name, spec in specs: if DEBUG: print "setting prop", spec, class_, spec.type if isinstance(spec, class_): prop = {'string': wxpg.StringProperty, 'integer': wxpg.IntProperty, 'float': wxpg.FloatProperty, 'boolean': wxpg.BoolProperty, 'text': wxpg.LongStringProperty, 'code': wxpg.LongStringProperty, 'enum': wxpg.EnumProperty, 'edit_enum': wxpg.EditEnumProperty, 'expr': wxpg.StringProperty, 'array': wxpg.ArrayStringProperty, 'font': wxpg.FontProperty, 'image_file': wxpg.ImageFileProperty, 'colour': wxpg.ColourProperty}.get(spec.type) if prop and name not in appended: value = getattr(obj, name) if DEBUG: print "name", name, value if spec.type == "code" and value is None: value = "" if spec.type == "boolean" and value is None: value = False if spec.type == "integer" and value is None: value = -1 if spec.type in ("string", "text") and value is None: value = "" if spec.type == "expr": value = repr(value) if spec.type == "font": if value is None: value = wx.NullFont else: value = value.get_wx_font() if callable(value): # event binded at runtime cannot be modified: value = str(value) readonly = True else: readonly = False if spec.type == "enum": prop = prop(name, name, spec.mapping.keys(), spec.mapping.values(), value=spec.mapping.get(value, 0)) elif spec.type == "edit_enum": prop = prop(name, name, spec.mapping.keys(), range(len(spec.mapping.values())), value=spec.mapping[value]) else: try: prop = prop(name, value=value) except Exception, e: print "CANNOT LOAD PROPERTY", name, value, e prop.SetPyClientData(spec) appended.add(name) if spec.group is None: pg.Append(prop) if readonly: pg.SetPropertyReadOnly(prop) 
else: # create a group hierachy (wxpg uses dot notation) group = "" prop_parent = None for grp in spec.group.split("."): prev_group = group # ancestor group += ("." if group else "") + grp # path if group in self.groups: prop_parent = self.groups[group] else: prop_group = wxpg.StringProperty(grp, value="<composed>") if not prop_parent: pg.Append(prop_group) else: pg.AppendIn(prev_group, prop_group) prop_parent = prop_group self.groups[group] = prop_parent pg.SetPropertyReadOnly(group) pg.AppendIn(spec.group, prop) pg.Collapse(spec.group) name = spec.group + "." + name if spec.type == "boolean": pg.SetPropertyAttribute(name, "UseCheckbox", True) doc = spec.__doc__ if doc: pg.SetPropertyHelpString(name, doc) def edit(self, name=""): "Programatically select a (default) property to start editing it" # for more info see DoSelectAndEdit in propgrid.cpp for name in (name, "label", "value", "text", "title", "filename", "name"): prop = self.pg.GetPropertyByName(name) if prop is not None: break self.Parent.SetFocus() self.Parent.Raise() self.pg.SetFocus() # give time to the ui to show the prop grid and set focus: wx.CallLater(250, self.select, prop.GetName()) def select(self, name, flags=0): "Select a property (and start the editor)" # do not call this directly from another window, use edit() instead # // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h- wxPG_SEL_FOCUS=0x0001 # Focuses to created editor wxPG_SEL_FORCE=0x0002 # Forces deletion and recreation of editor flags |= wxPG_SEL_FOCUS # | wxPG_SEL_FORCE prop = self.pg.GetPropertyByName(name) self.pg.SelectProperty(prop, flags) if DEBUG: print "selected!", prop def OnPropGridChange(self, event): p = event.GetProperty() if DEBUG: print "change!", p if p: name = p.GetName() spec = p.GetPyClientData() if spec and 'enum' in spec.type: value = p.GetValueAsString() else: value = p.GetValue() #self.log.write(u'%s changed to "%s"\n' % (p,p.GetValueAsString())) # if it a property child (parent.child), extract its name if "." in name: name = name[name.rindex(".") + 1:] if spec and not name in self.groups: if name == 'font': # TODO: detect property type # create a gui font from the wx.Font font = Font() font.set_wx_font(value) value = font # expressions must be evaluated to store the python object if spec.type == "expr": value = eval(value) # re-create the wx_object with the new property value # (this is required at least to apply new styles and init specs) if DEBUG: print "changed", self.obj.name kwargs = {str(name): value} wx.CallAfter(self.obj.rebuild, **kwargs) if name == 'name': wx.CallAfter(self.callback, **dict(name=self.obj.name)) def OnPropGridSelect(self, event):
    def OnDeleteProperty(self, event):
        p = self.pg.GetSelectedProperty()
        if p:
            self.pg.DeleteProperty(p)
        else:
            wx.MessageBox("First select a property to delete")

    def OnReserved(self, event):
        pass

    def OnPropGridRightClick(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s right clicked\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing right clicked\n')
        #self.obj.get_parent().Refresh()

    def OnPropGridPageChange(self, event):
        index = self.pg.GetSelectedPage()
        self.log.write('Page Changed to \'%
        p = event.GetProperty()
        if p:
            self.log.write(u'%s selected\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing selected\n')
identifier_body
propeditor.py
from gui.component import InitSpec, StyleSpec, Spec, EventSpec, DimensionSpec
from gui.font import Font

DEBUG = False


class PropertyEditorPanel(wx.Panel):

    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.log = log
        self.callback = None

        self.panel = panel = wx.Panel(self, wx.ID_ANY)
        topsizer = wx.BoxSizer(wx.VERTICAL)

        # Difference between using PropertyGridManager vs PropertyGrid is that
        # the manager supports multiple pages and a description box.
        self.pg = pg = wxpg.PropertyGrid(panel,
                                         style=wxpg.PG_SPLITTER_AUTO_CENTER |
                                               wxpg.PG_AUTO_SORT |
                                               wxpg.PG_TOOLBAR)

        # Show help as tooltips
        pg.SetExtraStyle(wxpg.PG_EX_HELP_AS_TOOLTIPS)

        pg.Bind(wxpg.EVT_PG_CHANGED, self.OnPropGridChange)
        pg.Bind(wxpg.EVT_PG_PAGE_CHANGED, self.OnPropGridPageChange)
        pg.Bind(wxpg.EVT_PG_SELECTED, self.OnPropGridSelect)
        pg.Bind(wxpg.EVT_PG_RIGHT_CLICK, self.OnPropGridRightClick)

        ##pg.AddPage( "Page 1 - Testing All" )

        # store the property grid for future reference
        self.pg = pg

        # load empty object (just draws categories)
        self.load_object(None)

        # sizing stuff:
        topsizer.Add(pg, 1, wx.EXPAND)
        panel.SetSizer(topsizer)
        topsizer.SetSizeHints(panel)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

    def load_object(self, obj, callback=None):
        pg = self.pg                # get the property grid reference
        self.callback = callback    # store the update method

        # delete all properties
        pg.Clear()

        # clean references and aux structures
        appended = set()
        self.obj = obj
        self.groups = {}

        # loop on specs and append each property (categorized):
        for i, cat, class_ in ((1, 'Init Specs', InitSpec),
                               (2, 'Dimension Specs', DimensionSpec),
                               (3, 'Style Specs', StyleSpec),
                               (5, 'Events', EventSpec),
                               (4, 'Basic Specs', Spec),
                               ):
            pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat)))
            if obj is None:
                continue

            specs = sorted(obj._meta.specs.items(), key=lambda it: it[0])
            for name, spec in specs:
                if DEBUG: print "setting prop", spec, class_, spec.type
                if isinstance(spec, class_):
                    prop = {'string': wxpg.StringProperty,
                            'integer': wxpg.IntProperty,
                            'float': wxpg.FloatProperty,
                            'boolean': wxpg.BoolProperty,
                            'text': wxpg.LongStringProperty,
                            'code': wxpg.LongStringProperty,
                            'enum': wxpg.EnumProperty,
                            'edit_enum': wxpg.EditEnumProperty,
                            'expr': wxpg.StringProperty,
                            'array': wxpg.ArrayStringProperty,
                            'font': wxpg.FontProperty,
                            'image_file': wxpg.ImageFileProperty,
                            'colour': wxpg.ColourProperty}.get(spec.type)
                    if prop and name not in appended:
                        value = getattr(obj, name)
                        if DEBUG: print "name", name, value
                        if spec.type == "code" and value is None:
                            value = ""
                        if spec.type == "boolean" and value is None:
                            value = False
                        if spec.type == "integer" and value is None:
                            value = -1
                        if spec.type in ("string", "text") and value is None:
                            value = ""
                        if spec.type == "expr":
                            value = repr(value)
                        if spec.type == "font":
                            if value is None:
                                value = wx.NullFont
                            else:
                                value = value.get_wx_font()
                        if callable(value):
                            # an event bound at runtime cannot be modified:
                            value = str(value)
                            readonly = True
                        else:
                            readonly = False
                        if spec.type == "enum":
                            prop = prop(name, name, spec.mapping.keys(),
                                        spec.mapping.values(),
                                        value=spec.mapping.get(value, 0))
                        elif spec.type == "edit_enum":
                            prop = prop(name, name, spec.mapping.keys(),
                                        range(len(spec.mapping.values())),
                                        value=spec.mapping[value])
                        else:
                            try:
                                prop = prop(name, value=value)
                            except Exception, e:
                                print "CANNOT LOAD PROPERTY", name, value, e

                        prop.SetPyClientData(spec)
                        appended.add(name)

                        if spec.group is None:
                            pg.Append(prop)
                            if readonly:
                                pg.SetPropertyReadOnly(prop)
                        else:
                            # create a group hierarchy (wxpg uses dot notation)
                            group = ""
                            prop_parent = None
                            for grp in spec.group.split("."):
                                prev_group = group    # ancestor
                                group += ("." if group else "") + grp    # path
                                if group in self.groups:
                                    prop_parent = self.groups[group]
                                else:
                                    prop_group = wxpg.StringProperty(grp, value="<composed>")
                                    if not prop_parent:
                                        pg.Append(prop_group)
                                    else:
                                        pg.AppendIn(prev_group, prop_group)
                                    prop_parent = prop_group
                                    self.groups[group] = prop_parent
                                    pg.SetPropertyReadOnly(group)
                            pg.AppendIn(spec.group, prop)
                            pg.Collapse(spec.group)
                            name = spec.group + "." + name
                        if spec.type == "boolean":
                            pg.SetPropertyAttribute(name, "UseCheckbox", True)
                        doc = spec.__doc__
                        if doc:
                            pg.SetPropertyHelpString(name, doc)

    def edit(self, name=""):
        "Programmatically select a (default) property to start editing it"
        # for more info see DoSelectAndEdit in propgrid.cpp
        for name in (name, "label", "value", "text", "title", "filename", "name"):
            prop = self.pg.GetPropertyByName(name)
            if prop is not None:
                break
        self.Parent.SetFocus()
        self.Parent.Raise()
        self.pg.SetFocus()
        # give time to the ui to show the prop grid and set focus:
        wx.CallLater(250, self.select, prop.GetName())

    def select(self, name, flags=0):
        "Select a property (and start the editor)"
        # do not call this directly from another window, use edit() instead
        # // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h-
        wxPG_SEL_FOCUS = 0x0001    # Focuses to created editor
        wxPG_SEL_FORCE = 0x0002    # Forces deletion and recreation of editor
        flags |= wxPG_SEL_FOCUS    # | wxPG_SEL_FORCE
        prop = self.pg.GetPropertyByName(name)
        self.pg.SelectProperty(prop, flags)
        if DEBUG: print "selected!", prop

    def OnPropGridChange(self, event):
        p = event.GetProperty()
        if DEBUG: print "change!", p
        if p:
            name = p.GetName()
            spec = p.GetPyClientData()
            if spec and 'enum' in spec.type:
                value = p.GetValueAsString()
            else:
                value = p.GetValue()
            #self.log.write(u'%s changed to "%s"\n' % (p,p.GetValueAsString()))
            # if it is a property child (parent.child), extract its name
            if "." in name:
                name = name[name.rindex(".") + 1:]
            if spec and not name in self.groups:
                if name == 'font':
                    # TODO: detect property type
                    # create a gui font from the wx.Font
                    font = Font()
                    font.set_wx_font(value)
                    value = font
                # expressions must be evaluated to store the python object
                if spec.type == "expr":
                    value = eval(value)
                # re-create the wx_object with the new property value
                # (this is required at least to apply new styles and init specs)
                if DEBUG: print "changed", self.obj.name
                kwargs = {str(name): value}
                wx.CallAfter(self.obj.rebuild, **kwargs)
                if name == 'name':
                    wx.CallAfter(self.callback, **dict(name=self.obj.name))

    def OnPropGridSelect(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s selected\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing selected\n')

    def
(self, event):
        p = self.pg.GetSelectedProperty()
        if p:
            self.pg.DeleteProperty(p)
        else:
            wx.MessageBox("First select a property to delete")

    def OnReserved(self, event):
        pass

    def OnPropGridRightClick(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s right clicked\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing right clicked\n')
        #self.obj.get_parent().Refresh()

    def OnPropGridPageChange(self, event):
        index = self.pg.GetSelectedPage()
        self.log.write('Page Changed to \'%
OnDeleteProperty
identifier_name
propeditor.py
.load_object(None)

        # sizing stuff:
        topsizer.Add(pg, 1, wx.EXPAND)
        panel.SetSizer(topsizer)
        topsizer.SetSizeHints(panel)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

    def load_object(self, obj, callback=None):
        pg = self.pg                # get the property grid reference
        self.callback = callback    # store the update method

        # delete all properties
        pg.Clear()

        # clean references and aux structures
        appended = set()
        self.obj = obj
        self.groups = {}

        # loop on specs and append each property (categorized):
        for i, cat, class_ in ((1, 'Init Specs', InitSpec),
                               (2, 'Dimension Specs', DimensionSpec),
                               (3, 'Style Specs', StyleSpec),
                               (5, 'Events', EventSpec),
                               (4, 'Basic Specs', Spec),
                               ):
            pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat)))
            if obj is None:
                continue

            specs = sorted(obj._meta.specs.items(), key=lambda it: it[0])
            for name, spec in specs:
                if DEBUG: print "setting prop", spec, class_, spec.type
                if isinstance(spec, class_):
                    prop = {'string': wxpg.StringProperty,
                            'integer': wxpg.IntProperty,
                            'float': wxpg.FloatProperty,
                            'boolean': wxpg.BoolProperty,
                            'text': wxpg.LongStringProperty,
                            'code': wxpg.LongStringProperty,
                            'enum': wxpg.EnumProperty,
                            'edit_enum': wxpg.EditEnumProperty,
                            'expr': wxpg.StringProperty,
                            'array': wxpg.ArrayStringProperty,
                            'font': wxpg.FontProperty,
                            'image_file': wxpg.ImageFileProperty,
                            'colour': wxpg.ColourProperty}.get(spec.type)
                    if prop and name not in appended:
                        value = getattr(obj, name)
                        if DEBUG: print "name", name, value
                        if spec.type == "code" and value is None:
                            value = ""
                        if spec.type == "boolean" and value is None:
                            value = False
                        if spec.type == "integer" and value is None:
                            value = -1
                        if spec.type in ("string", "text") and value is None:
                            value = ""
                        if spec.type == "expr":
                            value = repr(value)
                        if spec.type == "font":
                            if value is None:
                                value = wx.NullFont
                            else:
                                value = value.get_wx_font()
                        if callable(value):
                            # an event bound at runtime cannot be modified:
                            value = str(value)
                            readonly = True
                        else:
                            readonly = False
                        if spec.type == "enum":
                            prop = prop(name, name, spec.mapping.keys(),
                                        spec.mapping.values(),
                                        value=spec.mapping.get(value, 0))
                        elif spec.type == "edit_enum":
                            prop = prop(name, name, spec.mapping.keys(),
                                        range(len(spec.mapping.values())),
                                        value=spec.mapping[value])
                        else:
                            try:
                                prop = prop(name, value=value)
                            except Exception, e:
                                print "CANNOT LOAD PROPERTY", name, value, e

                        prop.SetPyClientData(spec)
                        appended.add(name)

                        if spec.group is None:
                            pg.Append(prop)
                            if readonly:
                                pg.SetPropertyReadOnly(prop)
                        else:
                            # create a group hierarchy (wxpg uses dot notation)
                            group = ""
                            prop_parent = None
                            for grp in spec.group.split("."):
                                prev_group = group    # ancestor
                                group += ("." if group else "") + grp    # path
                                if group in self.groups:
                                    prop_parent = self.groups[group]
                                else:
                                    prop_group = wxpg.StringProperty(grp, value="<composed>")
                                    if not prop_parent:
                                        pg.Append(prop_group)
                                    else:
                                        pg.AppendIn(prev_group, prop_group)
                                    prop_parent = prop_group
                                    self.groups[group] = prop_parent
                                    pg.SetPropertyReadOnly(group)
                            pg.AppendIn(spec.group, prop)
                            pg.Collapse(spec.group)
                            name = spec.group + "." + name
                        if spec.type == "boolean":
                            pg.SetPropertyAttribute(name, "UseCheckbox", True)
                        doc = spec.__doc__
                        if doc:
                            pg.SetPropertyHelpString(name, doc)

    def edit(self, name=""):
        "Programmatically select a (default) property to start editing it"
        # for more info see DoSelectAndEdit in propgrid.cpp
        for name in (name, "label", "value", "text", "title", "filename", "name"):
            prop = self.pg.GetPropertyByName(name)
            if prop is not None:
                break
        self.Parent.SetFocus()
        self.Parent.Raise()
        self.pg.SetFocus()
        # give time to the ui to show the prop grid and set focus:
        wx.CallLater(250, self.select, prop.GetName())

    def select(self, name, flags=0):
        "Select a property (and start the editor)"
        # do not call this directly from another window, use edit() instead
        # // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h-
        wxPG_SEL_FOCUS = 0x0001    # Focuses to created editor
        wxPG_SEL_FORCE = 0x0002    # Forces deletion and recreation of editor
        flags |= wxPG_SEL_FOCUS    # | wxPG_SEL_FORCE
        prop = self.pg.GetPropertyByName(name)
        self.pg.SelectProperty(prop, flags)
        if DEBUG: print "selected!", prop

    def OnPropGridChange(self, event):
        p = event.GetProperty()
        if DEBUG: print "change!", p
        if p:
            name = p.GetName()
            spec = p.GetPyClientData()
            if spec and 'enum' in spec.type:
                value = p.GetValueAsString()
            else:
                value = p.GetValue()
            #self.log.write(u'%s changed to "%s"\n' % (p,p.GetValueAsString()))
            # if it is a property child (parent.child), extract its name
            if "." in name:
                name = name[name.rindex(".") + 1:]
            if spec and not name in self.groups:
                if name == 'font':
                    # TODO: detect property type
                    # create a gui font from the wx.Font
                    font = Font()
                    font.set_wx_font(value)
                    value = font
                # expressions must be evaluated to store the python object
                if spec.type == "expr":
                    value = eval(value)
                # re-create the wx_object with the new property value
                # (this is required at least to apply new styles and init specs)
                if DEBUG: print "changed", self.obj.name
                kwargs = {str(name): value}
                wx.CallAfter(self.obj.rebuild, **kwargs)
                if name == 'name':
                    wx.CallAfter(self.callback, **dict(name=self.obj.name))

    def OnPropGridSelect(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s selected\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing selected\n')

    def OnDeleteProperty(self, event):
        p = self.pg.GetSelectedProperty()
        if p:
            self.pg.DeleteProperty(p)
        else:
            wx.MessageBox("First select a property to delete")

    def OnReserved(self, event):
        pass

    def OnPropGridRightClick(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s right clicked\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing right clicked\n')
        #self.obj.get_parent().Refresh()

    def OnPropGridPageChange(self, event):
        index = self.pg.GetSelectedPage()
        self.log.write('Page Changed to \'%s\'\n' % (self.pg.GetPageName(index)))


if __name__ == '__main__':
    import sys,os
    app = wx.App()
    f = wx.Frame(None)

    from gui.controls import Button, Label, TextBox, CheckBox, ListBox, ComboBox
    frame = wx.Frame(None)
    #o = Button(frame, name="btnTest", label="click me!", default=True)
    #o = Label(frame, name="lblTest", alignment="right", size=(-1, 500), text="hello!")
    o = TextBox(frame, name="txtTest", border=False, text="hello world!")
    #o = CheckBox(frame, name="chkTest", border='none', label="Check me!")
    #o = ListBox(frame, name="lstTest", border='none',
    #            items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
    #            multiselect="--multiselect" in sys.argv)
    #o = ComboBox(frame, name="cboTest",
    #             items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
    #             readonly='--readonly' in sys.argv,
    #             )
frame.Show()
    log = sys.stdout
    w = PropertyEditorPanel(f, log)
    w.load_object(o)
    f.Show()
    app.MainLoop()
random_line_split
propeditor.py
from gui.component import InitSpec, StyleSpec, Spec, EventSpec, DimensionSpec
from gui.font import Font

DEBUG = False


class PropertyEditorPanel(wx.Panel):

    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.log = log
        self.callback = None

        self.panel = panel = wx.Panel(self, wx.ID_ANY)
        topsizer = wx.BoxSizer(wx.VERTICAL)

        # Difference between using PropertyGridManager vs PropertyGrid is that
        # the manager supports multiple pages and a description box.
        self.pg = pg = wxpg.PropertyGrid(panel,
                                         style=wxpg.PG_SPLITTER_AUTO_CENTER |
                                               wxpg.PG_AUTO_SORT |
                                               wxpg.PG_TOOLBAR)

        # Show help as tooltips
        pg.SetExtraStyle(wxpg.PG_EX_HELP_AS_TOOLTIPS)

        pg.Bind(wxpg.EVT_PG_CHANGED, self.OnPropGridChange)
        pg.Bind(wxpg.EVT_PG_PAGE_CHANGED, self.OnPropGridPageChange)
        pg.Bind(wxpg.EVT_PG_SELECTED, self.OnPropGridSelect)
        pg.Bind(wxpg.EVT_PG_RIGHT_CLICK, self.OnPropGridRightClick)

        ##pg.AddPage( "Page 1 - Testing All" )

        # store the property grid for future reference
        self.pg = pg

        # load empty object (just draws categories)
        self.load_object(None)

        # sizing stuff:
        topsizer.Add(pg, 1, wx.EXPAND)
        panel.SetSizer(topsizer)
        topsizer.SetSizeHints(panel)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

    def load_object(self, obj, callback=None):
        pg = self.pg                # get the property grid reference
        self.callback = callback    # store the update method

        # delete all properties
        pg.Clear()

        # clean references and aux structures
        appended = set()
        self.obj = obj
        self.groups = {}

        # loop on specs and append each property (categorized):
        for i, cat, class_ in ((1, 'Init Specs', InitSpec),
                               (2, 'Dimension Specs', DimensionSpec),
                               (3, 'Style Specs', StyleSpec),
                               (5, 'Events', EventSpec),
                               (4, 'Basic Specs', Spec),
                               ):
            pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat)))
            if obj is None:
                continue

            specs = sorted(obj._meta.specs.items(), key=lambda it: it[0])
            for name, spec in specs:
                if DEBUG: print "setting prop", spec, class_, spec.type
                if isinstance(spec, class_):
                    prop = {'string': wxpg.StringProperty,
                            'integer': wxpg.IntProperty,
                            'float': wxpg.FloatProperty,
                            'boolean': wxpg.BoolProperty,
                            'text': wxpg.LongStringProperty,
                            'code': wxpg.LongStringProperty,
                            'enum': wxpg.EnumProperty,
                            'edit_enum': wxpg.EditEnumProperty,
                            'expr': wxpg.StringProperty,
                            'array': wxpg.ArrayStringProperty,
                            'font': wxpg.FontProperty,
                            'image_file': wxpg.ImageFileProperty,
                            'colour': wxpg.ColourProperty}.get(spec.type)
                    if prop and name not in appended:
                        value = getattr(obj, name)
                        if DEBUG: print "name", name, value
                        if spec.type == "code" and value is None:
                            value = ""
                        if spec.type == "boolean" and value is None:
                            value = False
                        if spec.type == "integer" and value is None:
                            value = -1
                        if spec.type in ("string", "text") and value is None:
                            value = ""
                        if spec.type == "expr":
                            value = repr(value)
                        if spec.type == "font":
                            if value is None:
                                value = wx.NullFont
                            else:
                                value = value.get_wx_font()
                        if callable(value):
                            # an event bound at runtime cannot be modified:
                            value = str(value)
                            readonly = True
                        else:
                            readonly = False
                        if spec.type == "enum":
                            prop = prop(name, name, spec.mapping.keys(),
                                        spec.mapping.values(),
                                        value=spec.mapping.get(value, 0))
                        elif spec.type == "edit_enum":
                            prop = prop(name, name, spec.mapping.keys(),
                                        range(len(spec.mapping.values())),
                                        value=spec.mapping[value])
                        else:
                            try:
                                prop = prop(name, value=value)
                            except Exception, e:
                                print "CANNOT LOAD PROPERTY", name, value, e

                        prop.SetPyClientData(spec)
                        appended.add(name)

                        if spec.group is None:
                            pg.Append(prop)
                            if readonly:
                                pg.SetPropertyReadOnly(prop)
                        else:
                            # create a group hierarchy (wxpg uses dot notation)
                            group = ""
                            prop_parent = None
                            for grp in spec.group.split("."):
                                prev_group = group    # ancestor
                                group += ("." if group else "") + grp    # path
                                if group in self.groups:
                                    prop_parent = self.groups[group]
                                else:
                                    prop_group = wxpg.StringProperty(grp, value="<composed>")
                                    if not prop_parent:
                                        pg.Append(prop_group)
                                    else:
                                        pg.AppendIn(prev_group, prop_group)
                                    prop_parent = prop_group
                                    self.groups[group] = prop_parent
                                    pg.SetPropertyReadOnly(group)
                            pg.AppendIn(spec.group, prop)
                            pg.Collapse(spec.group)
                            name = spec.group + "." + name
                        if spec.type == "boolean":
                            pg.SetPropertyAttribute(name, "UseCheckbox", True)
                        doc = spec.__doc__
                        if doc:
                            pg.SetPropertyHelpString(name, doc)

    def edit(self, name=""):
        "Programmatically select a (default) property to start editing it"
        # for more info see DoSelectAndEdit in propgrid.cpp
        for name in (name, "label", "value", "text", "title", "filename", "name"):
            prop = self.pg.GetPropertyByName(name)
            if prop is not None:
                break
        self.Parent.SetFocus()
        self.Parent.Raise()
        self.pg.SetFocus()
        # give time to the ui to show the prop grid and set focus:
        wx.CallLater(250, self.select, prop.GetName())

    def select(self, name, flags=0):
        "Select a property (and start the editor)"
        # do not call this directly from another window, use edit() instead
        # // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h-
        wxPG_SEL_FOCUS = 0x0001    # Focuses to created editor
        wxPG_SEL_FORCE = 0x0002    # Forces deletion and recreation of editor
        flags |= wxPG_SEL_FOCUS    # | wxPG_SEL_FORCE
        prop = self.pg.GetPropertyByName(name)
        self.pg.SelectProperty(prop, flags)
        if DEBUG: print "selected!", prop

    def OnPropGridChange(self, event):
        p = event.GetProperty()
        if DEBUG:
if p:
            name = p.GetName()
            spec = p.GetPyClientData()
            if spec and 'enum' in spec.type:
                value = p.GetValueAsString()
            else:
                value = p.GetValue()
            #self.log.write(u'%s changed to "%s"\n' % (p,p.GetValueAsString()))
            # if it is a property child (parent.child), extract its name
            if "." in name:
                name = name[name.rindex(".") + 1:]
            if spec and not name in self.groups:
                if name == 'font':
                    # TODO: detect property type
                    # create a gui font from the wx.Font
                    font = Font()
                    font.set_wx_font(value)
                    value = font
                # expressions must be evaluated to store the python object
                if spec.type == "expr":
                    value = eval(value)
                # re-create the wx_object with the new property value
                # (this is required at least to apply new styles and init specs)
                if DEBUG: print "changed", self.obj.name
                kwargs = {str(name): value}
                wx.CallAfter(self.obj.rebuild, **kwargs)
                if name == 'name':
                    wx.CallAfter(self.callback, **dict(name=self.obj.name))

    def OnPropGridSelect(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s selected\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing selected\n')

    def OnDeleteProperty(self, event):
        p = self.pg.GetSelectedProperty()
        if p:
            self.pg.DeleteProperty(p)
        else:
            wx.MessageBox("First select a property to delete")

    def OnReserved(self, event):
        pass

    def OnPropGridRightClick(self, event):
        p = event.GetProperty()
        if p:
            self.log.write(u'%s right clicked\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing right clicked\n')
        #self.obj.get_parent().Refresh()

    def OnPropGridPageChange(self, event):
        index = self.pg.GetSelectedPage()
        self.log.write('Page Changed to \'%
print "change!", p
conditional_block
updateWeight.js
/**
 * @file js for changing weights of terms with Up and Down arrows
 */

(function ($) {

//object to store weights (tid => weight)
var termWeightsData = new Object();

Drupal.behaviors.TaxonomyManagerWeights = {
  attach: function(context, settings) {
    var weightSettings = settings.updateWeight || [];
    if (!$('#edit-toolbar.tm-weights-processed').length) {
      $('#edit-toolbar').addClass('tm-weights-processed');
      termWeightsData['form_token'] = $('input[name=form_token]').val();
      termWeightsData['form_id'] = $('input[name=form_id]').val();
      termWeightsData['weights'] = new Object();
      Drupal.attachUpdateWeightToolbar(weightSettings['up'], weightSettings['down']);
      Drupal.attachUpdateWeightTerms();
    }
  }
}

/**
 * adds click events for Up and Down buttons in the toolbar, which
 * allow moving the selected terms (one or more)
 */
Drupal.attachUpdateWeightToolbar = function(upButton, downButton) {
  var selected;
  var url = Drupal.settings.updateWeight['url'];
  $('#'+ upButton).click(function() {
    selected = Drupal.getSelectedTerms();
    for (var i=0; i < selected.length; i++) {
      var upTerm = selected[i];
      var downTerm = $(upTerm).prev();
      Drupal.orderTerms(upTerm, downTerm);
    }
    if (selected.length > 0) {
      $.post(url, termWeightsData);
    }
  });
  $('#'+ downButton).click(function() {
    selected = Drupal.getSelectedTerms();
    for (var i=selected.length-1; i >= 0; i--) {
      var downTerm = selected[i];
      var upTerm = $(downTerm).next();
      Drupal.orderTerms(upTerm, downTerm);
    }
    if (selected.length > 0) {
      $.post(url, termWeightsData);
    }
  });
}

/**
 * adds small up and down arrows to each term
 * arrows get displayed on mouseover
 */
Drupal.attachUpdateWeightTerms = function(parent, currentIndex) {
  var settings = Drupal.settings.updateWeight || [];
  var disable = settings['disable_mouseover'];
  if (!disable) {
    var url = Drupal.settings.updateWeight['url'];
    var termLineClass = 'div.term-line';
    var termUpClass = 'img.term-up';
    var termDownClass = 'img.term-down';

    if (parent && currentIndex) {
      parent = $(parent).slice(currentIndex);
    }
    if (parent) {
      termLineClass = $(parent).find(termLineClass);
      termUpClass = $(parent).find(termUpClass);
      termDownClass = $(parent).find(termDownClass);
    }

    $(termLineClass).mouseover(function() {
      $(this).find('div.term-operations').show();
    });
    $(termLineClass).mouseout(function() {
      $(this).find('div.term-operations').hide();
    });
    $(termUpClass).click(function() {
      var upTerm = $(this).parents("li").eq(0);
      var downTerm = $(upTerm).prev();
      Drupal.orderTerms(upTerm, downTerm);
      $.post(url, termWeightsData);
      $(downTerm).find(termLineClass).unbind('mouseover');
      setTimeout(function() {
        $(upTerm).find('div.term-operations').hide();
        $(downTerm).find(termLineClass).mouseover(function() {
          $(this).find('div.term-operations').show();
        });
      }, 1500);
    });
    $(termDownClass).click(function() {
      var downTerm = $(this).parents("li").eq(0);
      var upTerm = $(downTerm).next();
      Drupal.orderTerms(upTerm, downTerm);
      $.post(url, termWeightsData);
      $(upTerm).find(termLineClass).unbind('mouseover');
      setTimeout(function() {
        $(downTerm).find('div.term-operations').hide();
        $(upTerm).find(termLineClass).mouseover(function() {
          $(this).find('div.term-operations').show();
        });
      }, 1500);
    });
  }
 * return array of selected terms
 */
Drupal.getSelectedTerms = function() {
  var terms = new Array();
  $('.treeview').find("input:checked").each(function() {
    var term = $(this).parents("li").eq(0);
    terms.push(term);
  });
  return terms;
}

/**
 * reorders terms
 * - swap list elements in DOM
 * - post updated weights to callback in php
 * - update classes of tree view
 */
Drupal.orderTerms = function(upTerm, downTerm) {
  try {
    Drupal.getTermId(upTerm);
    Drupal.swapTerms(upTerm, downTerm);
    Drupal.swapWeights(upTerm, downTerm);
    Drupal.updateTree(upTerm, downTerm);
  } catch(e) {
    //no next item, because term to update is last child, continue
  }
}

/**
 * simple swap of two elements
 */
Drupal.swapTerms = function(upTerm, downTerm) {
  $(upTerm).after(downTerm);
  $(downTerm).before(upTerm);
}

/**
 * updating weights of swapped terms
 * if two terms have different weights, then weights are being swapped
 * else, if both have same weights, upTerm gets decreased
 *
 * if prev/next siblings of up/down terms have same weights as current
 * swapped, they have to be updated by de/increasing weight (by 1) to ensure
 * unique position of swapped terms
 */
Drupal.swapWeights = function(upTerm, downTerm) {
  var upWeight = Drupal.getWeight(upTerm);
  var downWeight = Drupal.getWeight(downTerm);
  var downTid = Drupal.getTermId(downTerm);
  var upTid = Drupal.getTermId(upTerm);

  //same weight, decrease upTerm
  if (upWeight == downWeight) {
    termWeightsData['weights'][upTid] = --upWeight;
  }
  //different weights, swap
  else {
    termWeightsData['weights'][upTid] = downWeight;
    termWeightsData['weights'][downTid] = upWeight;
  }

  //update prev siblings if necessary
  try {
    if (Drupal.getWeight($(upTerm).prev()) >= upWeight) {
      $(upTerm).prevAll().each(function() {
        var id = Drupal.getTermId(this);
        var weight = Drupal.getWeight(this);
        termWeightsData['weights'][id] = --weight;
      });
    }
  } catch(e) {
    //no prev
  }

  //update next siblings if necessary
  try {
    if (Drupal.getWeight($(downTerm).next()) <= downWeight) {
      $(downTerm).nextAll().each(function() {
        var id = Drupal.getTermId(this);
        var weight = Drupal.getWeight(this);
        termWeightsData['weights'][id] = ++weight;
      });
    }
  } catch(e) {
    //no next
  }
}

/**
 * helper to return weight of a term
 */
Drupal.getWeight = function(li) {
  var id = Drupal.getTermId(li);
  var weight;
  if (termWeightsData['weights'][id] != null) {
    weight = termWeightsData['weights'][id];
  }
  else {
    weight = $(li).find("input:hidden[class=weight-form]").attr("value");
  }
  return weight;
}

})(jQuery);
}

/**
random_line_split
updateWeight.js
/**
 * @file js for changing weights of terms with Up and Down arrows
 */

(function ($) {

//object to store weights (tid => weight)
var termWeightsData = new Object();

Drupal.behaviors.TaxonomyManagerWeights = {
  attach: function(context, settings) {
    var weightSettings = settings.updateWeight || [];
    if (!$('#edit-toolbar.tm-weights-processed').length) {
      $('#edit-toolbar').addClass('tm-weights-processed');
      termWeightsData['form_token'] = $('input[name=form_token]').val();
      termWeightsData['form_id'] = $('input[name=form_id]').val();
      termWeightsData['weights'] = new Object();
      Drupal.attachUpdateWeightToolbar(weightSettings['up'], weightSettings['down']);
      Drupal.attachUpdateWeightTerms();
    }
  }
}

/**
 * adds click events for Up and Down buttons in the toolbar, which
 * allow moving the selected terms (one or more)
 */
Drupal.attachUpdateWeightToolbar = function(upButton, downButton) {
  var selected;
  var url = Drupal.settings.updateWeight['url'];
  $('#'+ upButton).click(function() {
    selected = Drupal.getSelectedTerms();
    for (var i=0; i < selected.length; i++) {
      var upTerm = selected[i];
      var downTerm = $(upTerm).prev();
      Drupal.orderTerms(upTerm, downTerm);
    }
    if (selected.length > 0) {
      $.post(url, termWeightsData);
    }
  });
  $('#'+ downButton).click(function() {
    selected = Drupal.getSelectedTerms();
    for (var i=selected.length-1; i >= 0; i--) {
      var downTerm = selected[i];
      var upTerm = $(downTerm).next();
      Drupal.orderTerms(upTerm, downTerm);
    }
    if (selected.length > 0) {
      $.post(url, termWeightsData);
    }
  });
}

/**
 * adds small up and down arrows to each term
 * arrows get displayed on mouseover
 */
Drupal.attachUpdateWeightTerms = function(parent, currentIndex) {
  var settings = Drupal.settings.updateWeight || [];
  var disable = settings['disable_mouseover'];
  if (!disable) {
    var url = Drupal.settings.updateWeight['url'];
    var termLineClass = 'div.term-line';
    var termUpClass = 'img.term-up';
    var termDownClass = 'img.term-down';

    if (parent && currentIndex) {
      parent = $(parent).slice(currentIndex);
    }
    if (parent) {
      termLineClass = $(parent).find(termLineClass);
      termUpClass = $(parent).find(termUpClass);
      termDownClass = $(parent).find(termDownClass);
    }

    $(termLineClass).mouseover(function() {
      $(this).find('div.term-operations').show();
    });
    $(termLineClass).mouseout(function() {
      $(this).find('div.term-operations').hide();
    });
    $(termUpClass).click(function() {
      var upTerm = $(this).parents("li").eq(0);
      var downTerm = $(upTerm).prev();
      Drupal.orderTerms(upTerm, downTerm);
      $.post(url, termWeightsData);
      $(downTerm).find(termLineClass).unbind('mouseover');
      setTimeout(function() {
        $(upTerm).find('div.term-operations').hide();
        $(downTerm).find(termLineClass).mouseover(function() {
          $(this).find('div.term-operations').show();
        });
      }, 1500);
    });
    $(termDownClass).click(function() {
      var downTerm = $(this).parents("li").eq(0);
      var upTerm = $(downTerm).next();
      Drupal.orderTerms(upTerm, downTerm);
      $.post(url, termWeightsData);
      $(upTerm).find(termLineClass).unbind('mouseover');
      setTimeout(function() {
        $(downTerm).find('div.term-operations').hide();
        $(upTerm).find(termLineClass).mouseover(function() {
          $(this).find('div.term-operations').show();
        });
      }, 1500);
    });
  }
}

/**
 * return array of selected terms
 */
Drupal.getSelectedTerms = function() {
  var terms = new Array();
  $('.treeview').find("input:checked").each(function() {
    var term = $(this).parents("li").eq(0);
    terms.push(term);
  });
  return terms;
}

/**
 * reorders terms
 * - swap list elements in DOM
 * - post updated weights to callback in php
 * - update classes of tree view
 */
Drupal.orderTerms = function(upTerm, downTerm) {
  try {
    Drupal.getTermId(upTerm);
    Drupal.swapTerms(upTerm, downTerm);
    Drupal.swapWeights(upTerm, downTerm);
    Drupal.updateTree(upTerm, downTerm);
  } catch(e) {
    //no next item, because term to update is last child, continue
  }
}

/**
 * simple swap of two elements
 */
Drupal.swapTerms = function(upTerm, downTerm) {
  $(upTerm).after(downTerm);
  $(downTerm).before(upTerm);
}

/**
 * updating weights of swapped terms
 * if two terms have different weights, then weights are being swapped
 * else, if both have same weights, upTerm gets decreased
 *
 * if prev/next siblings of up/down terms have same weights as current
 * swapped, they have to be updated by de/increasing weight (by 1) to ensure
 * unique position of swapped terms
 */
Drupal.swapWeights = function(upTerm, downTerm) {
  var upWeight = Drupal.getWeight(upTerm);
  var downWeight = Drupal.getWeight(downTerm);
  var downTid = Drupal.getTermId(downTerm);
  var upTid = Drupal.getTermId(upTerm);

  //same weight, decrease upTerm
  if (upWeight == downWeight) {
    termWeightsData['weights'][upTid] = --upWeight;
  }
  //different weights, swap
  else {
    termWeightsData['weights'][upTid] = downWeight;
    termWeightsData['weights'][downTid] = upWeight;
  }

  //update prev siblings if necessary
  try {
    if (Drupal.getWeight($(upTerm).prev()) >= upWeight) {
      $(upTerm).prevAll().each(function() {
        var id = Drupal.getTermId(this);
        var weight = Drupal.getWeight(this);
        termWeightsData['weights'][id] = --weight;
      });
    }
  } catch(e) {
    //no prev
  }

  //update next siblings if necessary
  try {
    if (Drupal.getWeight($(downTerm).next()) <= downWeight) {
      $(downTerm).nextAll().each(function() {
        var id = Drupal.getTermId(this);
        var weight = Drupal.getWeight(this);
        termWeightsData['weights'][id] = ++weight;
      });
    }
  } catch(e) {
    //no next
  }
}

/**
 * helper to return weight of a term
 */
Drupal.getWeight = function(li) {
  var id = Drupal.getTermId(li);
  var weight;
  if (termWeightsData['weights'][id] != null) {
    weight = termWeightsData['weights'][id];
  }
  else
return weight;
}

})(jQuery);
{
    weight = $(li).find("input:hidden[class=weight-form]").attr("value");
  }
conditional_block
text_run.rs
,
        }
    }
}

impl TextRunTemplate {
    /// Update the GPU cache for a given primitive template. This may be called multiple
    /// times per frame, by each primitive reference that refers to this interned
    /// template. The initial request call to the GPU cache ensures that work is only
    /// done if the cache entry is invalid (due to first use or eviction).
    pub fn update(
        &mut self,
        frame_state: &mut FrameBuildingState,
    ) {
        self.write_prim_gpu_blocks(frame_state);
        self.opacity = PrimitiveOpacity::translucent();
    }

    fn write_prim_gpu_blocks(
        &mut self,
        frame_state: &mut FrameBuildingState,
    ) {
        // corresponds to `fetch_glyph` in the shaders
        if let Some(mut request) = frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle)
// Ensure the last block is added in the case
            // of an odd number of glyphs.
            if (self.glyphs.len() & 1) != 0 {
                request.push(gpu_block);
            }

            assert!(request.current_used_block_num() <= MAX_VERTEX_TEXTURE_WIDTH);
        }
    }
}

pub type TextRunDataHandle = intern::Handle<TextRun>;

#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TextRun {
    pub font: FontInstance,
    #[ignore_malloc_size_of = "Measured via PrimaryArc"]
    pub glyphs: Arc<Vec<GlyphInstance>>,
    pub shadow: bool,
    pub requested_raster_space: RasterSpace,
}

impl intern::Internable for TextRun {
    type Key = TextRunKey;
    type StoreData = TextRunTemplate;
    type InternData = ();
    const PROFILE_COUNTER: usize = crate::profiler::INTERNED_TEXT_RUNS;
}

impl InternablePrimitive for TextRun {
    fn into_key(
        self,
        info: &LayoutPrimitiveInfo,
    ) -> TextRunKey {
        TextRunKey::new(
            info,
            self,
        )
    }

    fn make_instance_kind(
        key: TextRunKey,
        data_handle: TextRunDataHandle,
        prim_store: &mut PrimitiveStore,
        reference_frame_relative_offset: LayoutVector2D,
    ) -> PrimitiveInstanceKind {
        let run_index = prim_store.text_runs.push(TextRunPrimitive {
            used_font: key.font.clone(),
            glyph_keys_range: storage::Range::empty(),
            reference_frame_relative_offset,
            snapped_reference_frame_relative_offset: reference_frame_relative_offset,
            shadow: key.shadow,
            raster_scale: 1.0,
            requested_raster_space: key.requested_raster_space,
        });

        PrimitiveInstanceKind::TextRun{ data_handle, run_index }
    }
}

impl CreateShadow for TextRun {
    fn create_shadow(
        &self,
        shadow: &Shadow,
        blur_is_noop: bool,
        current_raster_space: RasterSpace,
    ) -> Self {
        let mut font = FontInstance {
            color: shadow.color.into(),
            ..self.font.clone()
        };
        if shadow.blur_radius > 0.0 {
            font.disable_subpixel_aa();
        }

        let requested_raster_space = if blur_is_noop {
            current_raster_space
        } else {
            RasterSpace::Local(1.0)
        };

        TextRun {
            font,
            glyphs: self.glyphs.clone(),
            shadow: true,
            requested_raster_space,
        }
    }
}

impl IsVisible for TextRun {
    fn is_visible(&self) -> bool {
        self.font.color.a > 0
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct TextRunPrimitive {
    pub used_font: FontInstance,
    pub glyph_keys_range: storage::Range<GlyphKey>,
    pub reference_frame_relative_offset: LayoutVector2D,
    pub snapped_reference_frame_relative_offset: LayoutVector2D,
    pub shadow: bool,
    pub raster_scale: f32,
    pub requested_raster_space: RasterSpace,
}

impl TextRunPrimitive {
    pub fn update_font_instance(
        &mut self,
        specified_font: &FontInstance,
        surface: &SurfaceInfo,
        spatial_node_index: SpatialNodeIndex,
        transform: &LayoutToWorldTransform,
        mut allow_subpixel: bool,
        raster_space: RasterSpace,
        spatial_tree: &SpatialTree,
    ) -> bool {
        // If local raster space is specified, include that in the scale
        // of the glyphs that get rasterized.
        // TODO(gw): Once we support proper local space raster modes, this
        //           will implicitly be part of the device pixel ratio for
        //           the (cached) local space surface, and so this code
        //           will no longer be required.
        let raster_scale = raster_space.local_scale().unwrap_or(1.0).max(0.001);
        let dps = surface.device_pixel_scale.0;
        let font_size = specified_font.size.to_f32_px();

        // Small floating point error can accumulate in the raster * device_pixel scale.
        // Round that to the nearest 100th of a scale factor to remove this error while
        // still allowing reasonably accurate scale factors when a pinch-zoom is stopped
        // at a fractional amount.
        let quantized_scale = (dps * raster_scale * 100.0).round() / 100.0;
        let mut device_font_size = font_size * quantized_scale;

        // Check there is a valid transform that doesn't exceed the font size limit.
        // Ensure the font is supposed to be rasterized in screen-space.
        // Only support transforms that can be coerced to simple 2D transforms.
        // Add texture padding to the rasterized glyph buffer when one anticipates
        // the glyph will need to be scaled when rendered.
        let (use_subpixel_aa, transform_glyphs, texture_padding, oversized) =
            if raster_space != RasterSpace::Screen ||
               transform.has_perspective_component() || !transform.has_2d_inverse()
        {
            (false, false, true, device_font_size > FONT_SIZE_LIMIT)
        } else if transform.exceeds_2d_scale((FONT_SIZE_LIMIT / device_font_size) as f64) {
            (false, false, true, true)
        } else {
            (true, !transform.is_simple_2d_translation(), false, false)
        };

        let font_transform = if transform_glyphs {
            // Get the font transform matrix (skew / scale) from the complete transform.
            // Fold in the device pixel scale.
            self.raster_scale = 1.0;
            FontTransform::from(transform)
        } else {
            if oversized {
                // Font sizes larger than the limit need to be scaled, thus can't use subpixels.
                // In this case we adjust the font size and raster space to ensure
                // we rasterize at the limit, to minimize the amount of scaling.
                let limited_raster_scale = FONT_SIZE_LIMIT / (font_size * dps);
                device_font_size = FONT_SIZE_LIMIT;

                // Record the raster space the text needs to be snapped in. The original raster
                // scale would have been too big.
                self.raster_scale = limited_raster_scale;
            } else {
                // Record the raster space the text needs to be snapped in. We may have changed
                // from RasterSpace::Screen due to a transform with perspective or without a 2d
                // inverse, or it may have been RasterSpace::Local all along.
                self.raster_scale = raster_scale;
            }

            // Rasterize the glyph without any transform
            FontTransform::identity()
        };

        // TODO(aosmond): Snapping really ought to happen during scene building
        // as much as possible. This will allow clips to be already adjusted
        // based on the snapping requirements of the primitive. This may affect
        // complex clips that create a different task, and when we rasterize
        // glyphs without the transform (because the shader doesn't have the
        // snap offsets to adjust its clip). These rects are fairly conservative
        // to begin with and do not appear to be causing significant issues at
        // this time.
        self.snapped_reference_frame_relative_offset = if transform_glyphs {
            // Don't touch the reference frame relative offset. We'll let the
            // shader do the snapping in device pixels.
            self.reference_frame_relative_offset
        } else {
            // TODO(dp): The SurfaceInfo struct needs to be updated to use RasterPixelScale
            //           rather than DevicePixelScale, however this is a large chunk of
            //           work that
{
            request.push(ColorF::from(self.font.color).premultiplied());
            // this is the only case where we need to provide plain color to GPU
            let bg_color = ColorF::from(self.font.bg_color);
            request.push([bg_color.r, bg_color.g, bg_color.b, 1.0]);

            let mut gpu_block = [0.0; 4];
            for (i, src) in self.glyphs.iter().enumerate() {
                // Two glyphs are packed per GPU block.
                if (i & 1) == 0 {
                    gpu_block[0] = src.point.x;
                    gpu_block[1] = src.point.y;
                } else {
                    gpu_block[2] = src.point.x;
                    gpu_block[3] = src.point.y;
                    request.push(gpu_block);
                }
            }
conditional_block
text_run.rs
,
        }
    }
}

impl TextRunTemplate {
    /// Update the GPU cache for a given primitive template. This may be called multiple
    /// times per frame, by each primitive reference that refers to this interned
    /// template. The initial request call to the GPU cache ensures that work is only
    /// done if the cache entry is invalid (due to first use or eviction).
    pub fn update(
        &mut self,
        frame_state: &mut FrameBuildingState,
    ) {
        self.write_prim_gpu_blocks(frame_state);
        self.opacity = PrimitiveOpacity::translucent();
    }

    fn write_prim_gpu_blocks(
        &mut self,
request.push(ColorF::from(self.font.color).premultiplied());
            // this is the only case where we need to provide plain color to GPU
            let bg_color = ColorF::from(self.font.bg_color);
            request.push([bg_color.r, bg_color.g, bg_color.b, 1.0]);

            let mut gpu_block = [0.0; 4];
            for (i, src) in self.glyphs.iter().enumerate() {
                // Two glyphs are packed per GPU block.
                if (i & 1) == 0 {
                    gpu_block[0] = src.point.x;
                    gpu_block[1] = src.point.y;
                } else {
                    gpu_block[2] = src.point.x;
                    gpu_block[3] = src.point.y;
                    request.push(gpu_block);
                }
            }

            // Ensure the last block is added in the case
            // of an odd number of glyphs.
            if (self.glyphs.len() & 1) != 0 {
                request.push(gpu_block);
            }

            assert!(request.current_used_block_num() <= MAX_VERTEX_TEXTURE_WIDTH);
        }
    }
}

pub type TextRunDataHandle = intern::Handle<TextRun>;

#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TextRun {
    pub font: FontInstance,
    #[ignore_malloc_size_of = "Measured via PrimaryArc"]
    pub glyphs: Arc<Vec<GlyphInstance>>,
    pub shadow: bool,
    pub requested_raster_space: RasterSpace,
}

impl intern::Internable for TextRun {
    type Key = TextRunKey;
    type StoreData = TextRunTemplate;
    type InternData = ();
    const PROFILE_COUNTER: usize = crate::profiler::INTERNED_TEXT_RUNS;
}

impl InternablePrimitive for TextRun {
    fn into_key(
        self,
        info: &LayoutPrimitiveInfo,
    ) -> TextRunKey {
        TextRunKey::new(
            info,
            self,
        )
    }

    fn make_instance_kind(
        key: TextRunKey,
        data_handle: TextRunDataHandle,
        prim_store: &mut PrimitiveStore,
        reference_frame_relative_offset: LayoutVector2D,
    ) -> PrimitiveInstanceKind {
        let run_index = prim_store.text_runs.push(TextRunPrimitive {
            used_font: key.font.clone(),
            glyph_keys_range: storage::Range::empty(),
            reference_frame_relative_offset,
            snapped_reference_frame_relative_offset: reference_frame_relative_offset,
            shadow: key.shadow,
            raster_scale: 1.0,
            requested_raster_space: key.requested_raster_space,
        });

        PrimitiveInstanceKind::TextRun{ data_handle, run_index }
    }
}

impl CreateShadow for TextRun {
    fn create_shadow(
        &self,
        shadow: &Shadow,
        blur_is_noop: bool,
        current_raster_space: RasterSpace,
    ) -> Self {
        let mut font = FontInstance {
            color: shadow.color.into(),
            ..self.font.clone()
        };
        if shadow.blur_radius > 0.0 {
            font.disable_subpixel_aa();
        }

        let requested_raster_space = if blur_is_noop {
            current_raster_space
        } else {
            RasterSpace::Local(1.0)
        };

        TextRun {
            font,
            glyphs: self.glyphs.clone(),
            shadow: true,
            requested_raster_space,
        }
    }
}

impl IsVisible for TextRun {
    fn is_visible(&self) -> bool {
        self.font.color.a > 0
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct TextRunPrimitive {
    pub used_font: FontInstance,
    pub glyph_keys_range: storage::Range<GlyphKey>,
    pub reference_frame_relative_offset: LayoutVector2D,
    pub snapped_reference_frame_relative_offset: LayoutVector2D,
    pub shadow: bool,
    pub raster_scale: f32,
    pub requested_raster_space: RasterSpace,
}

impl TextRunPrimitive {
    pub fn update_font_instance(
        &mut self,
        specified_font: &FontInstance,
        surface: &SurfaceInfo,
        spatial_node_index: SpatialNodeIndex,
        transform: &LayoutToWorldTransform,
        mut allow_subpixel: bool,
        raster_space: RasterSpace,
        spatial_tree: &SpatialTree,
    ) -> bool {
        // If local raster space is specified, include that in the scale
        // of the glyphs that get rasterized.
        // TODO(gw): Once we support proper local space raster modes, this
        //           will implicitly be part of the device pixel ratio for
        //           the (cached) local space surface, and so this code
        //           will no longer be required.
        let raster_scale = raster_space.local_scale().unwrap_or(1.0).max(0.001);
        let dps = surface.device_pixel_scale.0;
        let font_size = specified_font.size.to_f32_px();

        // Small floating point error can accumulate in the raster * device_pixel scale.
        // Round that to the nearest 100th of a scale factor to remove this error while
        // still allowing reasonably accurate scale factors when a pinch-zoom is stopped
        // at a fractional amount.
        let quantized_scale = (dps * raster_scale * 100.0).round() / 100.0;
        let mut device_font_size = font_size * quantized_scale;

        // Check there is a valid transform that doesn't exceed the font size limit.
        // Ensure the font is supposed to be rasterized in screen-space.
        // Only support transforms that can be coerced to simple 2D transforms.
        // Add texture padding to the rasterized glyph buffer when one anticipates
        // the glyph will need to be scaled when rendered.
        let (use_subpixel_aa, transform_glyphs, texture_padding, oversized) =
            if raster_space != RasterSpace::Screen ||
               transform.has_perspective_component() || !transform.has_2d_inverse()
        {
            (false, false, true, device_font_size > FONT_SIZE_LIMIT)
        } else if transform.exceeds_2d_scale((FONT_SIZE_LIMIT / device_font_size) as f64) {
            (false, false, true, true)
        } else {
            (true, !transform.is_simple_2d_translation(), false, false)
        };

        let font_transform = if transform_glyphs {
            // Get the font transform matrix (skew / scale) from the complete transform.
            // Fold in the device pixel scale.
            self.raster_scale = 1.0;
            FontTransform::from(transform)
        } else {
            if oversized {
                // Font sizes larger than the limit need to be scaled, thus can't use subpixels.
                // In this case we adjust the font size and raster space to ensure
                // we rasterize at the limit, to minimize the amount of scaling.
                let limited_raster_scale = FONT_SIZE_LIMIT / (font_size * dps);
                device_font_size = FONT_SIZE_LIMIT;

                // Record the raster space the text needs to be snapped in. The original raster
                // scale would have been too big.
                self.raster_scale = limited_raster_scale;
            } else {
                // Record the raster space the text needs to be snapped in. We may have changed
                // from RasterSpace::Screen due to a transform with perspective or without a 2d
                // inverse, or it may have been RasterSpace::Local all along.
                self.raster_scale = raster_scale;
            }

            // Rasterize the glyph without any transform
            FontTransform::identity()
        };

        // TODO(aosmond): Snapping really ought to happen during scene building
        // as much as possible. This will allow clips to be already adjusted
        // based on the snapping requirements of the primitive. This may affect
        // complex clips that create a different task, and when we rasterize
        // glyphs without the transform (because the shader doesn't have the
        // snap offsets to adjust its clip). These rects are fairly conservative
        // to begin with and do not appear to be causing significant issues at
        // this time.
        self.snapped_reference_frame_relative_offset = if transform_glyphs {
            // Don't touch the reference frame relative offset. We'll let the
            // shader do the snapping in device pixels.
            self.reference_frame_relative_offset
        } else {
            // TODO(dp): The SurfaceInfo struct needs to be updated to use RasterPixelScale
            //           rather than DevicePixelScale, however this is a large chunk of
            //           work that will
frame_state: &mut FrameBuildingState,
    ) {
        // corresponds to `fetch_glyph` in the shaders
        if let Some(mut request) = frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
random_line_split
text_run.rs
0.0 {
            font.disable_subpixel_aa();
        }

        let requested_raster_space = if blur_is_noop {
            current_raster_space
        } else {
            RasterSpace::Local(1.0)
        };

        TextRun {
            font,
            glyphs: self.glyphs.clone(),
            shadow: true,
            requested_raster_space,
        }
    }
}

impl IsVisible for TextRun {
    fn is_visible(&self) -> bool {
        self.font.color.a > 0
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct TextRunPrimitive {
    pub used_font: FontInstance,
    pub glyph_keys_range: storage::Range<GlyphKey>,
    pub reference_frame_relative_offset: LayoutVector2D,
    pub snapped_reference_frame_relative_offset: LayoutVector2D,
    pub shadow: bool,
    pub raster_scale: f32,
    pub requested_raster_space: RasterSpace,
}

impl TextRunPrimitive {
    pub fn update_font_instance(
        &mut self,
        specified_font: &FontInstance,
        surface: &SurfaceInfo,
        spatial_node_index: SpatialNodeIndex,
        transform: &LayoutToWorldTransform,
        mut allow_subpixel: bool,
        raster_space: RasterSpace,
        spatial_tree: &SpatialTree,
    ) -> bool {
        // If local raster space is specified, include that in the scale
        // of the glyphs that get rasterized.
        // TODO(gw): Once we support proper local space raster modes, this
        //           will implicitly be part of the device pixel ratio for
        //           the (cached) local space surface, and so this code
        //           will no longer be required.
        let raster_scale = raster_space.local_scale().unwrap_or(1.0).max(0.001);
        let dps = surface.device_pixel_scale.0;
        let font_size = specified_font.size.to_f32_px();

        // Small floating point error can accumulate in the raster * device_pixel scale.
        // Round that to the nearest 100th of a scale factor to remove this error while
        // still allowing reasonably accurate scale factors when a pinch-zoom is stopped
        // at a fractional amount.
        let quantized_scale = (dps * raster_scale * 100.0).round() / 100.0;
        let mut device_font_size = font_size * quantized_scale;

        // Check there is a valid transform that doesn't exceed the font size limit.
        // Ensure the font is supposed to be rasterized in screen-space.
        // Only support transforms that can be coerced to simple 2D transforms.
        // Add texture padding to the rasterized glyph buffer when one anticipates
        // the glyph will need to be scaled when rendered.
        let (use_subpixel_aa, transform_glyphs, texture_padding, oversized) =
            if raster_space != RasterSpace::Screen ||
               transform.has_perspective_component() || !transform.has_2d_inverse()
        {
            (false, false, true, device_font_size > FONT_SIZE_LIMIT)
        } else if transform.exceeds_2d_scale((FONT_SIZE_LIMIT / device_font_size) as f64) {
            (false, false, true, true)
        } else {
            (true, !transform.is_simple_2d_translation(), false, false)
        };

        let font_transform = if transform_glyphs {
            // Get the font transform matrix (skew / scale) from the complete transform.
            // Fold in the device pixel scale.
            self.raster_scale = 1.0;
            FontTransform::from(transform)
        } else {
            if oversized {
                // Font sizes larger than the limit need to be scaled, thus can't use subpixels.
                // In this case we adjust the font size and raster space to ensure
                // we rasterize at the limit, to minimize the amount of scaling.
                let limited_raster_scale = FONT_SIZE_LIMIT / (font_size * dps);
                device_font_size = FONT_SIZE_LIMIT;

                // Record the raster space the text needs to be snapped in. The original raster
                // scale would have been too big.
                self.raster_scale = limited_raster_scale;
            } else {
                // Record the raster space the text needs to be snapped in. We may have changed
                // from RasterSpace::Screen due to a transform with perspective or without a 2d
                // inverse, or it may have been RasterSpace::Local all along.
self.raster_scale = raster_scale;
            }

            // Rasterize the glyph without any transform
            FontTransform::identity()
        };

        // TODO(aosmond): Snapping really ought to happen during scene building
        // as much as possible. This will allow clips to be already adjusted
        // based on the snapping requirements of the primitive. This may affect
        // complex clips that create a different task, and when we rasterize
        // glyphs without the transform (because the shader doesn't have the
        // snap offsets to adjust its clip). These rects are fairly conservative
        // to begin with and do not appear to be causing significant issues at
        // this time.
        self.snapped_reference_frame_relative_offset = if transform_glyphs {
            // Don't touch the reference frame relative offset. We'll let the
            // shader do the snapping in device pixels.
            self.reference_frame_relative_offset
        } else {
            // TODO(dp): The SurfaceInfo struct needs to be updated to use RasterPixelScale
            //           rather than DevicePixelScale, however this is a large chunk of
            //           work that will be done as a follow up patch.
            let raster_pixel_scale = RasterPixelScale::new(surface.device_pixel_scale.0);

            // There may be an animation, so snap the reference frame relative
            // offset such that it excludes the impact, if any.
            let snap_to_device = SpaceSnapper::new_with_target(
                surface.raster_spatial_node_index,
                spatial_node_index,
                raster_pixel_scale,
                spatial_tree,
            );
            snap_to_device.snap_point(&self.reference_frame_relative_offset.to_point()).to_vector()
        };

        let mut flags = specified_font.flags;
        if transform_glyphs {
            flags |= FontInstanceFlags::TRANSFORM_GLYPHS;
        }
        if texture_padding {
            flags |= FontInstanceFlags::TEXTURE_PADDING;
        }

        // If the transform or device size is different, then the caller of
        // this method needs to know to rebuild the glyphs.
        let cache_dirty =
            self.used_font.transform != font_transform ||
            self.used_font.size != device_font_size.into() ||
            self.used_font.flags != flags;

        // Construct used font instance from the specified font instance
        self.used_font = FontInstance {
            transform: font_transform,
            size: device_font_size.into(),
            flags,
            ..specified_font.clone()
        };

        // If we are using special estimated background subpixel blending, then
        // we can allow it regardless of what the surface says.
        allow_subpixel |= self.used_font.bg_color.a != 0;

        // If using local space glyphs, we don't want subpixel AA.
        if !allow_subpixel || !use_subpixel_aa {
            self.used_font.disable_subpixel_aa();

            // Disable subpixel positioning for oversized glyphs to avoid
            // thrashing the glyph cache with many subpixel variations of
            // big glyph textures. A possible subpixel positioning error
            // is small relative to the maximum font size and thus should
            // not be very noticeable.
            if oversized {
                self.used_font.disable_subpixel_position();
            }
        }

        cache_dirty
    }

    /// Gets the raster space to use when rendering this primitive.
    /// Usually this would be the requested raster space. However, if
    /// the primitive's spatial node or one of its ancestors is being pinch zoomed
    /// then we round it. This prevents us rasterizing glyphs for every minor
    /// change in zoom level, as that would be too expensive.
    fn get_raster_space_for_prim(
        &self,
        prim_spatial_node_index: SpatialNodeIndex,
        low_quality_pinch_zoom: bool,
        device_pixel_scale: DevicePixelScale,
        spatial_tree: &SpatialTree,
    ) -> RasterSpace
{
        let prim_spatial_node = spatial_tree.get_spatial_node(prim_spatial_node_index);
        if prim_spatial_node.is_ancestor_or_self_zooming {
            if low_quality_pinch_zoom {
                // In low-quality mode, we set the scale to be 1.0. However, the device-pixel
                // scale selected for the zoom will be taken into account in the caller to this
                // function when it's converted from local -> device pixels. Since in this mode
                // the device-pixel scale is constant during the zoom, this gives the desired
                // performance while also allowing the scale to be adjusted to a new factor at
                // the end of a pinch-zoom.
                RasterSpace::Local(1.0)
            } else {
                let root_spatial_node_index = spatial_tree.root_reference_frame_index();

                // For high-quality mode, we quantize the exact scale factor as before. However,
                // we want to _undo_ the effect of the device-pixel scale on the picture cache
                // tiles (which changes now that they are raster roots). Divide the rounded value
                // by the device-pixel scale so that the local -> device conversion has no effect.
                let scale_factors = spatial_tree
                    .get_relative_transform(prim_spatial_node_index, root_spatial_node_index)
identifier_body
text_run.rs
(&self) -> &Self::Target {
        &self.common
    }
}

impl ops::DerefMut for TextRunTemplate {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.common
    }
}

impl From<TextRunKey> for TextRunTemplate {
    fn from(item: TextRunKey) -> Self {
        let common = PrimTemplateCommonData::with_key_common(item.common);
        TextRunTemplate {
            common,
            font: item.font,
            glyphs: item.glyphs.0,
        }
    }
}

impl TextRunTemplate {
    /// Update the GPU cache for a given primitive template. This may be called multiple
    /// times per frame, by each primitive reference that refers to this interned
    /// template. The initial request call to the GPU cache ensures that work is only
    /// done if the cache entry is invalid (due to first use or eviction).
    pub fn update(
        &mut self,
        frame_state: &mut FrameBuildingState,
    ) {
        self.write_prim_gpu_blocks(frame_state);
        self.opacity = PrimitiveOpacity::translucent();
    }

    fn write_prim_gpu_blocks(
        &mut self,
        frame_state: &mut FrameBuildingState,
    ) {
        // corresponds to `fetch_glyph` in the shaders
        if let Some(mut request) = frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
            request.push(ColorF::from(self.font.color).premultiplied());
            // this is the only case where we need to provide plain color to GPU
            let bg_color = ColorF::from(self.font.bg_color);
            request.push([bg_color.r, bg_color.g, bg_color.b, 1.0]);

            let mut gpu_block = [0.0; 4];
            for (i, src) in self.glyphs.iter().enumerate() {
                // Two glyphs are packed per GPU block.
                if (i & 1) == 0 {
                    gpu_block[0] = src.point.x;
                    gpu_block[1] = src.point.y;
                } else {
                    gpu_block[2] = src.point.x;
                    gpu_block[3] = src.point.y;
                    request.push(gpu_block);
                }
            }

            // Ensure the last block is added in the case
            // of an odd number of glyphs.
            if (self.glyphs.len() & 1) != 0 {
                request.push(gpu_block);
            }

            assert!(request.current_used_block_num() <= MAX_VERTEX_TEXTURE_WIDTH);
        }
    }
}

pub type TextRunDataHandle = intern::Handle<TextRun>;

#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct TextRun {
    pub font: FontInstance,
    #[ignore_malloc_size_of = "Measured via PrimaryArc"]
    pub glyphs: Arc<Vec<GlyphInstance>>,
    pub shadow: bool,
    pub requested_raster_space: RasterSpace,
}

impl intern::Internable for TextRun {
    type Key = TextRunKey;
    type StoreData = TextRunTemplate;
    type InternData = ();
    const PROFILE_COUNTER: usize = crate::profiler::INTERNED_TEXT_RUNS;
}

impl InternablePrimitive for TextRun {
    fn into_key(
        self,
        info: &LayoutPrimitiveInfo,
    ) -> TextRunKey {
        TextRunKey::new(
            info,
            self,
        )
    }

    fn make_instance_kind(
        key: TextRunKey,
        data_handle: TextRunDataHandle,
        prim_store: &mut PrimitiveStore,
        reference_frame_relative_offset: LayoutVector2D,
    ) -> PrimitiveInstanceKind {
        let run_index = prim_store.text_runs.push(TextRunPrimitive {
            used_font: key.font.clone(),
            glyph_keys_range: storage::Range::empty(),
            reference_frame_relative_offset,
            snapped_reference_frame_relative_offset: reference_frame_relative_offset,
            shadow: key.shadow,
            raster_scale: 1.0,
            requested_raster_space: key.requested_raster_space,
        });

        PrimitiveInstanceKind::TextRun{ data_handle, run_index }
    }
}

impl CreateShadow for TextRun {
    fn create_shadow(
        &self,
        shadow: &Shadow,
        blur_is_noop: bool,
        current_raster_space: RasterSpace,
    ) -> Self {
        let mut font = FontInstance {
            color: shadow.color.into(),
            ..self.font.clone()
        };
        if shadow.blur_radius > 0.0 {
            font.disable_subpixel_aa();
        }

        let requested_raster_space = if blur_is_noop {
            current_raster_space
        } else {
            RasterSpace::Local(1.0)
        };

        TextRun {
            font,
            glyphs: self.glyphs.clone(),
            shadow: true,
            requested_raster_space,
        }
    }
}

impl IsVisible for TextRun {
    fn is_visible(&self) -> bool {
        self.font.color.a > 0
    }
}

#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct TextRunPrimitive {
    pub used_font: FontInstance,
    pub glyph_keys_range: storage::Range<GlyphKey>,
    pub reference_frame_relative_offset: LayoutVector2D,
    pub snapped_reference_frame_relative_offset: LayoutVector2D,
    pub shadow: bool,
    pub raster_scale: f32,
    pub requested_raster_space: RasterSpace,
}

impl TextRunPrimitive {
    pub fn update_font_instance(
        &mut self,
        specified_font: &FontInstance,
        surface: &SurfaceInfo,
        spatial_node_index: SpatialNodeIndex,
        transform: &LayoutToWorldTransform,
        mut allow_subpixel: bool,
        raster_space: RasterSpace,
        spatial_tree: &SpatialTree,
    ) -> bool {
        // If local raster space is specified, include that in the scale
        // of the glyphs that get rasterized.
        // TODO(gw): Once we support proper local space raster modes, this
        //           will implicitly be part of the device pixel ratio for
        //           the (cached) local space surface, and so this code
        //           will no longer be required.
        let raster_scale = raster_space.local_scale().unwrap_or(1.0).max(0.001);
        let dps = surface.device_pixel_scale.0;
        let font_size = specified_font.size.to_f32_px();

        // Small floating point error can accumulate in the raster * device_pixel scale.
        // Round that to the nearest 100th of a scale factor to remove this error while
        // still allowing reasonably accurate scale factors when a pinch-zoom is stopped
        // at a fractional amount.
        let quantized_scale = (dps * raster_scale * 100.0).round() / 100.0;
        let mut device_font_size = font_size * quantized_scale;

        // Check there is a valid transform that doesn't exceed the font size limit.
        // Ensure the font is supposed to be rasterized in screen-space.
        // Only support transforms that can be coerced to simple 2D transforms.
        // Add texture padding to the rasterized glyph buffer when one anticipates
        // the glyph will need to be scaled when rendered.
        let (use_subpixel_aa, transform_glyphs, texture_padding, oversized) =
            if raster_space != RasterSpace::Screen ||
               transform.has_perspective_component() || !transform.has_2d_inverse()
        {
            (false, false, true, device_font_size > FONT_SIZE_LIMIT)
        } else if transform.exceeds_2d_scale((FONT_SIZE_LIMIT / device_font_size) as f64) {
            (false, false, true, true)
        } else {
            (true, !transform.is_simple_2d_translation(), false, false)
        };

        let font_transform = if transform_glyphs {
            // Get the font transform matrix (skew / scale) from the complete transform.
            // Fold in the device pixel scale.
            self.raster_scale = 1.0;
            FontTransform::from(transform)
        } else {
            if oversized {
                // Font sizes larger than the limit need to be scaled, thus can't use subpixels.
                // In this case we adjust the font size and raster space to ensure
                // we rasterize at the limit, to minimize the amount of scaling.
                let limited_raster_scale = FONT_SIZE_LIMIT / (font_size * dps);
                device_font_size = FONT_SIZE_LIMIT;

                // Record the raster space the text needs to be snapped in. The original raster
                // scale would have been too big.
                self.raster_scale = limited_raster_scale;
            } else {
                // Record the raster space the text needs to be snapped in. We may have changed
                // from RasterSpace::Screen due to a transform with perspective or without a 2d
                // inverse, or it may have been RasterSpace::Local all along.
self.raster_scale = raster_scale; } // Rasterize the glyph without any transform FontTransform::identity() }; // TODO(aosmond): Snapping really ought to happen during scene building // as much as possible. This will allow clips to be already adjusted // based on the snapping requirements of the primitive. This may affect // complex clips that create a different task, and when we rasterize // glyphs without the transform (because the shader doesn't have the // snap offsets to adjust its clip). These rects are fairly conservative
deref
identifier_name
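A minimal Python sketch of the glyph packing in the TextRun suffix above: two (x, y) offsets go into each four-float GPU block, and a trailing odd glyph is flushed in a half-filled block whose upper half still carries the previous pair, exactly as in the Rust loop. The function name and list-of-lists representation are illustrative, not part of the WebRender source.

def pack_glyph_offsets(points):
    # Pack (x, y) pairs into 4-float blocks, two points per block.
    blocks = []
    block = [0.0, 0.0, 0.0, 0.0]
    for i, (x, y) in enumerate(points):
        if (i & 1) == 0:
            block[0], block[1] = x, y
        else:
            block[2], block[3] = x, y
            blocks.append(list(block))
    if (len(points) & 1) != 0:
        blocks.append(list(block))  # flush the half-filled block for an odd count
    return blocks

assert pack_glyph_offsets([(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]) == [
    [1.0, 2.0, 3.0, 4.0],
    [5.0, 6.0, 3.0, 4.0],  # stale upper half, mirroring the Rust behaviour
]

The scale quantization in update_font_instance reduces to the same arithmetic in any language: rounding the combined scale to two decimals removes accumulated floating-point error while keeping fractional pinch-zoom factors stable.

quantized_scale = round(1.337 * 100.0) / 100.0  # dps * raster_scale, quantized
assert quantized_scale == 1.34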
GridTooltips.js
/** * @author Mike Hill * @version 1.0.0 (2014/09/30) * * This plugin attaches tooltips to grid cells. */ Ext.define('Ext.ux.GridTooltips.grid.plugin.GridTooltips', { extend: 'Ext.plugin.Abstract', alias: 'plugin.gridtooltips', /* * Internal */ tooltipEl: null, /* * Configurable */ // Tooltip configuration delegate: '.x-grid-cell-inner', showDelay: 100, dismissDelay: 0, // Disable automatic hiding anchor: 'top', trackMouse: false, renderTo: Ext.getBody(), // Plugin configuration /** * If set to true, show tooltips only when contents are overflowing. */ overflowOnly: true, /** * Initializes the tooltips plugin. */ init: function (grid) { grid.mon(grid, 'afterrender', this.createTooltip, this, { single: true }); }, /** * Creates the configured tooltip which will be used for the grid. */ createTooltip: function (grid) { var me; var tooltip; me = this; tooltip = Ext.create('Ext.tip.ToolTip', { target: grid.view.getEl(), delegate: me.delegate, showDelay: me.showDelay, dismissDelay: me.dismissDelay, anchor: me.anchor, trackMouse: me.trackMouse, renderTo: me.renderTo }); // Attach listener to manipulate tooltip contents tooltip.mon(tooltip, 'beforeshow', me.showTooltip, me); // Store internally me.tooltipEl = tooltip; }, /** * Evaluates the tooltip properties before it is shown. This function * determines whether the tooltip should be shown. If the tooltip is to be * shown, then this function also sets the contents of the tooltip. */ showTooltip: function (tooltip, listeners) { var me; var showTooltip; var target, clientWidth, scrollWidth; me = this; target = tooltip.anchorTarget; showTooltip = true; if (me.overflowOnly === true)
showTooltip = (scrollWidth > clientWidth); } if (showTooltip === true) { // Set tooltip contents to the target's text tooltip.update(target.innerText); } return showTooltip; }, /** * Deconstructs objects created by this plugin. */ destroy: function () { var me; me = this; // Delete/dereference tooltip me.tooltipEl.destroy(); me.tooltipEl = null; me.callParent(arguments); } });
{ // Show tooltip only if the target's contents are overflowing /* * TODO: (Tested in Chrome 37) When clientWidth is equal to the * minimum scrollWidth, CSS text-overflow: ellipsis will still * display an ellipsis. * * For example, consider the scenario where clientWidth = 50 and * scrollWidth = 100. In this case, there is clearly overflow and * this method will work. However, if the visible width is then * expanded so that clientWidth == scrollWidth == 100, then an * ellipsis will be shown, but this method will not display a * tooltip since clientWidth is not less than scrollWidth. If * clientWidth and scrollWidth are brought above 100 (scrollWidth's * minimum value) then all functionality will again be as expected. * * Try to find a workaround for this one failure-case. */ clientWidth = target.clientWidth; scrollWidth = target.scrollWidth;
conditional_block
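The overflow-only decision in showTooltip, including the edge case flagged in the TODO, reduces to a small predicate. A plain-Python sketch (the function name is illustrative): when the browser draws an ellipsis at exactly scrollWidth == clientWidth, the strict comparison still returns False, which is the one failure case the comment calls out.

def should_show_tooltip(client_width, scroll_width, overflow_only=True):
    if not overflow_only:
        return True  # unrestricted: always show
    return scroll_width > client_width  # show only on genuine overflow

assert should_show_tooltip(50, 100) is True    # content clearly overflows
assert should_show_tooltip(100, 100) is False  # an ellipsis may still be drawn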
GridTooltips.js
/** * @author Mike Hill * @version 1.0.0 (2014/09/30) * * This plugin attaches tooltips to grid cells. */ Ext.define('Ext.ux.GridTooltips.grid.plugin.GridTooltips', { extend: 'Ext.plugin.Abstract', alias: 'plugin.gridtooltips', /* * Internal */ tooltipEl: null, /* * Configurable */ // Tooltip configuration delegate: '.x-grid-cell-inner', showDelay: 100, dismissDelay: 0, // Disable automatic hiding anchor: 'top', trackMouse: false, renderTo: Ext.getBody(), // Plugin configuration /** * If set to true, show tooltips only when contents are overflowing. */ overflowOnly: true, /**
}); }, /** * Creates the configured tooltip which will be used for the grid. */ createTooltip: function (grid) { var me; var tooltip; me = this; tooltip = Ext.create('Ext.tip.ToolTip', { target: grid.view.getEl(), delegate: me.delegate, showDelay: me.showDelay, dismissDelay: me.dismissDelay, anchor: me.anchor, trackMouse: me.trackMouse, renderTo: me.renderTo }); // Attach listener to manipulate tooltip contents tooltip.mon(tooltip, 'beforeshow', me.showTooltip, me); // Store internally me.tooltipEl = tooltip; }, /** * Evaluates the tooltip properties before it is shown. This function * determines whether the tooltip should be shown. If the tooltip is to be * shown, then this function also sets the contents of the tooltip. */ showTooltip: function (tooltip, listeners) { var me; var showTooltip; var target, clientWidth, scrollWidth; me = this; target = tooltip.anchorTarget; showTooltip = true; if (me.overflowOnly === true) { // Show tooltip only if the target's contents are overflowing /* * TODO: (Tested in Chrome 37) When clientWidth is equal to the * minimum scrollWidth, CSS text-overflow: ellipsis will still * display an ellipsis. * * For example, consider the scenario where clientWidth = 50 and * scrollWidth = 100. In this case, there is clearly overflow and * this method will work. However, if the visible width is then * expanded so that clientWidth == scrollWidth == 100, then an * ellipsis will be shown, but this method will not display a * tooltip since clientWidth is not less than scrollWidth. If * clientWidth and scrollWidth are brought above 100 (scrollWidth's * minimum value) then all functionality will again be as expected. * * Try to find a workaround for this one failure-case. */ clientWidth = target.clientWidth; scrollWidth = target.scrollWidth; showTooltip = (scrollWidth > clientWidth); } if (showTooltip === true) { // Set tooltip contents to the target's text tooltip.update(target.innerText); } return showTooltip; }, /** * Deconstructs objects created by this plugin. */ destroy: function () { var me; me = this; // Delete/dereference tooltip me.tooltipEl.destroy(); me.tooltipEl = null; me.callParent(arguments); } });
* Initializes the tooltips plugin. */ init: function (grid) { grid.mon(grid, 'afterrender', this.createTooltip, this, { single: true
random_line_split
test_triangularbarkbands.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * import numpy as np class TestTriangularBarkBands(TestCase): def InitTriangularBarkBands(self, nbands): return TriangularBarkBands(inputSize=1024, numberBands=nbands, lowFrequencyBound=0, highFrequencyBound=44100*.5) def testRegression(self): spectrum = [1]*1024 mbands = self.InitTriangularBarkBands(24)(spectrum) self.assertEqual(len(mbands), 24 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*24, 1e-5) mbands = self.InitTriangularBarkBands(128)(spectrum) self.assertEqual(len(mbands), 128 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*128, 1e-5) def testRegressionRastaMode(self): # Test the BFCC extractor compared to Rastamat specifications audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/vignesh.wav'), sampleRate = 44100)()*2**15 #Expected values generated in Rastamat/MATLAB expected = [ 20.28919141, 23.80362425, 26.69797305, 27.10461133, 26.64508125, 26.7758322, 27.1787682, 27.10699792, 26.29040982, 25.04243486, 24.24791966, 24.17377063, 24.61976518, 25.29554584, 24.87617598, 23.79018513, 23.04026225, 23.20707811, 23.09716777, 23.33050168, 22.8201923, 21.49477903, 21.63639095, 22.12937291, 22.01981441, 21.70728156] frameSize = 1102 hopSize = 441 fftsize = 2048 paddingSize = fftsize - frameSize spectrumSize = int(fftsize/2) + 1 w = Windowing(type = 'hann', size = frameSize, zeroPadding = paddingSize, normalized = False, zeroPhase = False) spectrum = Spectrum(size = fftsize) mbands = TriangularBarkBands(inputSize= spectrumSize, type = 'power', highFrequencyBound = 8000, lowFrequencyBound = 0, numberBands = 26, weighting = 'linear', normalize = 'unit_max') pool = Pool() for frame in FrameGenerator(audio, frameSize = frameSize, hopSize = hopSize, startFromZero = True, validFrameThresholdRatio = 1): pool.add('TriangularBarkBands', mbands(spectrum(w(frame)))) np.savetxt("out.csv", np.mean(np.log(pool['TriangularBarkBands']),0), delimiter=',') self.assertAlmostEqualVector( np.mean(np.log(pool['TriangularBarkBands']),0), expected,1e-2) def testZero(self): # Inputting zeros should return zero. 
Try with different sizes size = 1024 while (size >= 256 ): self.assertEqualVector(TriangularBarkBands()(zeros(size)), zeros(24)) size /= 2 def testInvalidInput(self): # bark bands should fail for a spectrum with less than 2 bins self.assertComputeFails(TriangularBarkBands(), []) self.assertComputeFails(TriangularBarkBands(), [0.5]) def testInvalidParam(self): self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 0 }) self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 1 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': -100 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': 100, 'highFrequencyBound': 50 }) self.assertConfigureFails(TriangularBarkBands(), { 'highFrequencyBound': 30000, 'sampleRate': 22050}) def testWrongInputSize(self): # This test makes sure that even though the inputSize given at # configure time does not match the input spectrum, the algorithm does # not crash and correctly resizes internal structures to avoid errors.
""" def testNotEnoughSpectrumBins(self): self.assertConfigureFails(TriangularBarkBands(), {'numberBands': 256, 'inputSize': 1025}) """ suite = allTests(TestTriangularBarkBands) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
spec = [.1,.4,.5,.2,.1,.01,.04]*100 np.savetxt("out.csv", TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), delimiter=',') self.assertAlmostEqualVector( TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), [0.0460643246769905]*24, 1e-6)
identifier_body
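The Rastamat regression test above boils down to averaging log band energies over frames before asserting against the reference vector. A small NumPy sketch with synthetic data; the random array stands in for pool['TriangularBarkBands']:

import numpy as np

band_energies = np.random.default_rng(0).random((10, 26)) + 1e-3  # frames x bands, strictly positive
profile = np.mean(np.log(band_energies), axis=0)  # one averaged log energy per band
assert profile.shape == (26,)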
test_triangularbarkbands.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * import numpy as np class TestTriangularBarkBands(TestCase): def InitTriangularBarkBands(self, nbands): return TriangularBarkBands(inputSize=1024, numberBands=nbands, lowFrequencyBound=0, highFrequencyBound=44100*.5) def testRegression(self): spectrum = [1]*1024 mbands = self.InitTriangularBarkBands(24)(spectrum) self.assertEqual(len(mbands), 24 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*24, 1e-5) mbands = self.InitTriangularBarkBands(128)(spectrum) self.assertEqual(len(mbands), 128 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*128, 1e-5) def testRegressionRastaMode(self): # Test the BFCC extractor compared to Rastamat specifications audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/vignesh.wav'), sampleRate = 44100)()*2**15 #Expected values generated in Rastamat/MATLAB expected = [ 20.28919141, 23.80362425, 26.69797305, 27.10461133, 26.64508125, 26.7758322, 27.1787682, 27.10699792, 26.29040982, 25.04243486, 24.24791966, 24.17377063, 24.61976518, 25.29554584, 24.87617598, 23.79018513, 23.04026225, 23.20707811, 23.09716777, 23.33050168, 22.8201923, 21.49477903, 21.63639095, 22.12937291, 22.01981441, 21.70728156] frameSize = 1102 hopSize = 441 fftsize = 2048 paddingSize = fftsize - frameSize spectrumSize = int(fftsize/2) + 1 w = Windowing(type = 'hann', size = frameSize, zeroPadding = paddingSize, normalized = False, zeroPhase = False) spectrum = Spectrum(size = fftsize) mbands = TriangularBarkBands(inputSize= spectrumSize, type = 'power', highFrequencyBound = 8000, lowFrequencyBound = 0, numberBands = 26, weighting = 'linear', normalize = 'unit_max') pool = Pool() for frame in FrameGenerator(audio, frameSize = frameSize, hopSize = hopSize, startFromZero = True, validFrameThresholdRatio = 1): pool.add('TriangularBarkBands', mbands(spectrum(w(frame)))) np.savetxt("out.csv", np.mean(np.log(pool['TriangularBarkBands']),0), delimiter=',') self.assertAlmostEqualVector( np.mean(np.log(pool['TriangularBarkBands']),0), expected,1e-2) def testZero(self): # Inputting zeros should return zero. 
Try with different sizes size = 1024 while (size >= 256 ): self.assertEqualVector(TriangularBarkBands()(zeros(size)), zeros(24)) size /= 2 def testInvalidInput(self): # bark bands should fail for a spectrum with less than 2 bins self.assertComputeFails(TriangularBarkBands(), []) self.assertComputeFails(TriangularBarkBands(), [0.5]) def testInvalidParam(self): self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 0 }) self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 1 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': -100 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': 100, 'highFrequencyBound': 50 }) self.assertConfigureFails(TriangularBarkBands(), { 'highFrequencyBound': 30000, 'sampleRate': 22050}) def testWrongInputSize(self): # This test makes sure that even though the inputSize given at # configure time does not match the input spectrum, the algorithm does # not crash and correctly resizes internal structures to avoid errors. spec = [.1,.4,.5,.2,.1,.01,.04]*100 np.savetxt("out.csv", TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), delimiter=',') self.assertAlmostEqualVector( TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), [0.0460643246769905]*24, 1e-6) """ def testNotEnoughSpectrumBins(self): self.assertConfigureFails(TriangularBarkBands(), {'numberBands': 256, 'inputSize': 1025}) """ suite = allTests(TestTriangularBarkBands) if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
conditional_block
test_triangularbarkbands.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * import numpy as np class TestTriangularBarkBands(TestCase): def InitTriangularBarkBands(self, nbands): return TriangularBarkBands(inputSize=1024, numberBands=nbands, lowFrequencyBound=0, highFrequencyBound=44100*.5) def testRegression(self): spectrum = [1]*1024 mbands = self.InitTriangularBarkBands(24)(spectrum) self.assertEqual(len(mbands), 24 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*24, 1e-5) mbands = self.InitTriangularBarkBands(128)(spectrum) self.assertEqual(len(mbands), 128 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*128, 1e-5) def testRegressionRastaMode(self): # Test the BFCC extractor compared to Rastamat specifications audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/vignesh.wav'), sampleRate = 44100)()*2**15 #Expected values generated in Rastamat/MATLAB expected = [ 20.28919141, 23.80362425, 26.69797305, 27.10461133, 26.64508125, 26.7758322, 27.1787682, 27.10699792, 26.29040982, 25.04243486, 24.24791966, 24.17377063, 24.61976518, 25.29554584, 24.87617598, 23.79018513, 23.04026225, 23.20707811, 23.09716777, 23.33050168, 22.8201923, 21.49477903, 21.63639095, 22.12937291, 22.01981441, 21.70728156] frameSize = 1102 hopSize = 441 fftsize = 2048 paddingSize = fftsize - frameSize spectrumSize = int(fftsize/2) + 1 w = Windowing(type = 'hann', size = frameSize, zeroPadding = paddingSize, normalized = False, zeroPhase = False) spectrum = Spectrum(size = fftsize) mbands = TriangularBarkBands(inputSize= spectrumSize, type = 'power', highFrequencyBound = 8000, lowFrequencyBound = 0, numberBands = 26, weighting = 'linear', normalize = 'unit_max') pool = Pool() for frame in FrameGenerator(audio, frameSize = frameSize, hopSize = hopSize, startFromZero = True, validFrameThresholdRatio = 1): pool.add('TriangularBarkBands', mbands(spectrum(w(frame)))) np.savetxt("out.csv", np.mean(np.log(pool['TriangularBarkBands']),0), delimiter=',') self.assertAlmostEqualVector( np.mean(np.log(pool['TriangularBarkBands']),0), expected,1e-2) def testZero(self): # Inputting zeros should return zero. 
Try with different sizes size = 1024 while (size >= 256 ): self.assertEqualVector(TriangularBarkBands()(zeros(size)), zeros(24)) size /= 2 def testInvalidInput(self): # bark bands should fail for a spectrum with less than 2 bins self.assertComputeFails(TriangularBarkBands(), []) self.assertComputeFails(TriangularBarkBands(), [0.5]) def testInvalidParam(self): self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 0 }) self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 1 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': -100 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': 100, 'highFrequencyBound': 50 }) self.assertConfigureFails(TriangularBarkBands(), { 'highFrequencyBound': 30000, 'sampleRate': 22050}) def
(self): # This test makes sure that even though the inputSize given at # configure time does not match the input spectrum, the algorithm does # not crash and correctly resizes internal structures to avoid errors. spec = [.1,.4,.5,.2,.1,.01,.04]*100 np.savetxt("out.csv", TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), delimiter=',') self.assertAlmostEqualVector( TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), [0.0460643246769905]*24, 1e-6) """ def testNotEnoughSpectrumBins(self): self.assertConfigureFails(TriangularBarkBands(), {'numberBands': 256, 'inputSize': 1025}) """ suite = allTests(TestTriangularBarkBands) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
testWrongInputSize
identifier_name
test_triangularbarkbands.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * import numpy as np class TestTriangularBarkBands(TestCase): def InitTriangularBarkBands(self, nbands): return TriangularBarkBands(inputSize=1024, numberBands=nbands, lowFrequencyBound=0, highFrequencyBound=44100*.5) def testRegression(self): spectrum = [1]*1024 mbands = self.InitTriangularBarkBands(24)(spectrum) self.assertEqual(len(mbands), 24 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*24, 1e-5) mbands = self.InitTriangularBarkBands(128)(spectrum) self.assertEqual(len(mbands), 128 ) self.assert_(not any(numpy.isnan(mbands))) self.assert_(not any(numpy.isinf(mbands))) self.assertAlmostEqualVector(mbands, [1]*128, 1e-5) def testRegressionRastaMode(self): # Test the BFCC extractor compared to Rastamat specifications audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/vignesh.wav'), sampleRate = 44100)()*2**15 #Expected values generated in Rastamat/MATLAB expected = [ 20.28919141, 23.80362425, 26.69797305, 27.10461133, 26.64508125, 26.7758322, 27.1787682, 27.10699792, 26.29040982, 25.04243486, 24.24791966, 24.17377063, 24.61976518, 25.29554584, 24.87617598, 23.79018513, 23.04026225, 23.20707811, 23.09716777, 23.33050168, 22.8201923, 21.49477903, 21.63639095, 22.12937291, 22.01981441, 21.70728156] frameSize = 1102 hopSize = 441 fftsize = 2048 paddingSize = fftsize - frameSize spectrumSize = int(fftsize/2) + 1 w = Windowing(type = 'hann', size = frameSize, zeroPadding = paddingSize, normalized = False, zeroPhase = False) spectrum = Spectrum(size = fftsize) mbands = TriangularBarkBands(inputSize= spectrumSize, type = 'power', highFrequencyBound = 8000, lowFrequencyBound = 0, numberBands = 26, weighting = 'linear', normalize = 'unit_max') pool = Pool() for frame in FrameGenerator(audio, frameSize = frameSize, hopSize = hopSize, startFromZero = True, validFrameThresholdRatio = 1): pool.add('TriangularBarkBands', mbands(spectrum(w(frame))))
self.assertAlmostEqualVector( np.mean(np.log(pool['TriangularBarkBands']),0), expected,1e-2) def testZero(self): # Inputting zeros should return zero. Try with different sizes size = 1024 while (size >= 256 ): self.assertEqualVector(TriangularBarkBands()(zeros(size)), zeros(24)) size /= 2 def testInvalidInput(self): # bark bands should fail for a spectrum with less than 2 bins self.assertComputeFails(TriangularBarkBands(), []) self.assertComputeFails(TriangularBarkBands(), [0.5]) def testInvalidParam(self): self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 0 }) self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 1 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': -100 }) self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': 100, 'highFrequencyBound': 50 }) self.assertConfigureFails(TriangularBarkBands(), { 'highFrequencyBound': 30000, 'sampleRate': 22050}) def testWrongInputSize(self): # This test makes sure that even though the inputSize given at # configure time does not match the input spectrum, the algorithm does # not crash and correctly resizes internal structures to avoid errors. spec = [.1,.4,.5,.2,.1,.01,.04]*100 np.savetxt("out.csv", TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), delimiter=',') self.assertAlmostEqualVector( TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), [0.0460643246769905]*24, 1e-6) """ def testNotEnoughSpectrumBins(self): self.assertConfigureFails(TriangularBarkBands(), {'numberBands': 256, 'inputSize': 1025}) """ suite = allTests(TestTriangularBarkBands) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
np.savetxt("out.csv", np.mean(np.log(pool['TriangularBarkBands']),0), delimiter=',')
random_line_split
keyboard.rs
use serde::Deserialize; use smithay::wayland::seat::Keysym; pub use smithay::{ backend::input::KeyState, wayland::seat::{keysyms as KeySyms, ModifiersState as KeyModifiers}, }; use xkbcommon::xkb; #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] pub enum KeyModifier { Ctrl, Alt, Shift, Logo, CapsLock, NumLock, } impl std::ops::AddAssign<KeyModifier> for KeyModifiers { fn add_assign(&mut self, rhs: KeyModifier) { match rhs { KeyModifier::Ctrl => self.ctrl = true, KeyModifier::Alt => self.alt = true, KeyModifier::Shift => self.shift = true, KeyModifier::Logo => self.logo = true, KeyModifier::CapsLock => self.caps_lock = true, KeyModifier::NumLock => self.num_lock = true, }; } } impl std::ops::BitOr for KeyModifier { type Output = KeyModifiers; fn bitor(self, rhs: KeyModifier) -> Self::Output { let mut modifiers = self.into(); modifiers += rhs; modifiers } } impl Into<KeyModifiers> for KeyModifier { fn into(self) -> KeyModifiers { let mut modifiers = KeyModifiers { ctrl: false, alt: false, shift: false, caps_lock: false, logo: false, num_lock: false, }; modifiers += self; modifiers } } #[derive(Deserialize)] #[serde(transparent)] struct
(Vec<KeyModifier>); impl From<KeyModifiersDef> for KeyModifiers { fn from(src: KeyModifiersDef) -> Self { src.0.into_iter().fold( KeyModifiers { ctrl: false, alt: false, shift: false, caps_lock: false, logo: false, num_lock: false, }, |mut modis, modi| { modis += modi; modis }, ) } } #[allow(non_snake_case)] fn deserialize_KeyModifiers<'de, D>(deserializer: D) -> Result<KeyModifiers, D::Error> where D: serde::Deserializer<'de>, { KeyModifiersDef::deserialize(deserializer).map(Into::into) } #[allow(non_snake_case)] fn deserialize_Keysym<'de, D>(deserializer: D) -> Result<Keysym, D::Error> where D: serde::Deserializer<'de>, { use serde::de::{Error, Unexpected}; let name = String::deserialize(deserializer)?; //let name = format!("KEY_{}", code); match xkb::keysym_from_name(&name, xkb::KEYSYM_NO_FLAGS) { KeySyms::KEY_NoSymbol => match xkb::keysym_from_name(&name, xkb::KEYSYM_CASE_INSENSITIVE) { KeySyms::KEY_NoSymbol => Err(<D::Error as Error>::invalid_value( Unexpected::Str(&name), &"One of the keysym names of xkbcommon.h without the 'KEY_' prefix", )), x => { slog_scope::warn!( "Key-Binding '{}' only matched case insensitive for {:?}", name, xkb::keysym_get_name(x) ); Ok(x) } }, x => Ok(x), } } /// Description of a key combination that might be /// handled by the compositor. #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] #[serde(deny_unknown_fields)] pub struct KeyPattern { /// What modifiers are expected to be pressed alongside the key #[serde(deserialize_with = "deserialize_KeyModifiers")] pub modifiers: KeyModifiers, /// The actual key that was pressed #[serde(deserialize_with = "deserialize_Keysym")] pub key: u32, } impl KeyPattern { pub fn new(modifiers: impl Into<KeyModifiers>, key: u32) -> KeyPattern { KeyPattern { modifiers: modifiers.into(), key, } } }
KeyModifiersDef
identifier_name
keyboard.rs
use serde::Deserialize; use smithay::wayland::seat::Keysym; pub use smithay::{ backend::input::KeyState, wayland::seat::{keysyms as KeySyms, ModifiersState as KeyModifiers}, }; use xkbcommon::xkb; #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] pub enum KeyModifier { Ctrl, Alt, Shift, Logo, CapsLock, NumLock, } impl std::ops::AddAssign<KeyModifier> for KeyModifiers { fn add_assign(&mut self, rhs: KeyModifier) { match rhs { KeyModifier::Ctrl => self.ctrl = true, KeyModifier::Alt => self.alt = true, KeyModifier::Shift => self.shift = true, KeyModifier::Logo => self.logo = true, KeyModifier::CapsLock => self.caps_lock = true, KeyModifier::NumLock => self.num_lock = true, }; } } impl std::ops::BitOr for KeyModifier { type Output = KeyModifiers; fn bitor(self, rhs: KeyModifier) -> Self::Output { let mut modifiers = self.into(); modifiers += rhs; modifiers } } impl Into<KeyModifiers> for KeyModifier { fn into(self) -> KeyModifiers { let mut modifiers = KeyModifiers { ctrl: false, alt: false, shift: false, caps_lock: false, logo: false, num_lock: false, }; modifiers += self; modifiers } } #[derive(Deserialize)] #[serde(transparent)] struct KeyModifiersDef(Vec<KeyModifier>); impl From<KeyModifiersDef> for KeyModifiers { fn from(src: KeyModifiersDef) -> Self { src.0.into_iter().fold( KeyModifiers { ctrl: false, alt: false, shift: false, caps_lock: false, logo: false, num_lock: false, }, |mut modis, modi| { modis += modi; modis }, ) } } #[allow(non_snake_case)] fn deserialize_KeyModifiers<'de, D>(deserializer: D) -> Result<KeyModifiers, D::Error> where D: serde::Deserializer<'de>, { KeyModifiersDef::deserialize(deserializer).map(Into::into) } #[allow(non_snake_case)] fn deserialize_Keysym<'de, D>(deserializer: D) -> Result<Keysym, D::Error> where D: serde::Deserializer<'de>, {
match xkb::keysym_from_name(&name, xkb::KEYSYM_NO_FLAGS) { KeySyms::KEY_NoSymbol => match xkb::keysym_from_name(&name, xkb::KEYSYM_CASE_INSENSITIVE) { KeySyms::KEY_NoSymbol => Err(<D::Error as Error>::invalid_value( Unexpected::Str(&name), &"One of the keysym names of xkbcommon.h without the 'KEY_' prefix", )), x => { slog_scope::warn!( "Key-Binding '{}' only matched case insensitive for {:?}", name, xkb::keysym_get_name(x) ); Ok(x) } }, x => Ok(x), } } /// Description of a key combination that might be /// handled by the compositor. #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] #[serde(deny_unknown_fields)] pub struct KeyPattern { /// What modifiers are expected to be pressed alongside the key #[serde(deserialize_with = "deserialize_KeyModifiers")] pub modifiers: KeyModifiers, /// The actual key that was pressed #[serde(deserialize_with = "deserialize_Keysym")] pub key: u32, } impl KeyPattern { pub fn new(modifiers: impl Into<KeyModifiers>, key: u32) -> KeyPattern { KeyPattern { modifiers: modifiers.into(), key, } } }
use serde::de::{Error, Unexpected}; let name = String::deserialize(deserializer)?; //let name = format!("KEY_{}", code);
random_line_split
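The two-stage lookup in deserialize_Keysym (an exact name match first, then a case-insensitive retry that logs a warning) can be sketched independently of xkbcommon. The table below is an illustrative stand-in for the keysym registry, not the real API:

import logging

KEYSYMS = {"XF86AudioMute": 0x1008FF12, "Escape": 0xFF1B}  # illustrative subset

def keysym_from_name(name):
    if name in KEYSYMS:  # exact spelling wins
        return KEYSYMS[name]
    for known, code in KEYSYMS.items():  # case-insensitive fallback
        if known.lower() == name.lower():
            logging.warning("Key-Binding '%s' only matched case insensitive for '%s'",
                            name, known)
            return code
    raise ValueError("One of the keysym names of xkbcommon.h without the 'KEY_' prefix")

assert keysym_from_name("escape") == 0xFF1B  # warns, then resolves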
customImageSearchAPIClient.d.ts
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ import { ServiceClient, ServiceClientOptions, ServiceClientCredentials } from 'ms-rest'; import * as models from "./models"; import * as operations from "./operations"; export default class CustomImageSearchAPIClient extends ServiceClient { /** * @class * Initializes a new instance of the CustomImageSearchAPIClient class. * @constructor *
* @param {string} [baseUri] - The base URI of the service. * * @param {object} [options] - The parameter options * * @param {Array} [options.filters] - Filters to be added to the request pipeline * * @param {object} [options.requestOptions] - Options for the underlying request object * {@link https://github.com/request/request#requestoptions-callback Options doc} * * @param {boolean} [options.noRetryPolicy] - If set to true, turn off default retry policy * */ constructor(credentials: ServiceClientCredentials, baseUri?: string, options?: ServiceClientOptions); credentials: ServiceClientCredentials; // Operation groups customInstance: operations.CustomInstance; } export { CustomImageSearchAPIClient, models as CustomImageSearchAPIModels };
* @param {credentials} credentials - Subscription credentials which uniquely identify client subscription. *
random_line_split
customImageSearchAPIClient.d.ts
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ import { ServiceClient, ServiceClientOptions, ServiceClientCredentials } from 'ms-rest'; import * as models from "./models"; import * as operations from "./operations"; export default class
extends ServiceClient { /** * @class * Initializes a new instance of the CustomImageSearchAPIClient class. * @constructor * * @param {credentials} credentials - Subscription credentials which uniquely identify client subscription. * * @param {string} [baseUri] - The base URI of the service. * * @param {object} [options] - The parameter options * * @param {Array} [options.filters] - Filters to be added to the request pipeline * * @param {object} [options.requestOptions] - Options for the underlying request object * {@link https://github.com/request/request#requestoptions-callback Options doc} * * @param {boolean} [options.noRetryPolicy] - If set to true, turn off default retry policy * */ constructor(credentials: ServiceClientCredentials, baseUri?: string, options?: ServiceClientOptions); credentials: ServiceClientCredentials; // Operation groups customInstance: operations.CustomInstance; } export { CustomImageSearchAPIClient, models as CustomImageSearchAPIModels };
CustomImageSearchAPIClient
identifier_name
check_tftp.py
# Copyright 2014 Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Health Check module for TFTP service.""" import os import socket import xmlrpclib from compass.actions.health_check import base from compass.actions.health_check import utils as health_check_utils class TftpCheck(base.BaseCheck): """tftp health check class.""" NAME = "TFTP Check" def run(self): """do health check.""" installer = self.config.OS_INSTALLER method_name = "self.check_" + installer + "_tftp()" return eval(method_name) def check_cobbler_tftp(self): """Checks if Cobbler manages TFTP service. :note: we assume TFTP service is running at the same machine where this health check runs at """ try: remote = xmlrpclib.Server( self.config.COBBLER_INSTALLER_URL, allow_none=True) remote.login( *self.config.COBBLER_INSTALLER_TOKEN) except Exception: self._set_status( 0, "[%s]Error: Cannot login to Cobbler with the tokens " " provided in the config file" % self.NAME) return (self.code, self.messages) cobbler_settings = remote.get_settings() if cobbler_settings['manage_tftp'] == 0: self.messages.append( '[TFTP]Info: tftp service is not managed by Compass') return (0, self.messages) self.check_tftp_dir() print "[Done]" self.check_tftp_service() print "[Done]" if self.code == 1: self.messages.append( "[%s]Info: tftp service health check has completed. " "No problems found, all systems go." % self.NAME) return (self.code, self.messages) def check_tftp_dir(self): """Validates TFTP directories and configurations.""" print "Checking TFTP directories......", if not os.path.exists('/var/lib/tftpboot/'):
return True def check_tftp_service(self): """Checks if TFTP is running on port 69.""" print "Checking TFTP services......", serv_err_msg = health_check_utils.check_service_running(self.NAME, 'xinetd') if not serv_err_msg == "": self._set_status(0, serv_err_msg) if 'tftp' != socket.getservbyport(69): self._set_status( 0, "[%s]Error: tftp doesn't seem to be listening " "on Port 69." % self.NAME) return True
self._set_status( 0, "[%s]Error: No tftp-boot libraries found, " "please check if tftp server is properly " "installed/managed" % self.NAME)
conditional_block
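The directory and service checks in check_tftp.py are small enough to sketch standalone. This assumes a POSIX host with a standard /etc/services and stubs away the xinetd process check; note that socket.getservbyport raises an error when the port has no registered service, hence the try/except:

import os
import socket

def tftp_looks_healthy(tftpboot='/var/lib/tftpboot/'):
    if not os.path.exists(tftpboot):
        return False, 'tftp-boot libraries missing'
    try:
        service = socket.getservbyport(69)  # 'tftp' in a standard /etc/services
    except OSError:
        return False, 'port 69 has no registered service'
    if service != 'tftp':
        return False, 'port 69 is not registered as tftp'
    return True, 'ok'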
check_tftp.py
# Copyright 2014 Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Health Check module for TFTP service.""" import os import socket import xmlrpclib from compass.actions.health_check import base from compass.actions.health_check import utils as health_check_utils class TftpCheck(base.BaseCheck): """tftp health check class.""" NAME = "TFTP Check" def run(self): """do health check.""" installer = self.config.OS_INSTALLER method_name = "self.check_" + installer + "_tftp()" return eval(method_name) def check_cobbler_tftp(self): """Checks if Cobbler manages TFTP service.
same machine where this health check runs at """ try: remote = xmlrpclib.Server( self.config.COBBLER_INSTALLER_URL, allow_none=True) remote.login( *self.config.COBBLER_INSTALLER_TOKEN) except Exception: self._set_status( 0, "[%s]Error: Cannot login to Cobbler with the tokens " " provided in the config file" % self.NAME) return (self.code, self.messages) cobbler_settings = remote.get_settings() if cobbler_settings['manage_tftp'] == 0: self.messages.append( '[TFTP]Info: tftp service is not managed by Compass') return (0, self.messages) self.check_tftp_dir() print "[Done]" self.check_tftp_service() print "[Done]" if self.code == 1: self.messages.append( "[%s]Info: tftp service health check has completed. " "No problems found, all systems go." % self.NAME) return (self.code, self.messages) def check_tftp_dir(self): """Validates TFTP directories and configurations.""" print "Checking TFTP directories......", if not os.path.exists('/var/lib/tftpboot/'): self._set_status( 0, "[%s]Error: No tftp-boot libraries found, " "please check if tftp server is properly " "installed/managed" % self.NAME) return True def check_tftp_service(self): """Checks if TFTP is running on port 69.""" print "Checking TFTP services......", serv_err_msg = health_check_utils.check_service_running(self.NAME, 'xinetd') if not serv_err_msg == "": self._set_status(0, serv_err_msg) if 'tftp' != socket.getservbyport(69): self._set_status( 0, "[%s]Error: tftp doesn't seem to be listening " "on Port 69." % self.NAME) return True
:note: we assume TFTP service is running at the
random_line_split
check_tftp.py
# Copyright 2014 Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Health Check module for TFTP service.""" import os import socket import xmlrpclib from compass.actions.health_check import base from compass.actions.health_check import utils as health_check_utils class TftpCheck(base.BaseCheck):
remote.login( *self.config.COBBLER_INSTALLER_TOKEN) except Exception: self._set_status( 0, "[%s]Error: Cannot login to Cobbler with the tokens " " provided in the config file" % self.NAME) return (self.code, self.messages) cobbler_settings = remote.get_settings() if cobbler_settings['manage_tftp'] == 0: self.messages.append( '[TFTP]Info: tftp service is not managed by Compass') return (0, self.messages) self.check_tftp_dir() print "[Done]" self.check_tftp_service() print "[Done]" if self.code == 1: self.messages.append( "[%s]Info: tftp service health check has completed. " "No problems found, all systems go." % self.NAME) return (self.code, self.messages) def check_tftp_dir(self): """Validates TFTP directories and configurations.""" print "Checking TFTP directories......", if not os.path.exists('/var/lib/tftpboot/'): self._set_status( 0, "[%s]Error: No tftp-boot libraries found, " "please check if tftp server is properly " "installed/managed" % self.NAME) return True def check_tftp_service(self): """Checks if TFTP is running on port 69.""" print "Checking TFTP services......", serv_err_msg = health_check_utils.check_service_running(self.NAME, 'xinetd') if not serv_err_msg == "": self._set_status(0, serv_err_msg) if 'tftp' != socket.getservbyport(69): self._set_status( 0, "[%s]Error: tftp doesn't seem to be listening " "on Port 69." % self.NAME) return True
"""tftp health check class.""" NAME = "TFTP Check" def run(self): """do health check.""" installer = self.config.OS_INSTALLER method_name = "self.check_" + installer + "_tftp()" return eval(method_name) def check_cobbler_tftp(self): """Checks if Cobbler manages TFTP service. :note: we assume TFTP service is running at the same machine where this health check runs at """ try: remote = xmlrpclib.Server( self.config.COBBLER_INSTALLER_URL, allow_none=True)
identifier_body
check_tftp.py
# Copyright 2014 Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Health Check module for TFTP service.""" import os import socket import xmlrpclib from compass.actions.health_check import base from compass.actions.health_check import utils as health_check_utils class TftpCheck(base.BaseCheck): """tftp health check class.""" NAME = "TFTP Check" def run(self): """do health check.""" installer = self.config.OS_INSTALLER method_name = "self.check_" + installer + "_tftp()" return eval(method_name) def check_cobbler_tftp(self): """Checks if Cobbler manages TFTP service. :note: we assume TFTP service is running at the same machine where this health check runs at """ try: remote = xmlrpclib.Server( self.config.COBBLER_INSTALLER_URL, allow_none=True) remote.login( *self.config.COBBLER_INSTALLER_TOKEN) except Exception: self._set_status( 0, "[%s]Error: Cannot login to Cobbler with the tokens " " provided in the config file" % self.NAME) return (self.code, self.messages) cobbler_settings = remote.get_settings() if cobbler_settings['manage_tftp'] == 0: self.messages.append( '[TFTP]Info: tftp service is not managed by Compass') return (0, self.messages) self.check_tftp_dir() print "[Done]" self.check_tftp_service() print "[Done]" if self.code == 1: self.messages.append( "[%s]Info: tftp service health check has completed. " "No problems found, all systems go." % self.NAME) return (self.code, self.messages) def check_tftp_dir(self): """Validates TFTP directories and configurations.""" print "Checking TFTP directories......", if not os.path.exists('/var/lib/tftpboot/'): self._set_status( 0, "[%s]Error: No tftp-boot libraries found, " "please check if tftp server is properly " "installed/managed" % self.NAME) return True def
(self): """Checks if TFTP is running on port 69.""" print "Checking TFTP services......", serv_err_msg = health_check_utils.check_service_running(self.NAME, 'xinetd') if not serv_err_msg == "": self._set_status(0, serv_err_msg) if 'tftp' != socket.getservbyport(69): self._set_status( 0, "[%s]Error: tftp doesn't seem to be listening " "on Port 60." % self.NAME) return True
check_tftp_service
identifier_name
players-list.component.ts
import { Component, OnInit } from '@angular/core'; import { Player } from './player'; import { PlayerService } from './player.service'; import { PlayerSailsService } from './player.sails.service'; import { PlayerJSONService } from './player.json.service';
}) export class PlayersListComponent implements OnInit { players: Player[]; selectedPlayer: Player; isAddingPlayer: boolean; constructor( private playerService: PlayerService, private playerSailsService: PlayerSailsService, private playerJSONService: PlayerJSONService ) { } getPlayers(): void { this.playerService.getPlayers().then((players) => { this.players = players; }); } create(firstName: string, lastName: string): void { firstName = firstName.trim(); lastName = lastName.trim(); if (!firstName || !lastName) { this.add(); return; } this.playerService.create(firstName, lastName) .then(player => { this.players.push(player); this.selectedPlayer = null; this.add(); }); } add() { this.isAddingPlayer = !this.isAddingPlayer; } delete(player: Player): void { this.playerService .delete(player.id) .then(() => { this.getPlayers(); }); } ngOnInit(): void { this.getPlayers(); this.isAddingPlayer = false; } onSelect(player: Player): void { this.selectedPlayer = player; } }
@Component({ selector: 'players-list', templateUrl: 'app/templates/players-list.html', styleUrls: ['app/stylesheets/css/players-list.css']
random_line_split
players-list.component.ts
import { Component, OnInit } from '@angular/core'; import { Player } from './player'; import { PlayerService } from './player.service'; import { PlayerSailsService } from './player.sails.service'; import { PlayerJSONService } from './player.json.service'; @Component({ selector: 'players-list', templateUrl: 'app/templates/players-list.html', styleUrls: ['app/stylesheets/css/players-list.css'] }) export class PlayersListComponent implements OnInit { players: Player[]; selectedPlayer: Player; isAddingPlayer: boolean; constructor( private playerService: PlayerService, private playerSailsService: PlayerSailsService, private playerJSONService: PlayerJSONService ) { } getPlayers(): void { this.playerService.getPlayers().then((players) => { this.players = players; }); } create(firstName: string, lastName: string): void { firstName = firstName.trim(); lastName = lastName.trim(); if (!firstName || !lastName)
this.playerService.create(firstName, lastName) .then(player => { this.players.push(player); this.selectedPlayer = null; this.add(); }); } add() { this.isAddingPlayer = !this.isAddingPlayer; } delete(player: Player): void { this.playerService .delete(player.id) .then(() => { this.getPlayers(); }); } ngOnInit(): void { this.getPlayers(); this.isAddingPlayer = false; } onSelect(player: Player): void { this.selectedPlayer = player; } }
{ this.add(); return; }
conditional_block
players-list.component.ts
import { Component, OnInit } from '@angular/core'; import { Player } from './player'; import { PlayerService } from './player.service'; import { PlayerSailsService } from './player.sails.service'; import { PlayerJSONService } from './player.json.service'; @Component({ selector: 'players-list', templateUrl: 'app/templates/players-list.html', styleUrls: ['app/stylesheets/css/players-list.css'] }) export class PlayersListComponent implements OnInit { players: Player[]; selectedPlayer: Player; isAddingPlayer: boolean; constructor( private playerService: PlayerService, private playerSailsService: PlayerSailsService, private playerJSONService: PlayerJSONService ) { }
(): void { this.playerService.getPlayers().then((players) => { this.players = players; }); } create(firstName: string, lastName: string): void { firstName = firstName.trim(); lastName = lastName.trim(); if (!firstName || !lastName) { this.add(); return; } this.playerService.create(firstName, lastName) .then(player => { this.players.push(player); this.selectedPlayer = null; this.add(); }); } add() { this.isAddingPlayer = !this.isAddingPlayer; } delete(player: Player): void { this.playerService .delete(player.id) .then(() => { this.getPlayers(); }); } ngOnInit(): void { this.getPlayers(); this.isAddingPlayer = false; } onSelect(player: Player): void { this.selectedPlayer = player; } }
getPlayers
identifier_name
players-list.component.ts
import { Component, OnInit } from '@angular/core'; import { Player } from './player'; import { PlayerService } from './player.service'; import { PlayerSailsService } from './player.sails.service'; import { PlayerJSONService } from './player.json.service'; @Component({ selector: 'players-list', templateUrl: 'app/templates/players-list.html', styleUrls: ['app/stylesheets/css/players-list.css'] }) export class PlayersListComponent implements OnInit { players: Player[]; selectedPlayer: Player; isAddingPlayer: boolean; constructor( private playerService: PlayerService, private playerSailsService: PlayerSailsService, private playerJSONService: PlayerJSONService ) { } getPlayers(): void { this.playerService.getPlayers().then((players) => { this.players = players; }); } create(firstName: string, lastName: string): void { firstName = firstName.trim(); lastName = lastName.trim(); if (!firstName || !lastName) { this.add(); return; } this.playerService.create(firstName, lastName) .then(player => { this.players.push(player); this.selectedPlayer = null; this.add(); }); } add() { this.isAddingPlayer = !this.isAddingPlayer; } delete(player: Player): void
ngOnInit(): void { this.getPlayers(); this.isAddingPlayer = false; } onSelect(player: Player): void { this.selectedPlayer = player; } }
{ this.playerService .delete(player.id) .then(() => { this.getPlayers(); }); }
identifier_body
test_ucs_inventory_v2.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Shubhangi Satras, Cisco Systems, Inc. # @author: Tyler Smith, Cisco Systems, Inc. import logging import unittest from quantum.common import exceptions as exc from quantum.openstack.common import uuidutils from quantum.plugins.cisco.common import cisco_constants as const from quantum.plugins.cisco.common import cisco_credentials_v2 as creds from quantum.plugins.cisco.db import network_db_v2 as cdb from quantum.plugins.cisco.tests.unit.v2.ucs.cisco_ucs_inventory_fake import ( UCSInventory, ) LOG = logging.getLogger(__name__) # Set some data to use in tests tenant = 'shubh' net_name = 'TestNetwork1' port_state = const.PORT_UP interface_id = 'vif-01' class TestUCSInventory(unittest.TestCase): """ Tests for the UCS Inventory. Each high-level operation should return some information about which devices to perform the action on. """ def setUp(self): """Setup our tests""" cdb.initialize() creds.Store.initialize() # Create the ucs inventory object self._ucs_inventory = UCSInventory() self.inventory = self._ucs_inventory._inventory def assertValidUCM(self, ip_address): """Asserts that the given ip is in the UCS inventory""" if ip_address in self.inventory.keys():
assert(0) def _test_get_all_ucms(self, cmd): """Runs tests for commands that expect a list of all UCMS""" LOG.debug("test_%s - START", cmd) results = getattr(self._ucs_inventory, cmd)([]) self.assertEqual(results[const.DEVICE_IP], self.inventory.keys()) LOG.debug("test_%s - END", cmd) def _test_with_port_creation(self, cmd, params=None): """Tests commands that requires a port to exist""" LOG.debug("test_%s - START", cmd) net_uuid = uuidutils.generate_uuid() device_params = self._ucs_inventory.create_port(tenant, net_uuid, port_state, state=port_state) args = [tenant, net_uuid, port[const.PORT_ID]] if params is not None: args.extend(params) ip_address = getattr(self._ucs_inventory, cmd)(args) ip_address = ip_address[const.DEVICE_IP][0] self.assertValidUCM(ip_address) cdb.clear_db() LOG.debug("test_%s - END", cmd) def test_create_port(self): """Test that the UCS Inventory returns the correct devices to use""" LOG.debug("test_create_port - START") results = self._ucs_inventory.create_port([]) results = results[const.LEAST_RSVD_BLADE_DICT] ip_address = results[const.LEAST_RSVD_BLADE_UCSM] chassis = results[const.LEAST_RSVD_BLADE_CHASSIS] blade = results[const.LEAST_RSVD_BLADE_ID] if blade not in self.inventory[ip_address][chassis]: self.assertEqual(0, 1) self.assertEqual(1, 1) LOG.debug("test_create_port - END") def test_get_all_networks(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('get_all_networks') def test_create_network(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('create_network') def test_delete_network(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('delete_network') def test_get_network_details(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('get_network_details') def test_update_network(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('update_network') def test_get_all_ports(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('get_all_ports')
assert(1) return
conditional_block
test_ucs_inventory_v2.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Shubhangi Satras, Cisco Systems, Inc. # @author: Tyler Smith, Cisco Systems, Inc. import logging import unittest from quantum.common import exceptions as exc from quantum.openstack.common import uuidutils from quantum.plugins.cisco.common import cisco_constants as const from quantum.plugins.cisco.common import cisco_credentials_v2 as creds from quantum.plugins.cisco.db import network_db_v2 as cdb from quantum.plugins.cisco.tests.unit.v2.ucs.cisco_ucs_inventory_fake import ( UCSInventory, ) LOG = logging.getLogger(__name__) # Set some data to use in tests tenant = 'shubh' net_name = 'TestNetwork1' port_state = const.PORT_UP interface_id = 'vif-01' class TestUCSInventory(unittest.TestCase): """ Tests for the UCS Inventory. Each high-level operation should return some information about which devices to perform the action on. """ def setUp(self): """Setup our tests""" cdb.initialize() creds.Store.initialize() # Create the ucs inventory object self._ucs_inventory = UCSInventory() self.inventory = self._ucs_inventory._inventory def assertValidUCM(self, ip_address): """Asserts that the given ip is in the UCS inventory""" if ip_address in self.inventory.keys(): assert(1) return assert(0) def
(self, cmd): """Runs tests for commands that expect a list of all UCMS""" LOG.debug("test_%s - START", cmd) results = getattr(self._ucs_inventory, cmd)([]) self.assertEqual(results[const.DEVICE_IP], self.inventory.keys()) LOG.debug("test_%s - END", cmd) def _test_with_port_creation(self, cmd, params=None): """Tests commands that requires a port to exist""" LOG.debug("test_%s - START", cmd) net_uuid = uuidutils.generate_uuid() device_params = self._ucs_inventory.create_port(tenant, net_uuid, port_state, state=port_state) args = [tenant, net_uuid, port[const.PORT_ID]] if params is not None: args.extend(params) ip_address = getattr(self._ucs_inventory, cmd)(args) ip_address = ip_address[const.DEVICE_IP][0] self.assertValidUCM(ip_address) cdb.clear_db() LOG.debug("test_%s - END", cmd) def test_create_port(self): """Test that the UCS Inventory returns the correct devices to use""" LOG.debug("test_create_port - START") results = self._ucs_inventory.create_port([]) results = results[const.LEAST_RSVD_BLADE_DICT] ip_address = results[const.LEAST_RSVD_BLADE_UCSM] chassis = results[const.LEAST_RSVD_BLADE_CHASSIS] blade = results[const.LEAST_RSVD_BLADE_ID] if blade not in self.inventory[ip_address][chassis]: self.assertEqual(0, 1) self.assertEqual(1, 1) LOG.debug("test_create_port - END") def test_get_all_networks(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('get_all_networks') def test_create_network(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('create_network') def test_delete_network(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('delete_network') def test_get_network_details(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('get_network_details') def test_update_network(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('update_network') def test_get_all_ports(self): """Test that the UCS Inventory returns the correct devices to use""" self._test_get_all_ucms('get_all_ports')
_test_get_all_ucms
identifier_name
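# Each record in this dump stores one source file as prefix/suffix/middle
# cells plus a fim_type label.  A minimal sketch of stitching a record back
# into complete source, assuming that field order (hypothetical helper, not
# part of any dataset tooling):

def reconstruct(record):
    # For fill-in-the-middle data, the original file is prefix + middle + suffix.
    return record["prefix"] + record["middle"] + record["suffix"]

# Usage:
# reconstruct({"prefix": "def ", "middle": "f", "suffix": "(): pass"})
# -> "def f(): pass"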
index.js
/**
 * React Static Boilerplate
 * https://github.com/koistya/react-static-boilerplate
 * Copyright (c) Konstantin Tarkus (@koistya) | MIT license
 */

import './index.scss'
import React, { Component } from 'react'
// import { Grid, Col, Row } from 'react-bootstrap';

export default class IndexPage extends Component {
() {
    return (
      <div className="top-page">
        <div>
          <img
            className="top-image"
            src="/cover2.jpg"
            width="100%"
            alt="cover image"
          />
        </div>
        <div className="top-page--footer">
          The source code of this website is available&nbsp;
          <a
            href="https://github.com/odoruinu/odoruinu.net-pug"
            target="_blank"
            rel="noopener noreferrer"
          >
            here on GitHub
          </a>
          .
        </div>
      </div>
    )
  }
}
render
identifier_name
combaseapi.rs
// Copyright © 2016 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! Base Component Object Model definitions.
use shared::basetsd::UINT64;
use shared::minwindef::DWORD;
use shared::wtypesbase::{
    CLSCTX, CLSCTX_INPROC_HANDLER, CLSCTX_INPROC_SERVER, CLSCTX_LOCAL_SERVER,
    CLSCTX_REMOTE_SERVER,
};
use um::objidlbase::LPMALLOC;
use um::winnt::HRESULT;
pub const CLSCTX_INPROC: CLSCTX = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER;
pub const CLSCTX_ALL: CLSCTX = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER
    | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER;
pub const CLSCTX_SERVER: CLSCTX = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER
    | CLSCTX_REMOTE_SERVER;
ENUM!{enum REGCLS {
    REGCLS_SINGLEUSE = 0,
    REGCLS_MULTIPLEUSE = 1,
    REGCLS_MULTI_SEPARATE = 2,
    REGCLS_SUSPENDED = 4,
    REGCLS_SURROGATE = 8,
    REGCLS_AGILE = 0x10,
}}
ENUM!{enum COINITBASE {
    COINITBASE_MULTITHREADED = 0x0,
}}
EXTERN!{stdcall fn CoGetMalloc(
    ppMalloc: *mut LPMALLOC
) -> HRESULT}
STRUCT!{struct ServerInformation {
    dwServerPid: DWORD,
    dwServerTid: DWORD,
    ui64ServerAddress: UINT64,
}}
pub type PServerInformation = *mut ServerInformation;
DECLARE_HANDLE!(CO_MTA_USAGE_COOKIE, CO_MTA_USAGE_COOKIE__);
dwMemContext: DWORD,
random_line_split
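# The CLSCTX composites above are plain bitwise ORs of the base flags.  A
# quick Python sketch of the same composition; the base values (0x1, 0x2,
# 0x4, 0x10) are assumed from the standard Windows SDK headers:

CLSCTX_INPROC_SERVER = 0x1    # assumed SDK value
CLSCTX_INPROC_HANDLER = 0x2   # assumed SDK value
CLSCTX_LOCAL_SERVER = 0x4     # assumed SDK value
CLSCTX_REMOTE_SERVER = 0x10   # assumed SDK value

CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER                        # 0x3
CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER   # 0x15
CLSCTX_ALL = CLSCTX_INPROC | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER             # 0x17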
interface.d.ts
/**
 * @license
 * Copyright Google Inc. All Rights Reserved.
 *
 * Use of this source code is governed by an MIT-style license that can be
 * found in the LICENSE file at https://angular.io/license
 */
import { Observable, SubscribableOrPromise } from 'rxjs';
import { JsonArray, JsonObject, JsonValue } from '../interface';
export declare type JsonPointer = string & {
    __PRIVATE_DEVKIT_JSON_POINTER: void;
};
export declare type SchemaValidatorError = RefValidatorError | LimitValidatorError | AdditionalPropertiesValidatorError | FormatValidatorError | RequiredValidatorError;
export interface SchemaValidatorErrorBase {
    keyword: string;
    dataPath: string;
    message?: string;
    data?: JsonValue;
}
export interface RefValidatorError extends SchemaValidatorErrorBase {
    keyword: '$ref';
    params: {
        ref: string;
    };
}
export interface LimitValidatorError extends SchemaValidatorErrorBase {
    keyword: 'maxItems' | 'minItems' | 'maxLength' | 'minLength' | 'maxProperties' | 'minProperties';
    params: {
        limit: number;
    };
}
export interface AdditionalPropertiesValidatorError extends SchemaValidatorErrorBase {
    keyword: 'additionalProperties';
    params: {
        additionalProperty: string;
    };
}
export interface FormatValidatorError extends SchemaValidatorErrorBase {
    keyword: 'format';
    params: {
        format: string;
    };
}
export interface RequiredValidatorError extends SchemaValidatorErrorBase {
    keyword: 'required';
    params: {
        missingProperty: string;
    };
}
export interface SchemaValidatorResult {
    data: JsonValue;
    success: boolean;
    errors?: SchemaValidatorError[];
}
export interface SchemaValidatorOptions {
    applyPreTransforms?: boolean;
    applyPostTransforms?: boolean;
    withPrompts?: boolean;
}
export interface SchemaValidator {
    (data: JsonValue, options?: SchemaValidatorOptions): Observable<SchemaValidatorResult>;
}
export interface SchemaFormatter {
    readonly async: boolean;
    validate(data: any): boolean | Observable<boolean>;
}
export interface SchemaFormat {
    name: string;
    formatter: SchemaFormatter;
}
export interface SmartDefaultProvider<T> {
    (schema: JsonObject): T | Observable<T>;
}
export interface SchemaKeywordValidator {
    (data: JsonValue, schema: JsonValue, parent: JsonObject | JsonArray | undefined, parentProperty: string | number | undefined, pointer: JsonPointer, rootData: JsonValue): boolean | Observable<boolean>;
}
export interface PromptDefinition {
    id: string;
    type: string;
    message: string;
    default?: string | string[] | number | boolean | null;
    priority: number;
    validator?: (value: string) => boolean | string | Promise<boolean | string>;
    items?: Array<string | {
        value: JsonValue;
        label: string;
    }>;
    raw?: string | JsonObject;
    multiselect?: boolean;
}
export declare type PromptProvider = (definitions: Array<PromptDefinition>) => SubscribableOrPromise<{
    [id: string]: JsonValue;
}>;
    addFormat(format: SchemaFormat): void;
    addSmartDefaultProvider<T>(source: string, provider: SmartDefaultProvider<T>): void;
    usePromptProvider(provider: PromptProvider): void;
    /**
     * Add a transformation step before the validation of any Json.
     * @param {JsonVisitor} visitor The visitor to transform every value.
     * @param {JsonVisitor[]} deps A list of other visitors to run before.
     */
    addPreTransform(visitor: JsonVisitor, deps?: JsonVisitor[]): void;
    /**
     * Add a transformation step after the validation of any Json. The JSON will not be validated
     * after the POST, so if transformations are not compatible with the Schema it will not result
     * in an error.
     * @param {JsonVisitor} visitor The visitor to transform every value.
     * @param {JsonVisitor[]} deps A list of other visitors to run before.
     */
    addPostTransform(visitor: JsonVisitor, deps?: JsonVisitor[]): void;
}
export interface JsonSchemaVisitor {
    (current: JsonObject | JsonArray, pointer: JsonPointer, parentSchema?: JsonObject | JsonArray, index?: string): void;
}
export interface JsonVisitor {
    (value: JsonValue, pointer: JsonPointer, schema?: JsonObject, root?: JsonObject | JsonArray): Observable<JsonValue> | JsonValue;
}
export interface SchemaRegistry {
    compile(schema: Object): Observable<SchemaValidator>;
    flatten(schema: JsonObject | string): Observable<JsonObject>;
random_line_split
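# SchemaValidator above resolves to a SchemaValidatorResult with `success`
# and an optional `errors` array.  A small Python sketch of consuming a
# result of that shape (hypothetical helper, independent of the Angular
# implementation):

def format_validation_errors(result):
    # result mirrors SchemaValidatorResult: {"data": ..., "success": bool,
    # "errors": [{"keyword": ..., "dataPath": ..., "message": ...}, ...]}
    if result.get("success"):
        return "valid"
    lines = []
    for err in result.get("errors") or []:
        lines.append("%s: %s (%s)" % (err.get("dataPath", ""),
                                      err.get("message", ""),
                                      err.get("keyword", "")))
    return "\n".join(lines)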
utils.js
'use strict';

var path = require('path');
var fileRe = require('filename-regex');
var win32 = process && process.platform === 'win32';
var utils = require('lazy-cache')(require);

/**
 * Temporarily re-assign require to trick browserify
 * into recognizing lazy-cached deps.
 */

var fn = require;
require = utils;

/**
 * Lazily required module dependencies
 */

require('arr-diff', 'diff');
require('array-unique', 'unique');
require('braces');
require('expand-brackets', 'brackets');
require('extglob');
require('is-extglob');
require('is-glob', 'isGlob');
require('kind-of', 'typeOf');
require('normalize-path', 'normalize');
require('object.omit', 'omit');
require('parse-glob');
require('regex-cache', 'cache');

/**
 * Get the filename of a filepath
 *
 * @param {String} `string`
 * @return {String}
 */

utils.filename = function filename(fp) {
  var seg = fp.match(fileRe());
  return seg && seg[0];
};

/**
 * Returns a function that returns true if the given
 * pattern is the same as a given `filepath`
 *
 * @param {String} `pattern`
 * @return {Function}
 */

utils.isPath = function isPath(pattern, opts) {
  return function (fp) {
    return pattern === utils.unixify(fp, opts);
  };
};

/**
 * Returns a function that returns true if the given
 * pattern contains a `filepath`
 *
 * @param {String} `pattern`
 * @return {Function}
 */

utils.hasPath = function hasPath(pattern, opts) {
  return function (fp) {
    return utils.unixify(pattern, opts).indexOf(fp) !== -1;
  };
};

/**
 * Returns a function that returns true if the given
 * pattern matches or contains a `filepath`
 *
 * @param {String} `pattern`
 * @return {Function}
 */

utils.matchPath = function matchPath(pattern, opts) {
  var fn = (opts && opts.contains)
    ? utils.hasPath(pattern, opts)
    : utils.isPath(pattern, opts);
  return fn;
};

/**
 * Returns a function that returns true if the given
 * regex matches the `filename` of a file path.
 *
 * @param {RegExp} `re`
 * @return {Boolean}
 */

utils.hasFilename = function hasFilename(re) {
  return function (fp) {
    var name = utils.filename(fp);
    return name && re.test(name);
  };
};

/**
 * Coerce `val` to an array
 *
 * @param {*} val
 * @return {Array}
 */

utils.arrayify = function arrayify(val) {
  return !Array.isArray(val) ? [val] : val;
};

/**
 * Normalize all slashes in a file path or glob pattern to
 * forward slashes.
 */

utils.unixify = function unixify(fp, opts) {
  if (opts && opts.unixify === false) return fp;
  if (opts && opts.unixify === true || win32 || path.sep === '\\')
  if (opts && opts.unescape === true) {
    return fp ? fp.toString().replace(/\\(\w)/g, '$1') : '';
  }
  return fp;
};

/**
 * Escape/unescape utils
 */

utils.escapePath = function escapePath(fp) {
  return fp.replace(/[\\.]/g, '\\$&');
};

utils.unescapeGlob = function unescapeGlob(fp) {
  return fp.replace(/[\\"']/g, '');
};

utils.escapeRe = function escapeRe(str) {
  return str.replace(/[-[\\$*+?.#^\s{}(|)\]]/g, '\\$&');
};

/**
 * Restore `require`
 */

require = fn;

/**
 * Expose `utils`
 */

module.exports = utils;
{ return utils.normalize(fp, false); }
conditional_block
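# utils.unixify above branches on the `unixify` and `unescape` options
# before normalizing slashes.  A Python analogue of the same decision logic,
# for illustration only (hypothetical helper, not part of the JavaScript
# module):

import os
import re

def unixify(fp, unixify_opt=None, unescape=False):
    if unixify_opt is False:
        return fp
    if unixify_opt is True or os.sep == '\\':
        # normalize to forward slashes
        return fp.replace('\\', '/')
    if unescape:
        return re.sub(r'\\(\w)', r'\1', str(fp)) if fp else ''
    return fp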
SWIM.py
import unittest
import random

from pygraph.classes.graph import graph


class SWIM(object):
    def __init__(self, graph):
        self.graph = graph

    def edge_alive(self, nodeA, nodeB, alive):
        ''' edge_alive(A, B, True|False) '''
        edge = (nodeA, nodeB)
        if alive:
            self.graph.add_edge(edge)
        else:
            self.graph.del_edge(edge)

    def node_alive(self, node, alive):
        ''' node_alive(A, True|False) '''
        attrs = self.graph.node_attributes(node)
        if alive:
            # Bug fix: lists have no .clear() on Python 2 (this file uses
            # xrange); empty the attribute list in place instead.
            del attrs[:]
        else:
            attrs.append("dead")

    def ping(self, nodeStart, nodeEnd, k):
        '''
        nodeStart pings nodeEnd directly, or indirectly through k random
        neighbors.  Returns True if nodeEnd receives the ping, False
        otherwise.
        '''
        g = self.graph

        # Check if a direct ping works
        if g.has_edge((nodeStart, nodeEnd)) and \
           "dead" not in g.node_attributes(nodeEnd):
            return True

        # Pick k random neighbors and let them ping the end node
        for neighbor in self._random_neighbors(nodeStart, k):
            if self.ping(neighbor, nodeEnd, 0):
                return True

        # All pings have failed
        return False

    def _random_neighbors(self, node, b):
        neighbors = self.graph.neighbors(node)
        if len(neighbors) <= b:
            return neighbors
        else:
            return random.sample(neighbors, b)


class SWIMTest(unittest.TestCase):
    def setUp(self):
    def test_good_ping(self):
        swim = self.swim
        self.assertTrue(swim.ping(0, 1, 0))
        self.assertTrue(swim.ping(1, 3, 0))

    def test_dead_edge_ping(self):
        swim = self.swim
        swim.edge_alive(0, 1, False)
        self.assertFalse(swim.ping(0, 1, 0))
        self.assertTrue(swim.ping(0, 1, 1))

    def test_dead_node_ping(self):
        swim = self.swim
        swim.node_alive(2, False)
        self.assertFalse(swim.ping(0, 2, 0))
        self.assertFalse(swim.ping(0, 2, 3))


if __name__ == '__main__':
    unittest.main()
        g = graph()
        g.add_nodes(xrange(10))
        g.complete()
        self.graph = g
        self.swim = SWIM(g)
identifier_body
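# SWIM.ping above is the SWIM-style probe: try the direct edge first, then
# relay through k random neighbors.  A minimal usage sketch mirroring the
# test setup (assumes the pygraph package is installed):

from pygraph.classes.graph import graph

g = graph()
g.add_nodes(range(10))
g.complete()
swim = SWIM(g)

swim.edge_alive(0, 1, False)   # sever the direct 0-1 link
print(swim.ping(0, 1, 0))      # False: no direct edge and no relays allowed
print(swim.ping(0, 1, 3))      # True: any surviving neighbor can relay to node 1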
runTest.py
    args['nproc'] = nproc
    args['changa_args'] = changa_args
    if userconfigdir is not None:
        args['userconfigdir'] = os.path.realpath(userconfigdir)
    args.update(kwargs)
    saveRunParam(args, savename)
    return args, savename, simdir

def runTest(directory, configOpts='', outputDir='.', testName='test',
            paramname=None, reconfigure=True, runner=None, nproc=None,
            changa_args='', charm_dir=None, **kwargs):
    """
    Will run a changa test simulation

    Assumes the ChaNGa binary is in the test directory

    Parameters
    ----------
    directory : str
        path containing the simulation to run
    configOpts : str
        Command-line arguments to pass to the ChaNGa configure script
        (e.g. --enable-dtadjust=yes, etc.)
    outputDir : str
        Directory to save to.  The simulation will be run in a subdir of this
    testName : str
        Prefix to give the test directory name.  This should be present to
        ensure the uniqueness of the save directory name
    paramname : str
        (optional) name of the .param file relative to directory
    runner : str
        (optional) defaults to charmrun in the ChaNGa directory
    nproc : int
        Number of processors to use (used if runner=None, i.e. using the
        default charmrun)
    changa_args : str
        Extra arguments to pass to ChaNGa
    charm_dir : str
        Directory of the charm installation (required for
        configuring/building)

    Returns
    -------
    success : bool
        Returns the success of the test.  If the simulation was run, True
        is returned.
    """
    arguments = locals()
    assert os.path.exists(directory)
    assert os.path.exists(outputDir)
    paramname = findParam(directory, paramname)
    outputDir = setupOutputDirName(outputDir, testName, configOpts)
    safe_copy_tree(directory, outputDir)
    paramfilename = os.path.split(paramname)[-1]
    # Use absolute paths
    directory = os.path.abspath(directory)
    # Use ChaNGa in the run directory
    changaDir = os.path.abspath(outputDir)
    # Set up ChaNGa command
    if runner is None:
        runner = os.path.join(changaDir, 'charmrun')
        if nproc is not None:
            runner += ' +p{0}'.format(nproc)
    changa = os.path.join(changaDir, 'ChaNGa')
    runcmd = "{0} {1} {2} {3}".format(runner, changa, changa_args,
                                      paramfilename)
    print "running ChaNGa with command:"
    print runcmd
    # save run params
    runparname = os.path.join(outputDir, _runparname)
    try:
        os.chdir(outputDir)
        success = diskpy.pychanga.changa_run(runcmd, log_file='changa.out',
                                             return_success=True)
    finally:
        os.chdir(cwd)
    if success:
        print "Success!  Test results saved to:"
        print outputDir
    return success

# ---------------------------------------------------------------------
# Git utilities
# ---------------------------------------------------------------------

def fullsha(commit, repodir='.', verbose=True):
    """
    Get the full git SHA hash for a commit in a given repository directory
    """
    cwd = os.getcwd()
    try:
        os.chdir(repodir)
        p, stdout = shellRun('git rev-parse {0}'.format(commit), verbose,
                             returnStdOut=True)
        if p.returncode != 0:
            raise RuntimeError, 'Could not get full SHA of commit {0} in '\
                '{1}'.format(commit, repodir)
    finally:
        os.chdir(cwd)
    return stdout[0]

def formatCommit(commit, repodir='.', verbose=True):
    """
    Return a formatted 7-character commit SHA
    """
    return fullsha(commit, repodir, verbose)[0:7]

# ---------------------------------------------------------------------
# Generic utilities
# ---------------------------------------------------------------------

def mkdir_p(path):
    """
    Recursively make path (python > 2.5)
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def safe_copy_tree(src, dst, **kwargs):
    """
    A small wrapper for distutils.dir_util.copy_tree.  See that for
    documentation

    There is a bug in copy_tree where if you copy a tree, delete it, then
    try to copy it again it will fail.
    """
    dir_util._path_created = {}
    return dir_util.copy_tree(src, dst, **kwargs)

def shellRun(cmd, verbose=True, logfile=None, returnStdOut=False, env=None):
    """
    Try to run the basic shell command (can only run command + opts, no
    piping)
    """
    output = subprocess.PIPE
    p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT,
                         stdout=output, env=env)
    printer = logPrinter(verbose, logfile, overwrite=True)
    lines = []
    try:
        for line in iter(p.stdout.readline, ''):
            if line.endswith('\n'):
                line = line[0:-1]
            printer(line)
            lines.append(line)
        p.wait()
    finally:
        printer.close()
    if returnStdOut:
        return p, lines
    else:
        return p

def findInDir(directory, searchPattern):
    """
    Finds files matching pattern searchPattern in directory
    """
    searchPattern = os.path.join(directory, searchPattern)
    results = glob.glob(searchPattern)
    results.sort()
    return results

# ---------------------------------------------------------------------
# Running utilities
# ---------------------------------------------------------------------

def findParam(directory, paramname=None):
    """
    Find and return a .param file in the directory
    """
    if paramname is None:
        results = findInDir(directory, '*.param')
        if len(results) != 1:
            raise RuntimeError, "Could not find .param file"
        paramname = results[0]
    else:
        paramname = os.path.join(directory, paramname)
        if not os.path.exists(paramname):
            raise ValueError, "Param file {0} does not exist".format(paramname)
    return paramname

def saveRunParam(param, fname):
    """
    Save the run parameters to fname as a json file.  The .json extension
    will be appended to fname if not present.
    """
    if not fname.endswith('.json'):
        fname += '.json'
    directory = os.path.split(fname)[0]
    mkdir_p(directory)
    json.dump(param, open(fname, 'w'), indent=4, sort_keys=True)

def loadRunParam(fname):
    """
    Loads the run params from fname.  If fname doesn't end in .json will
    also try fname + .json
    """
    try:
        param = json.load(open(fname, 'r'))
    except IOError:
        if not fname.endswith('.json'):
            param = loadRunParam(fname + '.json')
        else:
            raise
    return param

# ---------------------------------------------------------------------
# ChaNGa building utilities
# ---------------------------------------------------------------------

def buildChanga(directory, nproc=None, copydir=None):
    """
    builds ChaNGa in directory.  nproc can be set optionally for multicore
    building.  Defaults to n_cpu-1

    Can also copy the built binaries (ChaNGa and charmrun) to a directory
    copydir
    """
    if nproc is None:
        nproc = max([cpu_count() - 1, 1])
    cwd = os.getcwd()
    if copydir is not None:
        copydir = os.path.abspath(copydir)
    try:
        os.chdir(directory)
        p = shellRun('make clean')
        p = shellRun('make -j {0}'.format(nproc))
        if p.returncode != 0 and (nproc > 1):
            # Try one more time.  ChaNGa sometimes dies during parallel
            # builds on the first try, but works on the second try
            p = shellRun('make -j {0}'.format(nproc))
        if p.returncode != 0:
            msg = "Could not build ChaNGa in directory: " + directory
            if copydir is not None:
                msg += ", in order to copy to: " + copydir
            raise RuntimeError, msg
        if copydir is not None:
            mkdir_p(copydir)
            for f in ('ChaNGa', 'charmrun'):
                dest = os.path.join(copydir, f)
                print 'copying {0} to {1}'.format(f, dest)
                shutil.copy(f, dest)
    finally:
        os.chdir(cwd)
    return (p.returncode == 0)

def configureChanga(directory, configOpts='', charm_dir=None, verbose=True,
                    userconfigdir=None):
    """
    Run the ChaNGa configure script in directory, giving it the command-line
    options configOpts.  Can be silenced by setting verbose=False
    json.dump(arguments, open(runparname, 'w'), indent=4, sort_keys=True)
    # Run ChaNGa
    cwd = os.getcwd()
random_line_split
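# runTest above copies the simulation directory, assembles the charmrun
# command, and records the run parameters as JSON before launching ChaNGa.
# A hedged invocation sketch; the paths below are hypothetical, and the
# configOpts value is the example given in the docstring:

success = runTest('tests/shocktube',            # hypothetical test directory
                  configOpts='--enable-dtadjust=yes',
                  outputDir='results',
                  testName='shocktube',
                  nproc=4)
assert success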
runTest.py
args['nproc'] = nproc args['changa_args'] = changa_args if userconfigdir is not None: args['userconfigdir'] = os.path.realpath(userconfigdir) args.update(kwargs) saveRunParam(args, savename) return args, savename, simdir def runTest(directory, configOpts='', outputDir='.', testName='test', paramname=None, reconfigure=True, runner=None, nproc=None, changa_args='', charm_dir=None, **kwargs): """ Will run a changa test simulation Assumes the ChaNGa binary is in the test directory Parameters ---------- directory : str path containing the simulation to run configOpts : str Command-line arguments to pass to the ChaNGa configure script (e.g. --enable-dtadjust=yes, etc.) outputDir : str Directory to save to. The simulation will be run in a subdir of this testName : str Prefix to give the test directory name. This should be present to ensure the uniqueness of the save directory name paramname : str (optional) name of the .param file relative to directory runner : str (optional) defaults to charmrun in the ChaNGa directory nproc : int Number of processors to use (used if runner=None, i.e. using the default charmrun) changa_args : str Extra arguments to pass to ChaNGa charm_dir : str Directory of the charm installation (required for configuring/building) Returns ------- success : bool Returns the success of the test. If it the simulation was run, True is returned. """ arguments = locals() assert os.path.exists(directory) assert os.path.exists(outputDir) paramname = findParam(directory, paramname) outputDir = setupOutputDirName(outputDir, testName, configOpts) safe_copy_tree(directory, outputDir) paramfilename = os.path.split(paramname)[-1] # Use absolute paths directory = os.path.abspath(directory) # Use ChaNGa in the run directory changaDir = os.path.abspath(outputDir) # Set up ChaNGa command if runner is None: runner = os.path.join(changaDir, 'charmrun') if nproc is not None: runner += ' +p{0}'.format(nproc) changa = os.path.join(changaDir, 'ChaNGa') runcmd = "{0} {1} {2} {3}".format(runner, changa, changa_args, paramfilename) print "running ChaNGa with command:" print runcmd # save run params runparname = os.path.join(outputDir, _runparname) json.dump(arguments, open(runparname, 'w'), indent=4, sort_keys=True) # Run ChaNGa cwd = os.getcwd() try: os.chdir(outputDir) success = diskpy.pychanga.changa_run(runcmd, log_file='changa.out',\ return_success=True) finally: os.chdir(cwd) if success: print "Success! 
Test results saved to:" print outputDir return success # --------------------------------------------------------------------- # Git utilities # --------------------------------------------------------------------- def fullsha(commit, repodir='.', verbose=True): """ Get the full git SHA hash for a commit in a given repository directory """ cwd = os.getcwd() try: os.chdir(repodir) p, stdout = shellRun('git rev-parse {0}'.format(commit), verbose, \ returnStdOut=True) if p.returncode != 0: raise RuntimeError, 'Could not get full SHA of commit {0} in {1}'\ .format(commit, repodir) finally: os.chdir(cwd) return stdout[0] def formatCommit(commit, repodir='.', verbose=True): """ Return a formatted 7-character commit SHA """ return fullsha(commit, repodir, verbose)[0:7] # --------------------------------------------------------------------- # Generic utilities # --------------------------------------------------------------------- def mkdir_p(path): """ Recursively make path (python > 2.5) """ try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def safe_copy_tree(src, dst, **kwargs): """ A small wrapper for distutils.dir_util.copy_tree. See that for documentation There is a bug in copy_tree where if you copy a tree, delete it, then try to copy it again it will fail. """ dir_util._path_created = {} return dir_util.copy_tree(src, dst, **kwargs) def shellRun(cmd, verbose=True, logfile=None, returnStdOut=False, env=None): """ Try to run the basic shell command (can only run command + opts, no piping) """ output = subprocess.PIPE p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT, stdout=output, env=env) printer = logPrinter(verbose, logfile, overwrite=True) lines = [] try: for line in iter(p.stdout.readline, ''): if line.endswith('\n'): line = line[0:-1] printer(line) lines.append(line) p.wait() finally: printer.close() if returnStdOut: return p, lines else: return p def findInDir(directory, searchPattern): """ Finds files matching pather searchPattern in directory """ searchPattern = os.path.join(directory, searchPattern) results = glob.glob(searchPattern) results.sort() return results # --------------------------------------------------------------------- # Running utilities # --------------------------------------------------------------------- def findParam(directory, paramname=None): """ Find and return a .param file in the directory """ if paramname is None: results = findInDir(directory, '*.param') if len(results) != 1: raise RuntimeError, "Could not find .param file" paramname = results[0] else: paramname = os.path.join(directory, paramname) if not os.path.exists(paramname): raise ValueError, "Param file {0} does not exist".format(paramname) return paramname def saveRunParam(param, fname): """ Save the run parameters to fname as json file. The .json extension will be appended to fname if not present. """ if not fname.endswith('.json'): fname += '.json' directory = os.path.split(fname)[0] mkdir_p(directory) json.dump(param, open(fname, 'w'), indent=4, sort_keys=True) def loadRunParam(fname): """ Loads the run params from fname. If fname doesn't end in .json will also try fname + .json """ try: param = json.load(open(fname, 'r')) except IOError: if not fname.endswith('.json'): param = loadRunParam(fname + '.json') else: raise return param # --------------------------------------------------------------------- # ChaNGa building utilities # --------------------------------------------------------------------- def
(directory, nproc=None, copydir=None): """ builds ChaNGa in directory. nproc can be set optionally for multicore building. Defaults to n_cpu-1 Can also copy the built binaries (ChaNGa and charmrun) to a directory copydir """ if nproc is None: nproc = max([cpu_count() - 1, 1]) cwd = os.getcwd() if copydir is not None: copydir = os.path.abspath(copydir) try: os.chdir(directory) p = shellRun('make clean') p = shellRun('make -j {0}'.format(nproc)) if p.returncode != 0 and (nproc > 1): # Try one more time. ChaNGa sometimes dies during parallel builds # on the first try, but works on the second try p = shellRun('make -j {0}'.format(nproc)) if p.returncode != 0: msg = "Could not build ChaNGa in directory: " + directory if copydir is not None: msg += ", in order to copy to: " + copydir raise RuntimeError, msg if copydir is not None: mkdir_p(copydir) for f in ('ChaNGa', 'charmrun'): dest = os.path.join(copydir, f) print 'copying {0} to {1}'.format(f, dest) shutil.copy(f, dest) finally: os.chdir(cwd) return (p.returncode == 0) def configureChanga(directory, configOpts='', charm_dir=None, verbose=True, userconfigdir=None): """ Run the ChaNGa configure script in directory, giving it the command-line options configOpts. Can be silenced by setting verbose=False
buildChanga
identifier_name
runTest.py
args['nproc'] = nproc args['changa_args'] = changa_args if userconfigdir is not None: args['userconfigdir'] = os.path.realpath(userconfigdir) args.update(kwargs) saveRunParam(args, savename) return args, savename, simdir def runTest(directory, configOpts='', outputDir='.', testName='test', paramname=None, reconfigure=True, runner=None, nproc=None, changa_args='', charm_dir=None, **kwargs): """ Will run a changa test simulation Assumes the ChaNGa binary is in the test directory Parameters ---------- directory : str path containing the simulation to run configOpts : str Command-line arguments to pass to the ChaNGa configure script (e.g. --enable-dtadjust=yes, etc.) outputDir : str Directory to save to. The simulation will be run in a subdir of this testName : str Prefix to give the test directory name. This should be present to ensure the uniqueness of the save directory name paramname : str (optional) name of the .param file relative to directory runner : str (optional) defaults to charmrun in the ChaNGa directory nproc : int Number of processors to use (used if runner=None, i.e. using the default charmrun) changa_args : str Extra arguments to pass to ChaNGa charm_dir : str Directory of the charm installation (required for configuring/building) Returns ------- success : bool Returns the success of the test. If it the simulation was run, True is returned. """ arguments = locals() assert os.path.exists(directory) assert os.path.exists(outputDir) paramname = findParam(directory, paramname) outputDir = setupOutputDirName(outputDir, testName, configOpts) safe_copy_tree(directory, outputDir) paramfilename = os.path.split(paramname)[-1] # Use absolute paths directory = os.path.abspath(directory) # Use ChaNGa in the run directory changaDir = os.path.abspath(outputDir) # Set up ChaNGa command if runner is None: runner = os.path.join(changaDir, 'charmrun') if nproc is not None: runner += ' +p{0}'.format(nproc) changa = os.path.join(changaDir, 'ChaNGa') runcmd = "{0} {1} {2} {3}".format(runner, changa, changa_args, paramfilename) print "running ChaNGa with command:" print runcmd # save run params runparname = os.path.join(outputDir, _runparname) json.dump(arguments, open(runparname, 'w'), indent=4, sort_keys=True) # Run ChaNGa cwd = os.getcwd() try: os.chdir(outputDir) success = diskpy.pychanga.changa_run(runcmd, log_file='changa.out',\ return_success=True) finally: os.chdir(cwd) if success: print "Success! 
Test results saved to:" print outputDir return success # --------------------------------------------------------------------- # Git utilities # --------------------------------------------------------------------- def fullsha(commit, repodir='.', verbose=True): """ Get the full git SHA hash for a commit in a given repository directory """ cwd = os.getcwd() try: os.chdir(repodir) p, stdout = shellRun('git rev-parse {0}'.format(commit), verbose, \ returnStdOut=True) if p.returncode != 0: raise RuntimeError, 'Could not get full SHA of commit {0} in {1}'\ .format(commit, repodir) finally: os.chdir(cwd) return stdout[0] def formatCommit(commit, repodir='.', verbose=True): """ Return a formatted 7-character commit SHA """ return fullsha(commit, repodir, verbose)[0:7] # --------------------------------------------------------------------- # Generic utilities # --------------------------------------------------------------------- def mkdir_p(path): """ Recursively make path (python > 2.5) """ try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def safe_copy_tree(src, dst, **kwargs):
def shellRun(cmd, verbose=True, logfile=None, returnStdOut=False, env=None): """ Try to run the basic shell command (can only run command + opts, no piping) """ output = subprocess.PIPE p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT, stdout=output, env=env) printer = logPrinter(verbose, logfile, overwrite=True) lines = [] try: for line in iter(p.stdout.readline, ''): if line.endswith('\n'): line = line[0:-1] printer(line) lines.append(line) p.wait() finally: printer.close() if returnStdOut: return p, lines else: return p def findInDir(directory, searchPattern): """ Finds files matching pather searchPattern in directory """ searchPattern = os.path.join(directory, searchPattern) results = glob.glob(searchPattern) results.sort() return results # --------------------------------------------------------------------- # Running utilities # --------------------------------------------------------------------- def findParam(directory, paramname=None): """ Find and return a .param file in the directory """ if paramname is None: results = findInDir(directory, '*.param') if len(results) != 1: raise RuntimeError, "Could not find .param file" paramname = results[0] else: paramname = os.path.join(directory, paramname) if not os.path.exists(paramname): raise ValueError, "Param file {0} does not exist".format(paramname) return paramname def saveRunParam(param, fname): """ Save the run parameters to fname as json file. The .json extension will be appended to fname if not present. """ if not fname.endswith('.json'): fname += '.json' directory = os.path.split(fname)[0] mkdir_p(directory) json.dump(param, open(fname, 'w'), indent=4, sort_keys=True) def loadRunParam(fname): """ Loads the run params from fname. If fname doesn't end in .json will also try fname + .json """ try: param = json.load(open(fname, 'r')) except IOError: if not fname.endswith('.json'): param = loadRunParam(fname + '.json') else: raise return param # --------------------------------------------------------------------- # ChaNGa building utilities # --------------------------------------------------------------------- def buildChanga(directory, nproc=None, copydir=None): """ builds ChaNGa in directory. nproc can be set optionally for multicore building. Defaults to n_cpu-1 Can also copy the built binaries (ChaNGa and charmrun) to a directory copydir """ if nproc is None: nproc = max([cpu_count() - 1, 1]) cwd = os.getcwd() if copydir is not None: copydir = os.path.abspath(copydir) try: os.chdir(directory) p = shellRun('make clean') p = shellRun('make -j {0}'.format(nproc)) if p.returncode != 0 and (nproc > 1): # Try one more time. ChaNGa sometimes dies during parallel builds # on the first try, but works on the second try p = shellRun('make -j {0}'.format(nproc)) if p.returncode != 0: msg = "Could not build ChaNGa in directory: " + directory if copydir is not None: msg += ", in order to copy to: " + copydir raise RuntimeError, msg if copydir is not None: mkdir_p(copydir) for f in ('ChaNGa', 'charmrun'): dest = os.path.join(copydir, f) print 'copying {0} to {1}'.format(f, dest) shutil.copy(f, dest) finally: os.chdir(cwd) return (p.returncode == 0) def configureChanga(directory, configOpts='', charm_dir=None, verbose=True, userconfigdir=None): """ Run the ChaNGa configure script in directory, giving it the command-line options configOpts. Can be silenced by setting verbose=False
""" A small wrapper for distutils.dir_util.copy_tree. See that for documentation There is a bug in copy_tree where if you copy a tree, delete it, then try to copy it again it will fail. """ dir_util._path_created = {} return dir_util.copy_tree(src, dst, **kwargs)
identifier_body
runTest.py
1} {2} {3}".format(runner, changa, changa_args, paramfilename) print "running ChaNGa with command:" print runcmd # save run params runparname = os.path.join(outputDir, _runparname) json.dump(arguments, open(runparname, 'w'), indent=4, sort_keys=True) # Run ChaNGa cwd = os.getcwd() try: os.chdir(outputDir) success = diskpy.pychanga.changa_run(runcmd, log_file='changa.out',\ return_success=True) finally: os.chdir(cwd) if success: print "Success! Test results saved to:" print outputDir return success # --------------------------------------------------------------------- # Git utilities # --------------------------------------------------------------------- def fullsha(commit, repodir='.', verbose=True): """ Get the full git SHA hash for a commit in a given repository directory """ cwd = os.getcwd() try: os.chdir(repodir) p, stdout = shellRun('git rev-parse {0}'.format(commit), verbose, \ returnStdOut=True) if p.returncode != 0: raise RuntimeError, 'Could not get full SHA of commit {0} in {1}'\ .format(commit, repodir) finally: os.chdir(cwd) return stdout[0] def formatCommit(commit, repodir='.', verbose=True): """ Return a formatted 7-character commit SHA """ return fullsha(commit, repodir, verbose)[0:7] # --------------------------------------------------------------------- # Generic utilities # --------------------------------------------------------------------- def mkdir_p(path): """ Recursively make path (python > 2.5) """ try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def safe_copy_tree(src, dst, **kwargs): """ A small wrapper for distutils.dir_util.copy_tree. See that for documentation There is a bug in copy_tree where if you copy a tree, delete it, then try to copy it again it will fail. """ dir_util._path_created = {} return dir_util.copy_tree(src, dst, **kwargs) def shellRun(cmd, verbose=True, logfile=None, returnStdOut=False, env=None): """ Try to run the basic shell command (can only run command + opts, no piping) """ output = subprocess.PIPE p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT, stdout=output, env=env) printer = logPrinter(verbose, logfile, overwrite=True) lines = [] try: for line in iter(p.stdout.readline, ''): if line.endswith('\n'): line = line[0:-1] printer(line) lines.append(line) p.wait() finally: printer.close() if returnStdOut: return p, lines else: return p def findInDir(directory, searchPattern): """ Finds files matching pather searchPattern in directory """ searchPattern = os.path.join(directory, searchPattern) results = glob.glob(searchPattern) results.sort() return results # --------------------------------------------------------------------- # Running utilities # --------------------------------------------------------------------- def findParam(directory, paramname=None): """ Find and return a .param file in the directory """ if paramname is None: results = findInDir(directory, '*.param') if len(results) != 1: raise RuntimeError, "Could not find .param file" paramname = results[0] else: paramname = os.path.join(directory, paramname) if not os.path.exists(paramname): raise ValueError, "Param file {0} does not exist".format(paramname) return paramname def saveRunParam(param, fname): """ Save the run parameters to fname as json file. The .json extension will be appended to fname if not present. 
""" if not fname.endswith('.json'): fname += '.json' directory = os.path.split(fname)[0] mkdir_p(directory) json.dump(param, open(fname, 'w'), indent=4, sort_keys=True) def loadRunParam(fname): """ Loads the run params from fname. If fname doesn't end in .json will also try fname + .json """ try: param = json.load(open(fname, 'r')) except IOError: if not fname.endswith('.json'): param = loadRunParam(fname + '.json') else: raise return param # --------------------------------------------------------------------- # ChaNGa building utilities # --------------------------------------------------------------------- def buildChanga(directory, nproc=None, copydir=None): """ builds ChaNGa in directory. nproc can be set optionally for multicore building. Defaults to n_cpu-1 Can also copy the built binaries (ChaNGa and charmrun) to a directory copydir """ if nproc is None: nproc = max([cpu_count() - 1, 1]) cwd = os.getcwd() if copydir is not None: copydir = os.path.abspath(copydir) try: os.chdir(directory) p = shellRun('make clean') p = shellRun('make -j {0}'.format(nproc)) if p.returncode != 0 and (nproc > 1): # Try one more time. ChaNGa sometimes dies during parallel builds # on the first try, but works on the second try p = shellRun('make -j {0}'.format(nproc)) if p.returncode != 0: msg = "Could not build ChaNGa in directory: " + directory if copydir is not None: msg += ", in order to copy to: " + copydir raise RuntimeError, msg if copydir is not None: mkdir_p(copydir) for f in ('ChaNGa', 'charmrun'): dest = os.path.join(copydir, f) print 'copying {0} to {1}'.format(f, dest) shutil.copy(f, dest) finally: os.chdir(cwd) return (p.returncode == 0) def configureChanga(directory, configOpts='', charm_dir=None, verbose=True, userconfigdir=None): """ Run the ChaNGa configure script in directory, giving it the command-line options configOpts. Can be silenced by setting verbose=False Raises a RuntimeError if the configuration does not exit successfully """ cwd = os.getcwd() logfilename = os.path.abspath('configure.log') try: if charm_dir is not None: charm_dir = os.path.abspath(charm_dir) os.environ['CHARM_DIR'] = charm_dir os.chdir(directory) cmd = './configure ' + configOpts print 'userconfigdir', userconfigdir with _CopyUserConfig(userconfigdir, os.getcwd()): my_env = os.environ.copy() p = shellRun(cmd, verbose, logfilename, env=my_env) if p.returncode != 0: raise RuntimeError, "Could not configure ChaNGa" with open(logfilename,'r') as f: log = f.read() if 'WARNING' in log: raise RuntimeError, 'WARNING caught, could not configure ChaNGa' finally: os.chdir(cwd) return def configBuildCommit(commit, changaDir='.', configOpts='', outputDir='.', verbose=True, charm_dir=None, nproc=None, configDir='configurestuff', recompile=False, userconfigdir=None, **kwargs): """ Configures and builds a given ChaNGa commit. 
commit should be a git SHA (partial or full) Raises an error if not successful Returns the directory the binaries are saved to """ outputDir = setupOutputDirName(outputDir, commit, configOpts) #changa directory changaDir = os.path.abspath(changaDir) # Get full commit SHA but only use first 7 characters(github style) commit = formatCommit(commit, changaDir, verbose) # Set up the directory to copy the configure scripts from configDir = os.path.abspath(configDir) configFiles = ['Makefile.in', 'configure', 'configure.ac'] configSrc = [os.path.join(configDir, configFile) \ for configFile in configFiles] configDest = [os.path.join(changaDir, configFile) \ for configFile in configFiles] if not recompile and os.path.exists(os.path.join(outputDir, 'ChaNGa')): print "ChaNGa already built" return outputDir # Do stuff cwd = os.getcwd() try: os.chdir(changaDir) assert(shellRun('git stash').returncode == 0) shellRun('git checkout {0}'.format(commit)) # copy configuration files for src, dest in zip(configSrc, configDest):
shutil.copyfile(src, dest)
conditional_block
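The ChaNGa record above calls several helpers (shellRun, mkdir_p, setupOutputDirName, formatCommit, _CopyUserConfig) that are defined elsewhere in its source file and do not appear in the record. A minimal sketch of the two generic ones, with names and signatures inferred from the call sites rather than taken from the record:

# Hedged sketch: plausible stand-ins for two helpers the record calls.
# Signatures are assumptions inferred from call sites; the real
# implementations may differ.
import errno
import os
import subprocess


def mkdir_p(directory):
    # Create directory and any missing parents; tolerate "already exists".
    if directory == '':
        return
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def shellRun(cmd, verbose=True, logfilename=None, env=None):
    # Run cmd through the shell and return the finished Popen object,
    # so callers can inspect p.returncode exactly as the record does.
    p = subprocess.Popen(cmd, shell=True, env=env,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = p.communicate()
    text = out.decode('utf-8', 'replace') if isinstance(out, bytes) else out
    if verbose:
        print(text)
    if logfilename is not None:
        with open(logfilename, 'w') as f:
            f.write(text)
    return p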
test_stock_change_qty_reason.py
# pylint: disable=import-error,protected-access,too-few-public-methods
# Copyright 2016-2017 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

from odoo.tests.common import SavepointCase


class TestStockQuantityChangeReason(SavepointCase):
    @classmethod
    def setUpClass(cls):
        super(TestStockQuantityChangeReason, cls).setUpClass()

        # MODELS
        cls.stock_move = cls.env["stock.move"]
        cls.product_product_model = cls.env["product.product"]
        cls.product_category_model = cls.env["product.category"]
        cls.wizard_model = cls.env["stock.change.product.qty"]
        cls.preset_reason_id = cls.env["stock.inventory.line.reason"]
        cls.stock_location = cls.env.ref("stock.stock_location_stock")

        # INSTANCES
        cls.category = cls.product_category_model.create({"name": "Physical (test)"})

    def _create_product(self, name):
        return self.product_product_model.create(
            {"name": name, "categ_id": self.category.id, "type": "product"}
        )

    def _product_change_qty(self, product, new_qty):
        values = {
            "product_tmpl_id": product.product_tmpl_id.id,
            "product_id": product.id,
            "new_quantity": new_qty,
        }
        wizard = self.wizard_model.create(values)
        wizard.change_product_qty()

    def _create_reason(self, name, description=None):
        return self.preset_reason_id.create({"name": name, "description": description})

    def test_inventory_adjustment_onchange_reason_preset_reason(self):
        """Check that adding a reason or a preset reason explodes into lines"""
        product2 = self._create_product("product_product_2")
        self._product_change_qty(product2, 50)
        inventory = self.env["stock.inventory"].create(
            {
                "name": "remove product2",
                "product_ids": [(4, product2.id)],
                "location_ids": [(4, self.stock_location.id)],
            }
        inventory.preset_reason_id = self._create_reason("Test 1", "Description Test 1")
        inventory.action_start()
        self.assertEqual(len(inventory.line_ids), 1)
        inventory.reason = "Reason 2"
        inventory.onchange_reason()
        self.assertEqual(inventory.line_ids.reason, inventory.reason)
        inventory.preset_reason_id = self._create_reason("Test 2", "Description Test 2")
        inventory.onchange_preset_reason()
        self.assertEqual(
            inventory.line_ids.preset_reason_id, inventory.preset_reason_id
        )
        inventory.line_ids[0].write({"product_qty": 10})
        inventory.action_validate()
        move = self.stock_move.search(
            [("product_id", "=", product2.id), ("preset_reason_id", "!=", False)]
        )
        self.assertEqual(len(move), 1)
        self.assertEqual(move.origin, inventory.preset_reason_id.name)
        self.assertEqual(move.preset_reason_id, inventory.preset_reason_id)
)
random_line_split
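Whatever the fim_type tag says (conditional_block above, random_line_split here), a record reassembles the same way: the original source file is prefix + middle + suffix. A small sketch, assuming a record is exposed as a plain dict with those keys (the storage format of this dump is an assumption; only the field names mirror the records):

# Hedged sketch: recombining one FIM record from this dump.
def reassemble(record):
    # The fim_type only describes how the middle span was chosen;
    # reconstruction is plain concatenation in document order.
    return record['prefix'] + record['middle'] + record['suffix']


sample = {
    'file_name': 'test_stock_change_qty_reason.py',
    'prefix': 'inventory = self.env["stock.inventory"].create(\n    {...}\n',
    'middle': ')\n',
    'suffix': 'inventory.preset_reason_id = ...\n',
    'fim_type': 'random_line_split',
}
assert reassemble(sample).startswith('inventory')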
test_stock_change_qty_reason.py
# pylint: disable=import-error,protected-access,too-few-public-methods
# Copyright 2016-2017 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

from odoo.tests.common import SavepointCase


class
(SavepointCase):
    @classmethod
    def setUpClass(cls):
        super(TestStockQuantityChangeReason, cls).setUpClass()

        # MODELS
        cls.stock_move = cls.env["stock.move"]
        cls.product_product_model = cls.env["product.product"]
        cls.product_category_model = cls.env["product.category"]
        cls.wizard_model = cls.env["stock.change.product.qty"]
        cls.preset_reason_id = cls.env["stock.inventory.line.reason"]
        cls.stock_location = cls.env.ref("stock.stock_location_stock")

        # INSTANCES
        cls.category = cls.product_category_model.create({"name": "Physical (test)"})

    def _create_product(self, name):
        return self.product_product_model.create(
            {"name": name, "categ_id": self.category.id, "type": "product"}
        )

    def _product_change_qty(self, product, new_qty):
        values = {
            "product_tmpl_id": product.product_tmpl_id.id,
            "product_id": product.id,
            "new_quantity": new_qty,
        }
        wizard = self.wizard_model.create(values)
        wizard.change_product_qty()

    def _create_reason(self, name, description=None):
        return self.preset_reason_id.create({"name": name, "description": description})

    def test_inventory_adjustment_onchange_reason_preset_reason(self):
        """Check that adding a reason or a preset reason explodes into lines"""
        product2 = self._create_product("product_product_2")
        self._product_change_qty(product2, 50)
        inventory = self.env["stock.inventory"].create(
            {
                "name": "remove product2",
                "product_ids": [(4, product2.id)],
                "location_ids": [(4, self.stock_location.id)],
            }
        )
        inventory.preset_reason_id = self._create_reason("Test 1", "Description Test 1")
        inventory.action_start()
        self.assertEqual(len(inventory.line_ids), 1)
        inventory.reason = "Reason 2"
        inventory.onchange_reason()
        self.assertEqual(inventory.line_ids.reason, inventory.reason)
        inventory.preset_reason_id = self._create_reason("Test 2", "Description Test 2")
        inventory.onchange_preset_reason()
        self.assertEqual(
            inventory.line_ids.preset_reason_id, inventory.preset_reason_id
        )
        inventory.line_ids[0].write({"product_qty": 10})
        inventory.action_validate()
        move = self.stock_move.search(
            [("product_id", "=", product2.id), ("preset_reason_id", "!=", False)]
        )
        self.assertEqual(len(move), 1)
        self.assertEqual(move.origin, inventory.preset_reason_id.name)
        self.assertEqual(move.preset_reason_id, inventory.preset_reason_id)
TestStockQuantityChangeReason
identifier_name
test_stock_change_qty_reason.py
# pylint: disable=import-error,protected-access,too-few-public-methods
# Copyright 2016-2017 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

from odoo.tests.common import SavepointCase


class TestStockQuantityChangeReason(SavepointCase):
    @classmethod
    def setUpClass(cls):
        super(TestStockQuantityChangeReason, cls).setUpClass()

        # MODELS
        cls.stock_move = cls.env["stock.move"]
        cls.product_product_model = cls.env["product.product"]
        cls.product_category_model = cls.env["product.category"]
        cls.wizard_model = cls.env["stock.change.product.qty"]
        cls.preset_reason_id = cls.env["stock.inventory.line.reason"]
        cls.stock_location = cls.env.ref("stock.stock_location_stock")

        # INSTANCES
        cls.category = cls.product_category_model.create({"name": "Physical (test)"})

    def _create_product(self, name):
        return self.product_product_model.create(
            {"name": name, "categ_id": self.category.id, "type": "product"}
        )

    def _product_change_qty(self, product, new_qty):
        values = {
            "product_tmpl_id": product.product_tmpl_id.id,
            "product_id": product.id,
            "new_quantity": new_qty,
        }
        wizard = self.wizard_model.create(values)
        wizard.change_product_qty()

    def _create_reason(self, name, description=None):
        return self.preset_reason_id.create({"name": name, "description": description})

    def test_inventory_adjustment_onchange_reason_preset_reason(self):
        )
        inventory.line_ids[0].write({"product_qty": 10})
        inventory.action_validate()
        move = self.stock_move.search(
            [("product_id", "=", product2.id), ("preset_reason_id", "!=", False)]
        )
        self.assertEqual(len(move), 1)
        self.assertEqual(move.origin, inventory.preset_reason_id.name)
        self.assertEqual(move.preset_reason_id, inventory.preset_reason_id)
"""Check that adding a reason or a preset reason explode to lines""" product2 = self._create_product("product_product_2") self._product_change_qty(product2, 50) inventory = self.env["stock.inventory"].create( { "name": "remove product2", "product_ids": [(4, product2.id)], "location_ids": [(4, self.stock_location.id)], } ) inventory.preset_reason_id = self._create_reason("Test 1", "Description Test 1") inventory.action_start() self.assertEqual(len(inventory.line_ids), 1) inventory.reason = "Reason 2" inventory.onchange_reason() self.assertEqual(inventory.line_ids.reason, inventory.reason) inventory.preset_reason_id = self._create_reason("Test 2", "Description Test 2") inventory.onchange_preset_reason() self.assertEqual( inventory.line_ids.preset_reason_id, inventory.preset_reason_id
identifier_body
youtube_apiSpec.js
/*
 * Copyright (C) 2017 - present Instructure, Inc.
 *
 * This file is part of Canvas.
 *
 * Canvas is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Affero General Public License as published by the Free
 * Software Foundation, version 3 of the License.
 *
 * Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Affero General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

import $ from 'jquery'
import YouTubeApi from 'tinymce_plugins/instructure_links/youtube_api'

const videoId = 'DgDk50dHbjM'
const link = {attr: () => {}, text: () => {}}
const vidTitle = 'this is my video title'
let ytApi

QUnit.module('YouTube API', {
  setup () {
    $.youTubeID = () => {return videoId}
    ytApi = new YouTubeApi()
  },
  teardown () {
    $.youTubeID = undefined
  }
})
  const mock = sinon.mock(link).expects('text').withArgs(vidTitle)
  ytApi.titleYouTubeText(link)
  mock.verify()
})

test('titleYouTubeText increments the failure count on failure', () => {
  sinon.stub(ytApi, 'fetchYouTubeTitle').callsArgWith(1, null, {responseText: 'error'})
  const mock = sinon.mock(link).expects('attr').thrice()
  ytApi.titleYouTubeText(link)
  mock.verify()
})
test('titleYouTubeText changes the text of a link to match the title', () => {
  sinon.stub(ytApi, 'fetchYouTubeTitle').callsArgWith(1, vidTitle)
random_line_split
youtube_apiSpec.js
/*
 * Copyright (C) 2017 - present Instructure, Inc.
 *
 * This file is part of Canvas.
 *
 * Canvas is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Affero General Public License as published by the Free
 * Software Foundation, version 3 of the License.
 *
 * Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Affero General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

import $ from 'jquery'
import YouTubeApi from 'tinymce_plugins/instructure_links/youtube_api'

const videoId = 'DgDk50dHbjM'
const link = {attr: () => {}, text: () => {}}
const vidTitle = 'this is my video title'
let ytApi

QUnit.module('YouTube API', {
  setup () {
    $.youTubeID = () => {return videoId}
    ytApi = new YouTubeApi()
  },
  teardown ()
})

test('titleYouTubeText changes the text of a link to match the title', () => {
  sinon.stub(ytApi, 'fetchYouTubeTitle').callsArgWith(1, vidTitle)
  const mock = sinon.mock(link).expects('text').withArgs(vidTitle)
  ytApi.titleYouTubeText(link)
  mock.verify()
})

test('titleYouTubeText increments the failure count on failure', () => {
  sinon.stub(ytApi, 'fetchYouTubeTitle').callsArgWith(1, null, {responseText: 'error'})
  const mock = sinon.mock(link).expects('attr').thrice()
  ytApi.titleYouTubeText(link)
  mock.verify()
})
{
    $.youTubeID = undefined
  }
identifier_body
youtube_apiSpec.js
/*
 * Copyright (C) 2017 - present Instructure, Inc.
 *
 * This file is part of Canvas.
 *
 * Canvas is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Affero General Public License as published by the Free
 * Software Foundation, version 3 of the License.
 *
 * Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Affero General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

import $ from 'jquery'
import YouTubeApi from 'tinymce_plugins/instructure_links/youtube_api'

const videoId = 'DgDk50dHbjM'
const link = {attr: () => {}, text: () => {}}
const vidTitle = 'this is my video title'
let ytApi

QUnit.module('YouTube API', {
() {
    $.youTubeID = () => {return videoId}
    ytApi = new YouTubeApi()
  },
  teardown () {
    $.youTubeID = undefined
  }
})

test('titleYouTubeText changes the text of a link to match the title', () => {
  sinon.stub(ytApi, 'fetchYouTubeTitle').callsArgWith(1, vidTitle)
  const mock = sinon.mock(link).expects('text').withArgs(vidTitle)
  ytApi.titleYouTubeText(link)
  mock.verify()
})

test('titleYouTubeText increments the failure count on failure', () => {
  sinon.stub(ytApi, 'fetchYouTubeTitle').callsArgWith(1, null, {responseText: 'error'})
  const mock = sinon.mock(link).expects('attr').thrice()
  ytApi.titleYouTubeText(link)
  mock.verify()
})
setup
identifier_name
dataid.py
    FID_DomainNumber = auto()
    FID_Permeability = auto()
    FID_Velocity = auto()
    FID_Pressure = auto()
    FID_ESI_VPS_Displacement = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # GY field IDs
    FID_Mises_Stress = auto()
    FID_MaxPrincipal_Stress = auto()
    FID_MidPrincipal_Stress = auto()
    FID_MinPrincipal_Stress = auto()
    FID_MaxPrincipal_Strain = auto()
    FID_MidPrincipal_Strain = auto()
    FID_MinPrincipal_Strain = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Particle
    PSID_ParticlePositions = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Function
    FuncID_ProbabilityDistribution = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Misc
    ID_None = auto()
    ID_GrainState = auto()
    ID_InputFile = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Property
    PID_Concentration = auto()
    PID_CumulativeConcentration = auto()
    PID_Velocity = auto()
    PID_transient_simulation_time = auto()
    PID_effective_conductivity = auto()
    PID_volume_fraction_red_phosphor = auto()
    PID_volume_fraction_green_phosphor = auto()
    PID_conductivity_red_phosphor = auto()
    PID_conductivity_green_phosphor = auto()
    PID_mean_radius_red_phosphor = auto()
    PID_mean_radius_green_phosphor = auto()
    PID_standard_deviation_red_phosphor = auto()
    PID_standard_deviation_green_phosphor = auto()
    PID_RefractiveIndex = auto()
    PID_NumberOfRays = auto()
    PID_LEDSpectrum = auto()
    PID_ChipSpectrum = auto()
    PID_LEDColor_x = auto()
    PID_LEDColor_y = auto()
    PID_LEDCCT = auto()
    PID_LEDRadiantPower = auto()
    PID_ParticleNumberDensity = auto()
    PID_ParticleRefractiveIndex = auto()
    PID_EmissionSpectrum = auto()
    PID_ExcitationSpectrum = auto()
    PID_AsorptionSpectrum = auto()
    PID_ScatteringCrossSections = auto()
    PID_InverseCumulativeDist = auto()
    PID_NumberOfFluorescentParticles = auto()
    PID_ParticleMu = auto()
    PID_ParticleSigma = auto()
    PID_PhosphorEfficiency = auto()
    PID_Length = auto()
    PID_Height = auto()
    PID_Thickness = auto()
    PID_Deflection = auto()
    PID_EModulus = auto() # Young's modulus
    PID_PoissonRatio = auto()

    # Mul2 properties
    PID_YoungModulus1 = auto()
    PID_YoungModulus2 = auto()
    PID_YoungModulus3 = auto()
    PID_PoissonRatio23 = auto()
    PID_PoissonRatio13 = auto()
    PID_PoissonRatio12 = auto()
    PID_ShearModulus23 = auto()
    PID_ShearModulus13 = auto()
    PID_ShearModulus12 = auto()
    PID_CriticalLoadLevel = auto()

    # INSA properties
    PID_ExtensionalInPlaneStiffness = auto()
    PID_ExtensionalOutOfPlaneStiffness = auto()
    PID_ShearInPlaneStiffness = auto()
    PID_ShearOutOfPlaneStiffness = auto()
    PID_LocalBendingStiffness = auto()
    PID_CriticalForce = auto()
    PID_CriticalMoment = auto()

    # Digimat Properties
    PID_MatrixYoung = auto()
    PID_MatrixPoisson = auto()
    PID_InclusionYoung = auto()
    PID_InclusionPoisson = auto()
    PID_InclusionVolumeFraction = auto()
    PID_InclusionAspectRatio = auto()
    PID_MatrixOgdenModulus = auto()
    PID_MatrixOgdenExponent = auto()
    PID_InclusionSizeNormalized = auto()
    PID_CompositeAxialYoung = auto()
    PID_CompositeInPlaneYoung = auto()
    PID_CompositeInPlaneShear = auto()
    PID_CompositeTransverseShear = auto()
    PID_CompositeInPlanePoisson = auto()
    PID_CompositeTransversePoisson = auto()
    PID_CompositeStrain11Tensor = auto()
    PID_CompositeStrain22Tensor = auto()
    PID_CompositeStress11Tensor = auto()
    PID_MatrixDensity = auto()
    PID_CompositeDensity = auto()
    PID_InclusionDensity = auto()

    # CUBA keywords from Jun 6, 2017 - https://github.com/simphony/simphony-common/blob/master/ontology/cuba.yml
    PID_Position = auto()
    PID_Direction = auto()
    PID_Status = auto()
    PID_Label = auto()
    PID_Chemical_specie = auto()
    PID_Material_type = auto()
    PID_Shape_center = auto()
    PID_Shape_length = auto()
    PID_Shape_radius = auto()
    PID_Shape_side = auto()
    PID_Crystal_storage = auto()
    PID_Name_UC = auto()
    PID_Lattice_vectors = auto()
    PID_Symmetry_lattice_vectors = auto()
    PID_Occupancy = auto()
    PID_Bond_label = auto()
    PID_Bond_type = auto()
    # PID_Velocity = auto() Duplicate
    PID_Dimension = auto()
    PID_Acceleration = auto()
    PID_Radius = auto()
    PID_Size = auto()
    PID_Mass = auto()
    PID_Volume = auto()
    PID_Angular_velocity = auto()
    PID_Angular_acceleration = auto()
    PID_Simulation_domain_dimensions = auto()
    PID_Simulation_domain_origin = auto()
    PID_Dynamic_viscosity = auto()
    PID_Kinematic_viscosity = auto()
    PID_Diffusion_coefficient = auto()
    PID_Probability_coefficient = auto()
    PID_Friction_coefficient = auto()
    PID_Scaling_coefficient = auto()
    PID_Equation_of_state_coefficient = auto()
    PID_Contact_angle = auto()
    PID_Amphiphilicity = auto()
    PID_Phase_interaction_strength = auto()
    PID_Hamaker_constant = auto()
    PID_Zeta_potential = auto()
    PID_Ion_valence_effect = auto()
    PID_Debye_length = auto()
    PID_Smoothing_length = auto()
    PID_Lattice_spacing = auto()
    PID_Time_step = auto()
    PID_Number_of_time_steps = auto()
    PID_Force = auto()
    PID_Torque = auto()
    PID_Density = auto()
    PID_Pressure = auto()
    PID_Temperature = auto()
    PID_Distribution = auto()
    PID_Order_parameter = auto()
    PID_Original_position = auto()
    PID_Current = auto()
    PID_Final = auto()
    PID_Delta_displacement = auto()
    PID_External_applied_force = auto()
    PID_Euler_angles = auto()
    PID_Sphericity = auto()
    PID_Young_modulus = auto()
    PID_Poisson_ratio = auto()
    PID_Restitution_coefficient = auto()
    PID_Rolling_friction = auto()
    PID_Volume_fraction = auto()
    PID_Coupling_time = auto()
    PID_Cutoff_distance = auto()
    PID_Energy_well_depth = auto()
    PID_Van_der_Waals_radius = auto()
    PID_Dielectric_constant = auto()
    PID_Dynamic_pressure = auto()
    PID_Flux = auto()
    PID_Homogenized_stress_tensor = auto()
    PID_Strain_tensor = auto()
    PID_Relative_velocity = auto()
    PID_Diffusion_velocity = auto()
    PID_Stress_tensor = auto()
    PID_Volume_fraction_gradient = auto()
    PID_Cohesion_energy_density = auto()
    PID_Major = auto()
    PID_Minor = auto()
    PID_Patch = auto()
    PID_Full = auto()
    PID_Charge = auto()
    PID_Charge_density = auto()
    PID_Description = auto()
    PID_Electric_field = auto()
    PID_Electron_mass = auto()
    PID_Electrostatic_field = auto()
    PID_Energy = auto()
    PID_Heat_conductivity = auto()
    PID_Initial_viscosity = auto()
    PID_Linear_constant = auto()
    PID_Maximum_viscosity = auto()
""" This class represents the supported values of IDs of property, field, etc. Values of members should be stored by .name, .value should not be used. """ # # # # # # # # # # # # # # # # # # # # # # Field FID_Displacement = auto() FID_Strain = auto() FID_Stress = auto() FID_Temperature = auto() FID_Humidity = auto() FID_Concentration = auto() FID_Thermal_absorption_volume = auto() FID_Thermal_absorption_surface = auto() FID_Material_number = auto() FID_BucklingShape = auto() FID_FibreOrientation = auto()
identifier_body
dataid.py
(IntEnum):
    """
    This class represents the supported values of IDs of property, field, etc.
    Values of members should be stored by .name, .value should not be used.
    """

    # # # # # # # # # # # # # # # # # # # # #
    # Field
    FID_Displacement = auto()
    FID_Strain = auto()
    FID_Stress = auto()
    FID_Temperature = auto()
    FID_Humidity = auto()
    FID_Concentration = auto()
    FID_Thermal_absorption_volume = auto()
    FID_Thermal_absorption_surface = auto()
    FID_Material_number = auto()
    FID_BucklingShape = auto()
    FID_FibreOrientation = auto()
    FID_DomainNumber = auto()
    FID_Permeability = auto()
    FID_Velocity = auto()
    FID_Pressure = auto()
    FID_ESI_VPS_Displacement = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # GY field IDs
    FID_Mises_Stress = auto()
    FID_MaxPrincipal_Stress = auto()
    FID_MidPrincipal_Stress = auto()
    FID_MinPrincipal_Stress = auto()
    FID_MaxPrincipal_Strain = auto()
    FID_MidPrincipal_Strain = auto()
    FID_MinPrincipal_Strain = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Particle
    PSID_ParticlePositions = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Function
    FuncID_ProbabilityDistribution = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Misc
    ID_None = auto()
    ID_GrainState = auto()
    ID_InputFile = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Property
    PID_Concentration = auto()
    PID_CumulativeConcentration = auto()
    PID_Velocity = auto()
    PID_transient_simulation_time = auto()
    PID_effective_conductivity = auto()
    PID_volume_fraction_red_phosphor = auto()
    PID_volume_fraction_green_phosphor = auto()
    PID_conductivity_red_phosphor = auto()
    PID_conductivity_green_phosphor = auto()
    PID_mean_radius_red_phosphor = auto()
    PID_mean_radius_green_phosphor = auto()
    PID_standard_deviation_red_phosphor = auto()
    PID_standard_deviation_green_phosphor = auto()
    PID_RefractiveIndex = auto()
    PID_NumberOfRays = auto()
    PID_LEDSpectrum = auto()
    PID_ChipSpectrum = auto()
    PID_LEDColor_x = auto()
    PID_LEDColor_y = auto()
    PID_LEDCCT = auto()
    PID_LEDRadiantPower = auto()
    PID_ParticleNumberDensity = auto()
    PID_ParticleRefractiveIndex = auto()
    PID_EmissionSpectrum = auto()
    PID_ExcitationSpectrum = auto()
    PID_AsorptionSpectrum = auto()
    PID_ScatteringCrossSections = auto()
    PID_InverseCumulativeDist = auto()
    PID_NumberOfFluorescentParticles = auto()
    PID_ParticleMu = auto()
    PID_ParticleSigma = auto()
    PID_PhosphorEfficiency = auto()
    PID_Length = auto()
    PID_Height = auto()
    PID_Thickness = auto()
    PID_Deflection = auto()
    PID_EModulus = auto() # Young's modulus
    PID_PoissonRatio = auto()

    # Mul2 properties
    PID_YoungModulus1 = auto()
    PID_YoungModulus2 = auto()
    PID_YoungModulus3 = auto()
    PID_PoissonRatio23 = auto()
    PID_PoissonRatio13 = auto()
    PID_PoissonRatio12 = auto()
    PID_ShearModulus23 = auto()
    PID_ShearModulus13 = auto()
    PID_ShearModulus12 = auto()
    PID_CriticalLoadLevel = auto()

    # INSA properties
    PID_ExtensionalInPlaneStiffness = auto()
    PID_ExtensionalOutOfPlaneStiffness = auto()
    PID_ShearInPlaneStiffness = auto()
    PID_ShearOutOfPlaneStiffness = auto()
    PID_LocalBendingStiffness = auto()
    PID_CriticalForce = auto()
    PID_CriticalMoment = auto()

    # Digimat Properties
    PID_MatrixYoung = auto()
    PID_MatrixPoisson = auto()
    PID_InclusionYoung = auto()
    PID_InclusionPoisson = auto()
    PID_InclusionVolumeFraction = auto()
    PID_InclusionAspectRatio = auto()
    PID_MatrixOgdenModulus = auto()
    PID_MatrixOgdenExponent = auto()
    PID_InclusionSizeNormalized = auto()
    PID_CompositeAxialYoung = auto()
    PID_CompositeInPlaneYoung = auto()
    PID_CompositeInPlaneShear = auto()
    PID_CompositeTransverseShear = auto()
    PID_CompositeInPlanePoisson = auto()
    PID_CompositeTransversePoisson = auto()
    PID_CompositeStrain11Tensor = auto()
    PID_CompositeStrain22Tensor = auto()
    PID_CompositeStress11Tensor = auto()
    PID_MatrixDensity = auto()
    PID_CompositeDensity = auto()
    PID_InclusionDensity = auto()

    # CUBA keywords from Jun 6, 2017 - https://github.com/simphony/simphony-common/blob/master/ontology/cuba.yml
    PID_Position = auto()
    PID_Direction = auto()
    PID_Status = auto()
    PID_Label = auto()
    PID_Chemical_specie = auto()
    PID_Material_type = auto()
    PID_Shape_center = auto()
    PID_Shape_length = auto()
    PID_Shape_radius = auto()
    PID_Shape_side = auto()
    PID_Crystal_storage = auto()
    PID_Name_UC = auto()
    PID_Lattice_vectors = auto()
    PID_Symmetry_lattice_vectors = auto()
    PID_Occupancy = auto()
    PID_Bond_label = auto()
    PID_Bond_type = auto()
    # PID_Velocity = auto() Duplicate
    PID_Dimension = auto()
    PID_Acceleration = auto()
    PID_Radius = auto()
    PID_Size = auto()
    PID_Mass = auto()
    PID_Volume = auto()
    PID_Angular_velocity = auto()
    PID_Angular_acceleration = auto()
    PID_Simulation_domain_dimensions = auto()
    PID_Simulation_domain_origin = auto()
    PID_Dynamic_viscosity = auto()
    PID_Kinematic_viscosity = auto()
    PID_Diffusion_coefficient = auto()
    PID_Probability_coefficient = auto()
    PID_Friction_coefficient = auto()
    PID_Scaling_coefficient = auto()
    PID_Equation_of_state_coefficient = auto()
    PID_Contact_angle = auto()
    PID_Amphiphilicity = auto()
    PID_Phase_interaction_strength = auto()
    PID_Hamaker_constant = auto()
    PID_Zeta_potential = auto()
    PID_Ion_valence_effect = auto()
    PID_Debye_length = auto()
    PID_Smoothing_length = auto()
    PID_Lattice_spacing = auto()
    PID_Time_step = auto()
    PID_Number_of_time_steps = auto()
    PID_Force = auto()
    PID_Torque = auto()
    PID_Density = auto()
    PID_Pressure = auto()
    PID_Temperature = auto()
    PID_Distribution = auto()
    PID_Order_parameter = auto()
    PID_Original_position = auto()
    PID_Current = auto()
    PID_Final = auto()
    PID_Delta_displacement = auto()
    PID_External_applied_force = auto()
    PID_Euler_angles = auto()
    PID_Sphericity = auto()
    PID_Young_modulus = auto()
    PID_Poisson_ratio = auto()
    PID_Restitution_coefficient = auto()
    PID_Rolling_friction = auto()
    PID_Volume_fraction = auto()
    PID_Coupling_time = auto()
    PID_Cutoff_distance = auto()
    PID_Energy_well_depth = auto()
    PID_Van_der_Waals_radius = auto()
    PID_Dielectric_constant = auto()
    PID_Dynamic_pressure = auto()
    PID_Flux = auto()
    PID_Homogenized_stress_tensor = auto()
    PID_Strain_tensor = auto()
    PID_Relative_velocity = auto()
    PID_Diffusion_velocity = auto()
    PID_Stress_tensor = auto()
    PID_Volume_fraction_gradient = auto()
    PID_Cohesion_energy_density = auto()
    PID_Major = auto()
    PID_Minor = auto()
    PID_Patch = auto()
    PID_Full = auto()
    PID_Charge = auto()
    PID_Charge_density = auto()
    PID_Description = auto()
    PID_Electric_field = auto()
    PID_Electron_mass = auto()
    PID_Electrostatic_field = auto()
    PID_Energy = auto()
    PID_Heat_conductivity = auto()
    PID_Initial_viscosity = auto()
    PID_Linear_constant = auto()
    PID_Maximum_v
DataID
identifier_name
dataid.py
    # # # # # # # # # # # # # # # # # # # # #
    # Field
    FID_Displacement = auto()
    FID_Strain = auto()
    FID_Stress = auto()
    FID_Temperature = auto()
    FID_Humidity = auto()
    FID_Concentration = auto()
    FID_Thermal_absorption_volume = auto()
    FID_Thermal_absorption_surface = auto()
    FID_Material_number = auto()
    FID_BucklingShape = auto()
    FID_FibreOrientation = auto()
    FID_DomainNumber = auto()
    FID_Permeability = auto()
    FID_Velocity = auto()
    FID_Pressure = auto()
    FID_ESI_VPS_Displacement = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # GY field IDs
    FID_Mises_Stress = auto()
    FID_MaxPrincipal_Stress = auto()
    FID_MidPrincipal_Stress = auto()
    FID_MinPrincipal_Stress = auto()
    FID_MaxPrincipal_Strain = auto()
    FID_MidPrincipal_Strain = auto()
    FID_MinPrincipal_Strain = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Particle
    PSID_ParticlePositions = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Function
    # Misc
    ID_None = auto()
    ID_GrainState = auto()
    ID_InputFile = auto()

    # # # # # # # # # # # # # # # # # # # # #
    # Property
    PID_Concentration = auto()
    PID_CumulativeConcentration = auto()
    PID_Velocity = auto()
    PID_transient_simulation_time = auto()
    PID_effective_conductivity = auto()
    PID_volume_fraction_red_phosphor = auto()
    PID_volume_fraction_green_phosphor = auto()
    PID_conductivity_red_phosphor = auto()
    PID_conductivity_green_phosphor = auto()
    PID_mean_radius_red_phosphor = auto()
    PID_mean_radius_green_phosphor = auto()
    PID_standard_deviation_red_phosphor = auto()
    PID_standard_deviation_green_phosphor = auto()
    PID_RefractiveIndex = auto()
    PID_NumberOfRays = auto()
    PID_LEDSpectrum = auto()
    PID_ChipSpectrum = auto()
    PID_LEDColor_x = auto()
    PID_LEDColor_y = auto()
    PID_LEDCCT = auto()
    PID_LEDRadiantPower = auto()
    PID_ParticleNumberDensity = auto()
    PID_ParticleRefractiveIndex = auto()
    PID_EmissionSpectrum = auto()
    PID_ExcitationSpectrum = auto()
    PID_AsorptionSpectrum = auto()
    PID_ScatteringCrossSections = auto()
    PID_InverseCumulativeDist = auto()
    PID_NumberOfFluorescentParticles = auto()
    PID_ParticleMu = auto()
    PID_ParticleSigma = auto()
    PID_PhosphorEfficiency = auto()
    PID_Length = auto()
    PID_Height = auto()
    PID_Thickness = auto()
    PID_Deflection = auto()
    PID_EModulus = auto() # Young's modulus
    PID_PoissonRatio = auto()

    # Mul2 properties
    PID_YoungModulus1 = auto()
    PID_YoungModulus2 = auto()
    PID_YoungModulus3 = auto()
    PID_PoissonRatio23 = auto()
    PID_PoissonRatio13 = auto()
    PID_PoissonRatio12 = auto()
    PID_ShearModulus23 = auto()
    PID_ShearModulus13 = auto()
    PID_ShearModulus12 = auto()
    PID_CriticalLoadLevel = auto()

    # INSA properties
    PID_ExtensionalInPlaneStiffness = auto()
    PID_ExtensionalOutOfPlaneStiffness = auto()
    PID_ShearInPlaneStiffness = auto()
    PID_ShearOutOfPlaneStiffness = auto()
    PID_LocalBendingStiffness = auto()
    PID_CriticalForce = auto()
    PID_CriticalMoment = auto()

    # Digimat Properties
    PID_MatrixYoung = auto()
    PID_MatrixPoisson = auto()
    PID_InclusionYoung = auto()
    PID_InclusionPoisson = auto()
    PID_InclusionVolumeFraction = auto()
    PID_InclusionAspectRatio = auto()
    PID_MatrixOgdenModulus = auto()
    PID_MatrixOgdenExponent = auto()
    PID_InclusionSizeNormalized = auto()
    PID_CompositeAxialYoung = auto()
    PID_CompositeInPlaneYoung = auto()
    PID_CompositeInPlaneShear = auto()
    PID_CompositeTransverseShear = auto()
    PID_CompositeInPlanePoisson = auto()
    PID_CompositeTransversePoisson = auto()
    PID_CompositeStrain11Tensor = auto()
    PID_CompositeStrain22Tensor = auto()
    PID_CompositeStress11Tensor = auto()
    PID_MatrixDensity = auto()
    PID_CompositeDensity = auto()
    PID_InclusionDensity = auto()

    # CUBA keywords from Jun 6, 2017 - https://github.com/simphony/simphony-common/blob/master/ontology/cuba.yml
    PID_Position = auto()
    PID_Direction = auto()
    PID_Status = auto()
    PID_Label = auto()
    PID_Chemical_specie = auto()
    PID_Material_type = auto()
    PID_Shape_center = auto()
    PID_Shape_length = auto()
    PID_Shape_radius = auto()
    PID_Shape_side = auto()
    PID_Crystal_storage = auto()
    PID_Name_UC = auto()
    PID_Lattice_vectors = auto()
    PID_Symmetry_lattice_vectors = auto()
    PID_Occupancy = auto()
    PID_Bond_label = auto()
    PID_Bond_type = auto()
    # PID_Velocity = auto() Duplicate
    PID_Dimension = auto()
    PID_Acceleration = auto()
    PID_Radius = auto()
    PID_Size = auto()
    PID_Mass = auto()
    PID_Volume = auto()
    PID_Angular_velocity = auto()
    PID_Angular_acceleration = auto()
    PID_Simulation_domain_dimensions = auto()
    PID_Simulation_domain_origin = auto()
    PID_Dynamic_viscosity = auto()
    PID_Kinematic_viscosity = auto()
    PID_Diffusion_coefficient = auto()
    PID_Probability_coefficient = auto()
    PID_Friction_coefficient = auto()
    PID_Scaling_coefficient = auto()
    PID_Equation_of_state_coefficient = auto()
    PID_Contact_angle = auto()
    PID_Amphiphilicity = auto()
    PID_Phase_interaction_strength = auto()
    PID_Hamaker_constant = auto()
    PID_Zeta_potential = auto()
    PID_Ion_valence_effect = auto()
    PID_Debye_length = auto()
    PID_Smoothing_length = auto()
    PID_Lattice_spacing = auto()
    PID_Time_step = auto()
    PID_Number_of_time_steps = auto()
    PID_Force = auto()
    PID_Torque = auto()
    PID_Density = auto()
    PID_Pressure = auto()
    PID_Temperature = auto()
    PID_Distribution = auto()
    PID_Order_parameter = auto()
    PID_Original_position = auto()
    PID_Current = auto()
    PID_Final = auto()
    PID_Delta_displacement = auto()
    PID_External_applied_force = auto()
    PID_Euler_angles = auto()
    PID_Sphericity = auto()
    PID_Young_modulus = auto()
    PID_Poisson_ratio = auto()
    PID_Restitution_coefficient = auto()
    PID_Rolling_friction = auto()
    PID_Volume_fraction = auto()
    PID_Coupling_time = auto()
    PID_Cutoff_distance = auto()
    PID_Energy_well_depth = auto()
    PID_Van_der_Waals_radius = auto()
    PID_Dielectric_constant = auto()
    PID_Dynamic_pressure = auto()
    PID_Flux = auto()
    PID_Homogenized_stress_tensor = auto()
    PID_Strain_tensor = auto()
    PID_Relative_velocity = auto()
    PID_Diffusion_velocity = auto()
    PID_Stress_tensor = auto()
    PID_Volume_fraction_gradient = auto()
    PID_Cohesion_energy_density = auto()
    PID_Major = auto()
    PID_Minor = auto()
    PID_Patch = auto()
    PID_Full = auto()
    PID_Charge = auto()
    PID_Charge_density = auto()
    PID_Description = auto()
    PID_Electric_field = auto()
    PID_Electron_mass = auto()
    PID_Electrostatic_field = auto()
    PID_Energy = auto()
    PID_Heat_conductivity = auto()
    PID_Initial_viscosity = auto()
    PID_Linear_constant = auto()
    PID_Maximum_viscosity = auto()
    PID_Minimum_viscosity = auto()
    PID_Momentum = auto()
    PID_Moment_inertia = auto()
    PID_Potential_energy = auto()
    PID_P
    FuncID_ProbabilityDistribution = auto()

    # # # # # # # # # # # # # # # # # # # # #
random_line_split
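The DataID docstring repeated in the records above insists that members be stored by .name rather than .value: with auto(), the integer values are positional, so inserting a member shifts every value after it. A toy demonstration of that behavior (ToyID is a stand-in, not the real class from the records):

# Hedged sketch: why the docstring warns against persisting .value.
from enum import IntEnum, auto


class ToyID(IntEnum):
    FID_Displacement = auto()  # 1
    FID_Strain = auto()        # 2
    FID_Stress = auto()        # 3


stored = ToyID.FID_Strain.name            # 'FID_Strain' is stable across edits
assert ToyID[stored] is ToyID.FID_Strain  # name round-trips to the member
assert ToyID.FID_Strain.value == 2        # would shift if a member were inserted above it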
test_guest_vlan_range.py
""" P1 tests for Dedicating Guest Vlan Ranges """ # Import Local Modules from marvin.cloudstackAPI import * from marvin.cloudstackTestCase import * from marvin.lib.base import * from marvin.lib.common import * from marvin.lib.utils import * from nose.plugins.attrib import attr class TestDedicateGuestVlanRange(cloudstackTestCase): @classmethod def setUpClass(cls):
    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            removeGuestVlanRangeResponse = \
                cls.physical_network.update(cls.apiclient,
                        id=cls.physical_network.id,
                        vlan=cls.physical_network.vlan)
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "guestvlanrange", "dedicate", "release"], required_hardware="false")
    def test_dedicateGuestVlanRange(self):
        """Test guest vlan range dedication """
        """Assume a physical network is available """
        """
        # Validate the following:
        # 1. List the available physical network using ListPhysicalNetwork
        # 2. Add a Guest Vlan range to the available physical network using UpdatePhysicalNetwork
        # 3. Dedicate the created guest vlan range to user account using DedicateGuestVlanRange
        # 4. Verify vlan range is dedicated with listDedicatedGuestVlanRanges
        # 5. Release the dedicated guest vlan range back to the system
        # 6. Verify guest vlan range has been released, verify with listDedicatedGuestVlanRanges
        # 7. Remove the added guest vlan range using UpdatePhysicalNetwork
        """
        self.debug("Adding guest vlan range")
        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
        # new_vlan = self.free_vlan["partial_range"][0]
        addGuestVlanRangeResponse = self.physical_network.update(self.apiclient,
                id=self.physical_network.id, vlan=new_vlan)
        # id=self.physical_network.id, vlan=self.free_vlan["partial_range"][0])

        self.debug("Dedicating guest vlan range");
        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
            self.apiclient,
            self.free_vlan["partial_range"][0],
            physicalnetworkid=self.physical_network.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
            self.apiclient,
            id=dedicate_guest_vlan_range_response.id
        )
        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
        self.assertEqual(
            dedicated_guest_vlan_response.account,
            self.account.name,
            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
        )

        self.debug("Releasing guest vlan range");
        dedicate_guest_vlan_range_response.release(self.apiclient)
        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
        self.assertEqual(
            list_dedicated_guest_vlan_range_response,
            None,
            "Check vlan range is not available in listDedicatedGuestVlanRanges"
        )
        testClient = super(TestDedicateGuestVlanRange, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        # Create Account
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls._cleanup = [
            cls.account,
        ]
        cls.physical_network, cls.free_vlan = setNonContiguousVlanIds(cls.apiclient, cls.zone.id)
        return
identifier_body
test_guest_vlan_range.py
""" P1 tests for Dedicating Guest Vlan Ranges """ # Import Local Modules from marvin.cloudstackAPI import * from marvin.cloudstackTestCase import * from marvin.lib.base import * from marvin.lib.common import * from marvin.lib.utils import * from nose.plugins.attrib import attr class TestDedicateGuestVlanRange(cloudstackTestCase): @classmethod def setUpClass(cls): testClient = super(TestDedicateGuestVlanRange, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls.services = testClient.getParsedTestDataConfig() # Get Zone, Domain cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) # Create Account cls.account = Account.create( cls.apiclient, cls.services["account"], domainid=cls.domain.id ) cls._cleanup = [ cls.account, ] cls.physical_network, cls.free_vlan = setNonContiguousVlanIds(cls.apiclient, cls.zone.id) return @classmethod def tearDownClass(cls): try: # Cleanup resources used removeGuestVlanRangeResponse = \ cls.physical_network.update(cls.apiclient, id=cls.physical_network.id, vlan=cls.physical_network.vlan) cleanup_resources(cls.apiclient, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def
(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "guestvlanrange", "dedicate", "release"], required_hardware="false")
    def test_dedicateGuestVlanRange(self):
        """Test guest vlan range dedication """
        """Assume a physical network is available """
        """
        # Validate the following:
        # 1. List the available physical network using ListPhysicalNetwork
        # 2. Add a Guest Vlan range to the available physical network using UpdatePhysicalNetwork
        # 3. Dedicate the created guest vlan range to user account using DedicateGuestVlanRange
        # 4. Verify vlan range is dedicated with listDedicatedGuestVlanRanges
        # 5. Release the dedicated guest vlan range back to the system
        # 6. Verify guest vlan range has been released, verify with listDedicatedGuestVlanRanges
        # 7. Remove the added guest vlan range using UpdatePhysicalNetwork
        """
        self.debug("Adding guest vlan range")
        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
        # new_vlan = self.free_vlan["partial_range"][0]
        addGuestVlanRangeResponse = self.physical_network.update(self.apiclient,
                id=self.physical_network.id, vlan=new_vlan)
        # id=self.physical_network.id, vlan=self.free_vlan["partial_range"][0])

        self.debug("Dedicating guest vlan range");
        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
            self.apiclient,
            self.free_vlan["partial_range"][0],
            physicalnetworkid=self.physical_network.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
            self.apiclient,
            id=dedicate_guest_vlan_range_response.id
        )
        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
        self.assertEqual(
            dedicated_guest_vlan_response.account,
            self.account.name,
            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
        )

        self.debug("Releasing guest vlan range");
        dedicate_guest_vlan_range_response.release(self.apiclient)
        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
        self.assertEqual(
            list_dedicated_guest_vlan_range_response,
            None,
            "Check vlan range is not available in listDedicatedGuestVlanRanges"
        )
setUp
identifier_name
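Both Marvin test records above follow the same shape: create resources in setUpClass/setUp, register them in a cleanup list, and release them in the matching teardown. A framework-agnostic sketch of that register-then-release pattern in plain unittest (FakeResource is hypothetical, and the real cleanup_resources may iterate differently):

# Hedged sketch: the cleanup-list pattern from the records, minus CloudStack.
import unittest


class FakeResource(object):
    # Stand-in for Marvin objects such as Account; tracks whether delete ran.
    def __init__(self):
        self.deleted = False

    def delete(self):
        self.deleted = True


class CleanupPatternExample(unittest.TestCase):
    def setUp(self):
        self.cleanup = []

    def tearDown(self):
        # Release in reverse creation order (a common convention; an
        # assumption here, not a claim about Marvin's implementation).
        for resource in reversed(self.cleanup):
            resource.delete()

    def test_resource_is_registered_for_cleanup(self):
        resource = FakeResource()
        self.cleanup.append(resource)  # mirrors cls._cleanup in the records
        self.assertFalse(resource.deleted)


if __name__ == '__main__':
    unittest.main()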