file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
create-path.ts | import { parse, posix } from "path"
export function createPath(
filePath: string,
// TODO(v5): Set this default to false
withTrailingSlash: boolean = true,
usePathBase: boolean = false
): string | {
const { dir, name, base } = parse(filePath)
// When a collection route also has client-only routes (e.g. {Product.name}/[...sku])
// The "name" would be .. and "ext" .sku -- that's why "base" needs to be used instead
// to get [...sku]. usePathBase is set to "true" in collection-route-builder and gatsbyPath
const parsedBase = base === `index` ? `` : base
const parsedName = name === `index` ? `` : name
const postfix = withTrailingSlash ? `/` : ``
return posix.join(`/`, dir, usePathBase ? parsedBase : parsedName, postfix)
} | identifier_body | |
create-path.ts | import { parse, posix } from "path"
export function | (
filePath: string,
// TODO(v5): Set this default to false
withTrailingSlash: boolean = true,
usePathBase: boolean = false
): string {
const { dir, name, base } = parse(filePath)
// When a collection route also has client-only routes (e.g. {Product.name}/[...sku])
// The "name" would be .. and "ext" .sku -- that's why "base" needs to be used instead
// to get [...sku]. usePathBase is set to "true" in collection-route-builder and gatsbyPath
const parsedBase = base === `index` ? `` : base
const parsedName = name === `index` ? `` : name
const postfix = withTrailingSlash ? `/` : ``
return posix.join(`/`, dir, usePathBase ? parsedBase : parsedName, postfix)
}
| createPath | identifier_name |
create-path.ts | import { parse, posix } from "path"
export function createPath(
filePath: string, | withTrailingSlash: boolean = true,
usePathBase: boolean = false
): string {
const { dir, name, base } = parse(filePath)
// When a collection route also has client-only routes (e.g. {Product.name}/[...sku])
// The "name" would be .. and "ext" .sku -- that's why "base" needs to be used instead
// to get [...sku]. usePathBase is set to "true" in collection-route-builder and gatsbyPath
const parsedBase = base === `index` ? `` : base
const parsedName = name === `index` ? `` : name
const postfix = withTrailingSlash ? `/` : ``
return posix.join(`/`, dir, usePathBase ? parsedBase : parsedName, postfix)
} | // TODO(v5): Set this default to false | random_line_split |
basic_example.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import argparse
import kudu
from kudu.client import Partitioning
# Parse arguments
parser = argparse.ArgumentParser(description='Basic Example for Kudu Python.')
parser.add_argument('--masters', '-m', nargs='+', default='localhost',
help='The master address(es) to connect to Kudu.')
parser.add_argument('--ports', '-p', nargs='+', default='7051',
help='The master server port(s) to connect to Kudu.')
args = parser.parse_args()
# Connect to Kudu master server(s).
client = kudu.connect(host=args.masters, port=args.ports)
# Define a schema for a new table.
builder = kudu.schema_builder()
builder.add_column('key').type(kudu.int64).nullable(False).primary_key()
builder.add_column('ts_val', type_=kudu.unixtime_micros, nullable=False, compression='lz4')
schema = builder.build()
# Define the partitioning schema.
partitioning = Partitioning().add_hash_partitions(column_names=['key'], num_buckets=3)
# Delete table if it already exists.
if client.table_exists('python-example'):
client.delete_table('python-example')
# Create a new table.
client.create_table('python-example', schema, partitioning)
# Open a table.
table = client.table('python-example')
# Create a new session so that we can apply write operations.
session = client.new_session()
# Insert a row.
op = table.new_insert({'key': 1, 'ts_val': datetime.utcnow()})
session.apply(op)
# Upsert a row.
op = table.new_upsert({'key': 2, 'ts_val': "2016-01-01T00:00:00.000000"})
session.apply(op)
# Update a row.
op = table.new_update({'key': 1, 'ts_val': ("2017-01-01", "%Y-%m-%d")}) |
# Flush write operations, if failures occur, print them.
try:
session.flush()
except kudu.KuduBadStatus:
print(session.get_pending_errors())
# Create a scanner and add a predicate.
scanner = table.scanner()
scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1))
# Open scanner and print all tuples.
# Note: This doesn't scale for large scans
# The expected output: [(1, datetime.datetime(2017, 1, 1, 0, 0, tzinfo=<UTC>))]
print(scanner.open().read_all_tuples()) | session.apply(op)
# Delete a row.
op = table.new_delete({'key': 2})
session.apply(op) | random_line_split |
basic_example.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import argparse
import kudu
from kudu.client import Partitioning
# Parse arguments
parser = argparse.ArgumentParser(description='Basic Example for Kudu Python.')
parser.add_argument('--masters', '-m', nargs='+', default='localhost',
help='The master address(es) to connect to Kudu.')
parser.add_argument('--ports', '-p', nargs='+', default='7051',
help='The master server port(s) to connect to Kudu.')
args = parser.parse_args()
# Connect to Kudu master server(s).
client = kudu.connect(host=args.masters, port=args.ports)
# Define a schema for a new table.
builder = kudu.schema_builder()
builder.add_column('key').type(kudu.int64).nullable(False).primary_key()
builder.add_column('ts_val', type_=kudu.unixtime_micros, nullable=False, compression='lz4')
schema = builder.build()
# Define the partitioning schema.
partitioning = Partitioning().add_hash_partitions(column_names=['key'], num_buckets=3)
# Delete table if it already exists.
if client.table_exists('python-example'):
|
# Create a new table.
client.create_table('python-example', schema, partitioning)
# Open a table.
table = client.table('python-example')
# Create a new session so that we can apply write operations.
session = client.new_session()
# Insert a row.
op = table.new_insert({'key': 1, 'ts_val': datetime.utcnow()})
session.apply(op)
# Upsert a row.
op = table.new_upsert({'key': 2, 'ts_val': "2016-01-01T00:00:00.000000"})
session.apply(op)
# Update a row.
op = table.new_update({'key': 1, 'ts_val': ("2017-01-01", "%Y-%m-%d")})
session.apply(op)
# Delete a row.
op = table.new_delete({'key': 2})
session.apply(op)
# Flush write operations, if failures occur, print them.
try:
session.flush()
except kudu.KuduBadStatus:
print(session.get_pending_errors())
# Create a scanner and add a predicate.
scanner = table.scanner()
scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1))
# Open scanner and print all tuples.
# Note: This doesn't scale for large scans
# The expected output: [(1, datetime.datetime(2017, 1, 1, 0, 0, tzinfo=<UTC>))]
print(scanner.open().read_all_tuples())
| client.delete_table('python-example') | conditional_block |
script.py | # coding=utf8
import random
import logging
import json
import time
from httplib2 import Http
from logging import handlers
LOG_FILE = '../logs/WSCN_client.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 * 1024, backupCount=5) # 实例化handler
handler.setFormatter(logging.Formatter('%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'))
logger = logging.getLogger('client')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
h = Http()
def send():
exchange_type = random_type()
r, c = h.request("http://127.0.0.1:4000/trade.do", "POST",
"{\"symbol\": \"WSCN\", \"type\": \"" + exchange_type + "\", \"amount\": " + random_amount() +
", \"price\": " + random_price() + "}", {"Content-Type": "text/json"})
if exchange_type == "buy" or exchange_type == "sell":
obj = json.loads(c)
logger.info("%s, %s", obj['order_id'], exchange_type)
def random_type():
return str(random.choice(["buy", "sell", "buy_market", "sell_market"]))
def random | return str(random.randrange(1, 100, 1))
def random_price():
return str(round(random.uniform(90.00, 110.00), 2))
if __name__ == '__main__':
for i in range(0, 1000):
send()
time.sleep(0.230)
| _amount():
| identifier_name |
script.py | # coding=utf8
import random
import logging
import json
import time
from httplib2 import Http
from logging import handlers
LOG_FILE = '../logs/WSCN_client.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 * 1024, backupCount=5) # 实例化handler
handler.setFormatter(logging.Formatter('%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'))
logger = logging.getLogger('client')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
| r, c = h.request("http://127.0.0.1:4000/trade.do", "POST",
"{\"symbol\": \"WSCN\", \"type\": \"" + exchange_type + "\", \"amount\": " + random_amount() +
", \"price\": " + random_price() + "}", {"Content-Type": "text/json"})
if exchange_type == "buy" or exchange_type == "sell":
obj = json.loads(c)
logger.info("%s, %s", obj['order_id'], exchange_type)
def random_type():
return str(random.choice(["buy", "sell", "buy_market", "sell_market"]))
def random_amount():
return str(random.randrange(1, 100, 1))
def random_price():
return str(round(random.uniform(90.00, 110.00), 2))
if __name__ == '__main__':
for i in range(0, 1000):
send()
time.sleep(0.230) | h = Http()
def send():
exchange_type = random_type() | random_line_split |
script.py | # coding=utf8
import random
import logging
import json
import time
from httplib2 import Http
from logging import handlers
LOG_FILE = '../logs/WSCN_client.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 * 1024, backupCount=5) # 实例化handler
handler.setFormatter(logging.Formatter('%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'))
logger = logging.getLogger('client')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
h = Http()
def send():
exchange_type = random_type()
r, c = h.request("http://127.0.0.1:4000/trade.do", "POST",
"{\"symbol\": \"WSCN\", \"type\": \"" + exchange_type + "\", \"amount\": " + random_amount() +
", \"price\": " + random_price() + "}", {"Content-Type": "text/json"})
if exchange_type == "buy" or exchange_type == "sell":
obj = json.loads(c)
logger.info("%s, %s", obj['order_id'], exchange_type)
def random_type():
return str(random.choice(["buy", "sell", "buy_market", "sell_market"]))
def random_amount():
return str(random.randrange(1, 100, 1))
def random_price():
return str(round(random.uniform(90.00, 110.00), 2))
if __name__ == '__main__':
for i | in range(0, 1000):
send()
time.sleep(0.230)
| conditional_block | |
script.py | # coding=utf8
import random
import logging
import json
import time
from httplib2 import Http
from logging import handlers
LOG_FILE = '../logs/WSCN_client.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 * 1024, backupCount=5) # 实例化handler
handler.setFormatter(logging.Formatter('%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'))
logger = logging.getLogger('client')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
h = Http()
def send():
exchan | random_type():
return str(random.choice(["buy", "sell", "buy_market", "sell_market"]))
def random_amount():
return str(random.randrange(1, 100, 1))
def random_price():
return str(round(random.uniform(90.00, 110.00), 2))
if __name__ == '__main__':
for i in range(0, 1000):
send()
time.sleep(0.230)
| ge_type = random_type()
r, c = h.request("http://127.0.0.1:4000/trade.do", "POST",
"{\"symbol\": \"WSCN\", \"type\": \"" + exchange_type + "\", \"amount\": " + random_amount() +
", \"price\": " + random_price() + "}", {"Content-Type": "text/json"})
if exchange_type == "buy" or exchange_type == "sell":
obj = json.loads(c)
logger.info("%s, %s", obj['order_id'], exchange_type)
def | identifier_body |
karma.local.conf.js | 'use strict';
module.exports = function(config) {
config.set({
files: [
'https://code.jquery.com/jquery-3.1.1.min.js',
'https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.6.1/angular.min.js',
'https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.6.1/angular-route.min.js',
'https://cdnjs.cloudflare.com/ajax/libs/angular-filter/0.5.14/angular-filter.min.js',
'https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.6.1/angular-mocks.js',
'./google.js',
'./bin/app.js',
'./test.js', | port: 9876,
browsers: ['Chrome'],
proxies: {
'/templates/': 'http://localhost:9876/base/bin/templates/'
}
});
}; | {pattern: './bin/templates/*.html', included: false, served: true}
],
frameworks: ['mocha', 'chai'], | random_line_split |
PlainObjectProvider.ts | import { baseSet as set, baseGet as get, castPath, followRef } from './utils';
import { IPathObjectBinder, Path } from './DataBinding';
/**
It wraps getting and setting object properties by setting path expression (dotted path - e.g. "Data.Person.FirstName", "Data.Person.LastName")
*/
export default class PathObjectBinder implements IPathObjectBinder {
private source: any;
constructor(private root: any, source?: any) {
this.source = source === undefined ? this.root : source;
}
public subscribe(updateFce) {
// this.freezer.on('update',function(state,prevState){
// if (updateFce!==undefined) updateFce(state,prevState)}
// );
}
public createNew(path: Path, newItem?: any): IPathObjectBinder {
//var item = followRef(this.root, newItem || this.getValue(path));
return new PathObjectBinder(this.root, newItem || this.getValue(path));
}
public getValue(path: Path) |
public setValue(path: Path, value: any) {
if (path === undefined) return;
var cursorPath = castPath(path);
if (cursorPath.length === 0) return;
var parent = this.getParent(cursorPath);
if (parent === undefined) return;
var property = cursorPath[cursorPath.length - 1];
//console.log(parent);
parent[property] = value;
//console.log(parent);
}
private getParent(cursorPath: Array<string | number>) {
if (cursorPath.length == 0) return;
if (cursorPath.length == 1) return followRef(this.root, this.source);
var parentPath = cursorPath.slice(0, cursorPath.length - 1);
var parent = get(this.source, parentPath);
if (parent !== undefined) return followRef(this.root, parent);
set(this.source, parentPath, {}, Object);
return get(this.source, parentPath);
}
} | {
if (path === undefined) return this.source;
var cursorPath = castPath(path);
if (cursorPath.length === 0) return this.source;
var parent = this.getParent(cursorPath);
if (parent === undefined) return;
var property = cursorPath[cursorPath.length - 1];
return parent[property];
} | identifier_body |
PlainObjectProvider.ts | import { baseSet as set, baseGet as get, castPath, followRef } from './utils';
import { IPathObjectBinder, Path } from './DataBinding';
/**
It wraps getting and setting object properties by setting path expression (dotted path - e.g. "Data.Person.FirstName", "Data.Person.LastName")
*/
export default class PathObjectBinder implements IPathObjectBinder {
private source: any;
constructor(private root: any, source?: any) {
this.source = source === undefined ? this.root : source;
}
public subscribe(updateFce) {
// this.freezer.on('update',function(state,prevState){
// if (updateFce!==undefined) updateFce(state,prevState)}
// );
}
public createNew(path: Path, newItem?: any): IPathObjectBinder {
//var item = followRef(this.root, newItem || this.getValue(path));
return new PathObjectBinder(this.root, newItem || this.getValue(path));
}
public getValue(path: Path) {
if (path === undefined) return this.source;
var cursorPath = castPath(path);
if (cursorPath.length === 0) return this.source;
var parent = this.getParent(cursorPath);
if (parent === undefined) return;
var property = cursorPath[cursorPath.length - 1];
return parent[property];
}
public setValue(path: Path, value: any) {
if (path === undefined) return;
var cursorPath = castPath(path);
if (cursorPath.length === 0) return;
var parent = this.getParent(cursorPath);
if (parent === undefined) return;
var property = cursorPath[cursorPath.length - 1];
//console.log(parent);
parent[property] = value;
//console.log(parent);
}
private getParent(cursorPath: Array<string | number>) {
if (cursorPath.length == 0) return;
if (cursorPath.length == 1) return followRef(this.root, this.source);
var parentPath = cursorPath.slice(0, cursorPath.length - 1);
var parent = get(this.source, parentPath);
if (parent !== undefined) return followRef(this.root, parent);
set(this.source, parentPath, {}, Object);
return get(this.source, parentPath); | } | }
| random_line_split |
PlainObjectProvider.ts | import { baseSet as set, baseGet as get, castPath, followRef } from './utils';
import { IPathObjectBinder, Path } from './DataBinding';
/**
It wraps getting and setting object properties by setting path expression (dotted path - e.g. "Data.Person.FirstName", "Data.Person.LastName")
*/
export default class PathObjectBinder implements IPathObjectBinder {
private source: any;
constructor(private root: any, source?: any) {
this.source = source === undefined ? this.root : source;
}
public | (updateFce) {
// this.freezer.on('update',function(state,prevState){
// if (updateFce!==undefined) updateFce(state,prevState)}
// );
}
public createNew(path: Path, newItem?: any): IPathObjectBinder {
//var item = followRef(this.root, newItem || this.getValue(path));
return new PathObjectBinder(this.root, newItem || this.getValue(path));
}
public getValue(path: Path) {
if (path === undefined) return this.source;
var cursorPath = castPath(path);
if (cursorPath.length === 0) return this.source;
var parent = this.getParent(cursorPath);
if (parent === undefined) return;
var property = cursorPath[cursorPath.length - 1];
return parent[property];
}
public setValue(path: Path, value: any) {
if (path === undefined) return;
var cursorPath = castPath(path);
if (cursorPath.length === 0) return;
var parent = this.getParent(cursorPath);
if (parent === undefined) return;
var property = cursorPath[cursorPath.length - 1];
//console.log(parent);
parent[property] = value;
//console.log(parent);
}
private getParent(cursorPath: Array<string | number>) {
if (cursorPath.length == 0) return;
if (cursorPath.length == 1) return followRef(this.root, this.source);
var parentPath = cursorPath.slice(0, cursorPath.length - 1);
var parent = get(this.source, parentPath);
if (parent !== undefined) return followRef(this.root, parent);
set(this.source, parentPath, {}, Object);
return get(this.source, parentPath);
}
} | subscribe | identifier_name |
add-vmware-template.component.ts | import { Component, OnInit } from '@angular/core';
import { FormBuilder, FormControl, FormGroup, Validators } from '@angular/forms';
import { ActivatedRoute, Router } from '@angular/router';
import { v4 as uuid } from 'uuid';
import { Server } from '../../../../models/server';
import { VmwareTemplate } from '../../../../models/templates/vmware-template';
import { VmwareVm } from '../../../../models/vmware/vmware-vm';
import { ServerService } from '../../../../services/server.service';
import { TemplateMocksService } from '../../../../services/template-mocks.service';
import { ToasterService } from '../../../../services/toaster.service';
import { VmwareService } from '../../../../services/vmware.service';
@Component({
selector: 'app-add-vmware-template',
templateUrl: './add-vmware-template.component.html',
styleUrls: ['./add-vmware-template.component.scss', '../../preferences.component.scss'],
})
export class AddVmwareTemplateComponent implements OnInit {
server: Server;
virtualMachines: VmwareVm[];
selectedVM: VmwareVm;
vmwareTemplate: VmwareTemplate;
templateNameForm: FormGroup;
constructor(
private route: ActivatedRoute,
private serverService: ServerService,
private vmwareService: VmwareService,
private toasterService: ToasterService,
private templateMocksService: TemplateMocksService,
private router: Router,
private formBuilder: FormBuilder
) {
this.templateNameForm = this.formBuilder.group({
templateName: new FormControl(null, [Validators.required]),
});
}
ngOnInit() {
const server_id = this.route.snapshot.paramMap.get('server_id');
this.serverService.get(parseInt(server_id, 10)).then((server: Server) => {
this.server = server;
this.vmwareService.getVirtualMachines(this.server).subscribe((virtualMachines: VmwareVm[]) => {
this.virtualMachines = virtualMachines;
this.templateMocksService.getVmwareTemplate().subscribe((template: VmwareTemplate) => {
this.vmwareTemplate = template;
});
});
});
}
goBack() {
this.router.navigate(['/server', this.server.id, 'preferences', 'vmware', 'templates']);
}
addTemplate() |
}
| {
if (!this.templateNameForm.invalid) {
this.vmwareTemplate.name = this.selectedVM.vmname;
this.vmwareTemplate.vmx_path = this.selectedVM.vmx_path;
this.vmwareTemplate.template_id = uuid();
this.vmwareService.addTemplate(this.server, this.vmwareTemplate).subscribe(() => {
this.goBack();
});
} else {
this.toasterService.error(`Fill all required fields`);
}
} | identifier_body |
add-vmware-template.component.ts | import { Component, OnInit } from '@angular/core';
import { FormBuilder, FormControl, FormGroup, Validators } from '@angular/forms';
import { ActivatedRoute, Router } from '@angular/router';
import { v4 as uuid } from 'uuid';
import { Server } from '../../../../models/server'; | import { VmwareVm } from '../../../../models/vmware/vmware-vm';
import { ServerService } from '../../../../services/server.service';
import { TemplateMocksService } from '../../../../services/template-mocks.service';
import { ToasterService } from '../../../../services/toaster.service';
import { VmwareService } from '../../../../services/vmware.service';
@Component({
selector: 'app-add-vmware-template',
templateUrl: './add-vmware-template.component.html',
styleUrls: ['./add-vmware-template.component.scss', '../../preferences.component.scss'],
})
export class AddVmwareTemplateComponent implements OnInit {
server: Server;
virtualMachines: VmwareVm[];
selectedVM: VmwareVm;
vmwareTemplate: VmwareTemplate;
templateNameForm: FormGroup;
constructor(
private route: ActivatedRoute,
private serverService: ServerService,
private vmwareService: VmwareService,
private toasterService: ToasterService,
private templateMocksService: TemplateMocksService,
private router: Router,
private formBuilder: FormBuilder
) {
this.templateNameForm = this.formBuilder.group({
templateName: new FormControl(null, [Validators.required]),
});
}
ngOnInit() {
const server_id = this.route.snapshot.paramMap.get('server_id');
this.serverService.get(parseInt(server_id, 10)).then((server: Server) => {
this.server = server;
this.vmwareService.getVirtualMachines(this.server).subscribe((virtualMachines: VmwareVm[]) => {
this.virtualMachines = virtualMachines;
this.templateMocksService.getVmwareTemplate().subscribe((template: VmwareTemplate) => {
this.vmwareTemplate = template;
});
});
});
}
goBack() {
this.router.navigate(['/server', this.server.id, 'preferences', 'vmware', 'templates']);
}
addTemplate() {
if (!this.templateNameForm.invalid) {
this.vmwareTemplate.name = this.selectedVM.vmname;
this.vmwareTemplate.vmx_path = this.selectedVM.vmx_path;
this.vmwareTemplate.template_id = uuid();
this.vmwareService.addTemplate(this.server, this.vmwareTemplate).subscribe(() => {
this.goBack();
});
} else {
this.toasterService.error(`Fill all required fields`);
}
}
} | import { VmwareTemplate } from '../../../../models/templates/vmware-template'; | random_line_split |
add-vmware-template.component.ts | import { Component, OnInit } from '@angular/core';
import { FormBuilder, FormControl, FormGroup, Validators } from '@angular/forms';
import { ActivatedRoute, Router } from '@angular/router';
import { v4 as uuid } from 'uuid';
import { Server } from '../../../../models/server';
import { VmwareTemplate } from '../../../../models/templates/vmware-template';
import { VmwareVm } from '../../../../models/vmware/vmware-vm';
import { ServerService } from '../../../../services/server.service';
import { TemplateMocksService } from '../../../../services/template-mocks.service';
import { ToasterService } from '../../../../services/toaster.service';
import { VmwareService } from '../../../../services/vmware.service';
@Component({
selector: 'app-add-vmware-template',
templateUrl: './add-vmware-template.component.html',
styleUrls: ['./add-vmware-template.component.scss', '../../preferences.component.scss'],
})
export class | implements OnInit {
server: Server;
virtualMachines: VmwareVm[];
selectedVM: VmwareVm;
vmwareTemplate: VmwareTemplate;
templateNameForm: FormGroup;
constructor(
private route: ActivatedRoute,
private serverService: ServerService,
private vmwareService: VmwareService,
private toasterService: ToasterService,
private templateMocksService: TemplateMocksService,
private router: Router,
private formBuilder: FormBuilder
) {
this.templateNameForm = this.formBuilder.group({
templateName: new FormControl(null, [Validators.required]),
});
}
ngOnInit() {
const server_id = this.route.snapshot.paramMap.get('server_id');
this.serverService.get(parseInt(server_id, 10)).then((server: Server) => {
this.server = server;
this.vmwareService.getVirtualMachines(this.server).subscribe((virtualMachines: VmwareVm[]) => {
this.virtualMachines = virtualMachines;
this.templateMocksService.getVmwareTemplate().subscribe((template: VmwareTemplate) => {
this.vmwareTemplate = template;
});
});
});
}
goBack() {
this.router.navigate(['/server', this.server.id, 'preferences', 'vmware', 'templates']);
}
addTemplate() {
if (!this.templateNameForm.invalid) {
this.vmwareTemplate.name = this.selectedVM.vmname;
this.vmwareTemplate.vmx_path = this.selectedVM.vmx_path;
this.vmwareTemplate.template_id = uuid();
this.vmwareService.addTemplate(this.server, this.vmwareTemplate).subscribe(() => {
this.goBack();
});
} else {
this.toasterService.error(`Fill all required fields`);
}
}
}
| AddVmwareTemplateComponent | identifier_name |
add-vmware-template.component.ts | import { Component, OnInit } from '@angular/core';
import { FormBuilder, FormControl, FormGroup, Validators } from '@angular/forms';
import { ActivatedRoute, Router } from '@angular/router';
import { v4 as uuid } from 'uuid';
import { Server } from '../../../../models/server';
import { VmwareTemplate } from '../../../../models/templates/vmware-template';
import { VmwareVm } from '../../../../models/vmware/vmware-vm';
import { ServerService } from '../../../../services/server.service';
import { TemplateMocksService } from '../../../../services/template-mocks.service';
import { ToasterService } from '../../../../services/toaster.service';
import { VmwareService } from '../../../../services/vmware.service';
@Component({
selector: 'app-add-vmware-template',
templateUrl: './add-vmware-template.component.html',
styleUrls: ['./add-vmware-template.component.scss', '../../preferences.component.scss'],
})
export class AddVmwareTemplateComponent implements OnInit {
server: Server;
virtualMachines: VmwareVm[];
selectedVM: VmwareVm;
vmwareTemplate: VmwareTemplate;
templateNameForm: FormGroup;
constructor(
private route: ActivatedRoute,
private serverService: ServerService,
private vmwareService: VmwareService,
private toasterService: ToasterService,
private templateMocksService: TemplateMocksService,
private router: Router,
private formBuilder: FormBuilder
) {
this.templateNameForm = this.formBuilder.group({
templateName: new FormControl(null, [Validators.required]),
});
}
ngOnInit() {
const server_id = this.route.snapshot.paramMap.get('server_id');
this.serverService.get(parseInt(server_id, 10)).then((server: Server) => {
this.server = server;
this.vmwareService.getVirtualMachines(this.server).subscribe((virtualMachines: VmwareVm[]) => {
this.virtualMachines = virtualMachines;
this.templateMocksService.getVmwareTemplate().subscribe((template: VmwareTemplate) => {
this.vmwareTemplate = template;
});
});
});
}
goBack() {
this.router.navigate(['/server', this.server.id, 'preferences', 'vmware', 'templates']);
}
addTemplate() {
if (!this.templateNameForm.invalid) {
this.vmwareTemplate.name = this.selectedVM.vmname;
this.vmwareTemplate.vmx_path = this.selectedVM.vmx_path;
this.vmwareTemplate.template_id = uuid();
this.vmwareService.addTemplate(this.server, this.vmwareTemplate).subscribe(() => {
this.goBack();
});
} else |
}
}
| {
this.toasterService.error(`Fill all required fields`);
} | conditional_block |
thermal_monitor.py | #!/usr/bin/python
# vim: ai:ts=4:sw=4:sts=4:et:fileencoding=utf-8
#
# Thermal monitor
#
# Copyright 2013 Michal Belica <devel@beli.sk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import serial
import sys
import signal
import select
import re
import time
import subprocess
from optparse import OptionParser
class ThermalMonitor(object):
def zabbix_sender(self):
proc = subprocess.Popen(['zabbix_sender', '-z', self.options.zabbix, '-p',
self.options.port, '-s', self.options.host, '-i', '-'], stdin=subprocess.PIPE)
for addr,temp in self.data.items():
proc.communicate('- %s[%s] %g\n' % (self.options.key, addr, temp))
proc.stdin.close()
proc.wait()
def parse_options(self):
parser = OptionParser()
parser.add_option("-d", "--device", dest="device",
help="read from serial port DEVICE (required)", metavar="DEVICE")
parser.add_option("-s", "--speed", dest="speed", type="int", default=9600,
help="serial port baud rate (default: 9600)", metavar="BAUD")
parser.add_option("-i", "--interval", dest="interval", type="int", default=10,
help="sampling interval (default: 10)", metavar="SECONDS")
parser.add_option("-z", "--zabbix", dest="zabbix",
help="Zabbix server (required)", metavar="ADDR")
parser.add_option("-p", "--port", dest="port", default="10051",
help="listening port of Zabbix server (default: 10051)", metavar="PORT")
parser.add_option("-n", "--host", dest="host",
help="name of host in Zabbix (required)", metavar="NAME")
parser.add_option("-k", "--key", dest="key", default="thermal_monitor",
help="item key base name; device address will be added as an argument, "
+"e.g. thermal_monitor[addr] (default: thermal_monitor)", metavar="key")
(self.options, self.args) = parser.parse_args()
# check for required options
for opt in ['device', 'zabbix', 'host']:
if opt not in self.options.__dict__ or self.options.__dict__[opt] is None:
parser.error("parameter --%s is required" % opt)
def sighandler_terminate(self, signum, frame):
self.running = False
def register_signals(self, ignore=[],
terminate=[signal.SIGINT, signal.SIGTERM, signal.SIGHUP]):
for sig in ignore:
signal.signal(sig, signal.SIG_IGN)
for sig in terminate:
signal.signal(sig, self.sighandler_terminate)
def open_serial(self):
self.ser = serial.Serial(self.options.device, self.options.speed)
self.ser.readline() # ignore first (incomplete) line
def __init__(self):
|
def start(self):
self.running = True
next = time.time()
sent = False
while self.running:
try:
line = self.ser.readline()
except select.error as e:
if e[0] == 4: # interrupted system call
continue
else:
raise
if time.time() > next:
next += self.options.interval
# clears the list to send all addresses again
for k,v in self.data.items():
self.data[k] = None
sent = False
elif sent:
# data already sent in this cycle
continue
m = self.cre.search(line)
if m:
# line matched pattern
addr = m.group('addr')
temp = float(m.group('temp'))
if addr not in self.data or self.data[addr] is None:
# address not yet collected in this cycle
self.data[addr] = temp
else:
# repeating address reached - send out data
print "sending", addr, temp
self.zabbix_sender()
sent = True
else:
print "invalid line received"
self.cleanup()
def cleanup(self):
self.ser.close()
if __name__ == "__main__":
thermalmonitor = ThermalMonitor()
thermalmonitor.start()
| self.running = False
self.data = dict()
self.register_signals()
self.parse_options()
self.open_serial()
self.cre = re.compile(r"R=(?P<addr>\w+)\s+T=(?P<temp>[.0-9]+)\r?$") | identifier_body |
thermal_monitor.py | #!/usr/bin/python
# vim: ai:ts=4:sw=4:sts=4:et:fileencoding=utf-8
#
# Thermal monitor
#
# Copyright 2013 Michal Belica <devel@beli.sk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import serial
import sys
import signal
import select
import re
import time
import subprocess
from optparse import OptionParser
class ThermalMonitor(object):
def zabbix_sender(self):
proc = subprocess.Popen(['zabbix_sender', '-z', self.options.zabbix, '-p',
self.options.port, '-s', self.options.host, '-i', '-'], stdin=subprocess.PIPE)
for addr,temp in self.data.items():
proc.communicate('- %s[%s] %g\n' % (self.options.key, addr, temp))
proc.stdin.close()
proc.wait()
def parse_options(self):
parser = OptionParser()
parser.add_option("-d", "--device", dest="device",
help="read from serial port DEVICE (required)", metavar="DEVICE")
parser.add_option("-s", "--speed", dest="speed", type="int", default=9600,
help="serial port baud rate (default: 9600)", metavar="BAUD")
parser.add_option("-i", "--interval", dest="interval", type="int", default=10,
help="sampling interval (default: 10)", metavar="SECONDS")
parser.add_option("-z", "--zabbix", dest="zabbix",
help="Zabbix server (required)", metavar="ADDR")
parser.add_option("-p", "--port", dest="port", default="10051",
help="listening port of Zabbix server (default: 10051)", metavar="PORT")
parser.add_option("-n", "--host", dest="host",
help="name of host in Zabbix (required)", metavar="NAME")
parser.add_option("-k", "--key", dest="key", default="thermal_monitor",
help="item key base name; device address will be added as an argument, "
+"e.g. thermal_monitor[addr] (default: thermal_monitor)", metavar="key")
(self.options, self.args) = parser.parse_args()
# check for required options
for opt in ['device', 'zabbix', 'host']:
if opt not in self.options.__dict__ or self.options.__dict__[opt] is None:
parser.error("parameter --%s is required" % opt)
def sighandler_terminate(self, signum, frame):
self.running = False
def register_signals(self, ignore=[],
terminate=[signal.SIGINT, signal.SIGTERM, signal.SIGHUP]):
for sig in ignore:
signal.signal(sig, signal.SIG_IGN)
for sig in terminate:
signal.signal(sig, self.sighandler_terminate)
def open_serial(self):
self.ser = serial.Serial(self.options.device, self.options.speed)
self.ser.readline() # ignore first (incomplete) line
def __init__(self):
self.running = False
self.data = dict()
self.register_signals()
self.parse_options()
self.open_serial()
self.cre = re.compile(r"R=(?P<addr>\w+)\s+T=(?P<temp>[.0-9]+)\r?$")
def start(self):
self.running = True
next = time.time()
sent = False
while self.running:
try:
line = self.ser.readline()
except select.error as e:
if e[0] == 4: # interrupted system call
continue
else:
raise
if time.time() > next:
next += self.options.interval
# clears the list to send all addresses again
for k,v in self.data.items():
self.data[k] = None
sent = False
elif sent:
# data already sent in this cycle
|
m = self.cre.search(line)
if m:
# line matched pattern
addr = m.group('addr')
temp = float(m.group('temp'))
if addr not in self.data or self.data[addr] is None:
# address not yet collected in this cycle
self.data[addr] = temp
else:
# repeating address reached - send out data
print "sending", addr, temp
self.zabbix_sender()
sent = True
else:
print "invalid line received"
self.cleanup()
def cleanup(self):
self.ser.close()
if __name__ == "__main__":
thermalmonitor = ThermalMonitor()
thermalmonitor.start()
| continue | conditional_block |
thermal_monitor.py | #!/usr/bin/python
# vim: ai:ts=4:sw=4:sts=4:et:fileencoding=utf-8
#
# Thermal monitor
#
# Copyright 2013 Michal Belica <devel@beli.sk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import serial
import sys
import signal
import select
import re
import time
import subprocess
from optparse import OptionParser
class ThermalMonitor(object):
def zabbix_sender(self):
proc = subprocess.Popen(['zabbix_sender', '-z', self.options.zabbix, '-p',
self.options.port, '-s', self.options.host, '-i', '-'], stdin=subprocess.PIPE)
for addr,temp in self.data.items():
proc.communicate('- %s[%s] %g\n' % (self.options.key, addr, temp))
proc.stdin.close()
proc.wait()
def parse_options(self):
parser = OptionParser()
parser.add_option("-d", "--device", dest="device",
help="read from serial port DEVICE (required)", metavar="DEVICE")
parser.add_option("-s", "--speed", dest="speed", type="int", default=9600,
help="serial port baud rate (default: 9600)", metavar="BAUD")
parser.add_option("-i", "--interval", dest="interval", type="int", default=10,
help="sampling interval (default: 10)", metavar="SECONDS")
parser.add_option("-z", "--zabbix", dest="zabbix",
help="Zabbix server (required)", metavar="ADDR")
parser.add_option("-p", "--port", dest="port", default="10051",
help="listening port of Zabbix server (default: 10051)", metavar="PORT")
parser.add_option("-n", "--host", dest="host",
help="name of host in Zabbix (required)", metavar="NAME")
parser.add_option("-k", "--key", dest="key", default="thermal_monitor",
help="item key base name; device address will be added as an argument, "
+"e.g. thermal_monitor[addr] (default: thermal_monitor)", metavar="key")
(self.options, self.args) = parser.parse_args()
# check for required options
for opt in ['device', 'zabbix', 'host']:
if opt not in self.options.__dict__ or self.options.__dict__[opt] is None:
parser.error("parameter --%s is required" % opt)
def sighandler_terminate(self, signum, frame):
self.running = False
def register_signals(self, ignore=[],
terminate=[signal.SIGINT, signal.SIGTERM, signal.SIGHUP]):
for sig in ignore:
signal.signal(sig, signal.SIG_IGN)
for sig in terminate:
signal.signal(sig, self.sighandler_terminate)
def open_serial(self):
self.ser = serial.Serial(self.options.device, self.options.speed)
self.ser.readline() # ignore first (incomplete) line
def | (self):
self.running = False
self.data = dict()
self.register_signals()
self.parse_options()
self.open_serial()
self.cre = re.compile(r"R=(?P<addr>\w+)\s+T=(?P<temp>[.0-9]+)\r?$")
def start(self):
self.running = True
next = time.time()
sent = False
while self.running:
try:
line = self.ser.readline()
except select.error as e:
if e[0] == 4: # interrupted system call
continue
else:
raise
if time.time() > next:
next += self.options.interval
# clears the list to send all addresses again
for k,v in self.data.items():
self.data[k] = None
sent = False
elif sent:
# data already sent in this cycle
continue
m = self.cre.search(line)
if m:
# line matched pattern
addr = m.group('addr')
temp = float(m.group('temp'))
if addr not in self.data or self.data[addr] is None:
# address not yet collected in this cycle
self.data[addr] = temp
else:
# repeating address reached - send out data
print "sending", addr, temp
self.zabbix_sender()
sent = True
else:
print "invalid line received"
self.cleanup()
def cleanup(self):
self.ser.close()
if __name__ == "__main__":
thermalmonitor = ThermalMonitor()
thermalmonitor.start()
| __init__ | identifier_name |
thermal_monitor.py | #!/usr/bin/python
# vim: ai:ts=4:sw=4:sts=4:et:fileencoding=utf-8
#
# Thermal monitor
#
# Copyright 2013 Michal Belica <devel@beli.sk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import serial
import sys
import signal
import select
import re
import time
import subprocess
from optparse import OptionParser
class ThermalMonitor(object):
def zabbix_sender(self):
proc = subprocess.Popen(['zabbix_sender', '-z', self.options.zabbix, '-p',
self.options.port, '-s', self.options.host, '-i', '-'], stdin=subprocess.PIPE)
for addr,temp in self.data.items():
proc.communicate('- %s[%s] %g\n' % (self.options.key, addr, temp))
proc.stdin.close()
proc.wait()
def parse_options(self):
parser = OptionParser()
parser.add_option("-d", "--device", dest="device",
help="read from serial port DEVICE (required)", metavar="DEVICE")
parser.add_option("-s", "--speed", dest="speed", type="int", default=9600,
help="serial port baud rate (default: 9600)", metavar="BAUD")
parser.add_option("-i", "--interval", dest="interval", type="int", default=10,
help="sampling interval (default: 10)", metavar="SECONDS")
parser.add_option("-z", "--zabbix", dest="zabbix",
help="Zabbix server (required)", metavar="ADDR")
parser.add_option("-p", "--port", dest="port", default="10051",
help="listening port of Zabbix server (default: 10051)", metavar="PORT")
parser.add_option("-n", "--host", dest="host",
help="name of host in Zabbix (required)", metavar="NAME")
parser.add_option("-k", "--key", dest="key", default="thermal_monitor",
help="item key base name; device address will be added as an argument, "
+"e.g. thermal_monitor[addr] (default: thermal_monitor)", metavar="key")
(self.options, self.args) = parser.parse_args()
# check for required options
for opt in ['device', 'zabbix', 'host']:
if opt not in self.options.__dict__ or self.options.__dict__[opt] is None:
parser.error("parameter --%s is required" % opt)
| def sighandler_terminate(self, signum, frame):
self.running = False
def register_signals(self, ignore=[],
terminate=[signal.SIGINT, signal.SIGTERM, signal.SIGHUP]):
for sig in ignore:
signal.signal(sig, signal.SIG_IGN)
for sig in terminate:
signal.signal(sig, self.sighandler_terminate)
def open_serial(self):
self.ser = serial.Serial(self.options.device, self.options.speed)
self.ser.readline() # ignore first (incomplete) line
def __init__(self):
self.running = False
self.data = dict()
self.register_signals()
self.parse_options()
self.open_serial()
self.cre = re.compile(r"R=(?P<addr>\w+)\s+T=(?P<temp>[.0-9]+)\r?$")
def start(self):
self.running = True
next = time.time()
sent = False
while self.running:
try:
line = self.ser.readline()
except select.error as e:
if e[0] == 4: # interrupted system call
continue
else:
raise
if time.time() > next:
next += self.options.interval
# clears the list to send all addresses again
for k,v in self.data.items():
self.data[k] = None
sent = False
elif sent:
# data already sent in this cycle
continue
m = self.cre.search(line)
if m:
# line matched pattern
addr = m.group('addr')
temp = float(m.group('temp'))
if addr not in self.data or self.data[addr] is None:
# address not yet collected in this cycle
self.data[addr] = temp
else:
# repeating address reached - send out data
print "sending", addr, temp
self.zabbix_sender()
sent = True
else:
print "invalid line received"
self.cleanup()
def cleanup(self):
self.ser.close()
if __name__ == "__main__":
thermalmonitor = ThermalMonitor()
thermalmonitor.start() | random_line_split | |
logging.js | /**
@license
* @pnp/logging v1.0.3 - pnp - light-weight, subscribable logging framework
* MIT (https://github.com/pnp/pnp/blob/master/LICENSE)
* Copyright (c) 2018 Microsoft
* docs: http://officedev.github.io/PnP-JS-Core
* source: https://github.com/pnp/pnp
* bugs: https://github.com/pnp/pnp/issues
*/
/**
* Class used to subscribe ILogListener and log messages throughout an application
*
*/
class Logger {
/**
* Gets or sets the active log level to apply for log filtering
*/
static get activeLogLevel() {
return Logger.instance.activeLogLevel;
}
static set activeLogLevel(value) {
Logger.instance.activeLogLevel = value;
}
static get instance() {
if (typeof Logger._instance === "undefined" || Logger._instance === null) {
Logger._instance = new LoggerImpl();
}
return Logger._instance;
}
/**
* Adds ILogListener instances to the set of subscribed listeners
*
* @param listeners One or more listeners to subscribe to this log
*/
static subscribe(...listeners) {
listeners.map(listener => Logger.instance.subscribe(listener));
}
/**
* Clears the subscribers collection, returning the collection before modifiction
*/
static clearSubscribers() {
return Logger.instance.clearSubscribers();
}
/**
* Gets the current subscriber count
*/
static get count() {
return Logger.instance.count;
}
/**
* Writes the supplied string to the subscribed listeners
*
* @param message The message to write
* @param level [Optional] if supplied will be used as the level of the entry (Default: LogLevel.Verbose)
*/
static write(message, level = 0 /* Verbose */) {
Logger.instance.log({ level: level, message: message });
}
/**
* Writes the supplied string to the subscribed listeners
*
* @param json The json object to stringify and write
* @param level [Optional] if supplied will be used as the level of the entry (Default: LogLevel.Verbose)
*/
static writeJSON(json, level = 0 /* Verbose */) {
Logger.instance.log({ level: level, message: JSON.stringify(json) });
}
/**
* Logs the supplied entry to the subscribed listeners
*
* @param entry The message to log
*/
static log(entry) {
Logger.instance.log(entry);
}
/**
* Logs an error object to the subscribed listeners
*
* @param err The error object
*/
static error(err) {
Logger.instance.log({ data: err, level: 3 /* Error */, message: err.message });
}
}
class LoggerImpl {
constructor(activeLogLevel = 2 /* Warning */, subscribers = []) {
this.activeLogLevel = activeLogLevel;
this.subscribers = subscribers;
}
subscribe(listener) {
this.subscribers.push(listener);
}
clearSubscribers() {
const s = this.subscribers.slice(0);
this.subscribers.length = 0;
return s;
}
get count() {
return this.subscribers.length;
}
write(message, level = 0 /* Verbose */) {
this.log({ level: level, message: message });
}
log(entry) {
if (typeof entry !== "undefined" && this.activeLogLevel <= entry.level) {
this.subscribers.map(subscriber => subscriber.log(entry));
}
}
}
/**
* Implementation of LogListener which logs to the console
*
*/
class ConsoleListener {
/**
* Any associated data that a given logging listener may choose to log or ignore
*
* @param entry The information to be logged
| const msg = this.format(entry);
switch (entry.level) {
case 0 /* Verbose */:
case 1 /* Info */:
console.log(msg);
break;
case 2 /* Warning */:
console.warn(msg);
break;
case 3 /* Error */:
console.error(msg);
break;
}
}
/**
* Formats the message
*
* @param entry The information to format into a string
*/
format(entry) {
const msg = [];
msg.push("Message: " + entry.message);
if (typeof entry.data !== "undefined") {
msg.push(" Data: " + JSON.stringify(entry.data));
}
return msg.join("");
}
}
/**
* Implementation of LogListener which logs to the supplied function
*
*/
class FunctionListener {
/**
* Creates a new instance of the FunctionListener class
*
* @constructor
* @param method The method to which any logging data will be passed
*/
constructor(method) {
this.method = method;
}
/**
* Any associated data that a given logging listener may choose to log or ignore
*
* @param entry The information to be logged
*/
log(entry) {
this.method(entry);
}
}
export { Logger, ConsoleListener, FunctionListener };
//# sourceMappingURL=logging.js.map | */
log(entry) {
| random_line_split |
logging.js | /**
@license
* @pnp/logging v1.0.3 - pnp - light-weight, subscribable logging framework
* MIT (https://github.com/pnp/pnp/blob/master/LICENSE)
* Copyright (c) 2018 Microsoft
* docs: http://officedev.github.io/PnP-JS-Core
* source: https://github.com/pnp/pnp
* bugs: https://github.com/pnp/pnp/issues
*/
/**
* Class used to subscribe ILogListener and log messages throughout an application
*
*/
class Logger {
/**
* Gets or sets the active log level to apply for log filtering
*/
static get activeLogLevel() {
return Logger.instance.activeLogLevel;
}
static set activeLogLevel(value) {
Logger.instance.activeLogLevel = value;
}
static get instance() {
if (typeof Logger._instance === "undefined" || Logger._instance === null) {
Logger._instance = new LoggerImpl();
}
return Logger._instance;
}
/**
* Adds ILogListener instances to the set of subscribed listeners
*
* @param listeners One or more listeners to subscribe to this log
*/
static subscribe(...listeners) {
listeners.map(listener => Logger.instance.subscribe(listener));
}
/**
* Clears the subscribers collection, returning the collection before modifiction
*/
static clearSubscribers() {
return Logger.instance.clearSubscribers();
}
/**
* Gets the current subscriber count
*/
static get count() {
return Logger.instance.count;
}
/**
* Writes the supplied string to the subscribed listeners
*
* @param message The message to write
* @param level [Optional] if supplied will be used as the level of the entry (Default: LogLevel.Verbose)
*/
static write(message, level = 0 /* Verbose */) {
Logger.instance.log({ level: level, message: message });
}
/**
* Writes the supplied string to the subscribed listeners
*
* @param json The json object to stringify and write
* @param level [Optional] if supplied will be used as the level of the entry (Default: LogLevel.Verbose)
*/
static writeJSON(json, level = 0 /* Verbose */) {
Logger.instance.log({ level: level, message: JSON.stringify(json) });
}
/**
* Logs the supplied entry to the subscribed listeners
*
* @param entry The message to log
*/
static log(entry) {
Logger.instance.log(entry);
}
/**
* Logs an error object to the subscribed listeners
*
* @param err The error object
*/
static error(err) {
Logger.instance.log({ data: err, level: 3 /* Error */, message: err.message });
}
}
class | {
constructor(activeLogLevel = 2 /* Warning */, subscribers = []) {
this.activeLogLevel = activeLogLevel;
this.subscribers = subscribers;
}
subscribe(listener) {
this.subscribers.push(listener);
}
clearSubscribers() {
const s = this.subscribers.slice(0);
this.subscribers.length = 0;
return s;
}
get count() {
return this.subscribers.length;
}
write(message, level = 0 /* Verbose */) {
this.log({ level: level, message: message });
}
log(entry) {
if (typeof entry !== "undefined" && this.activeLogLevel <= entry.level) {
this.subscribers.map(subscriber => subscriber.log(entry));
}
}
}
/**
* Implementation of LogListener which logs to the console
*
*/
class ConsoleListener {
/**
* Any associated data that a given logging listener may choose to log or ignore
*
* @param entry The information to be logged
*/
log(entry) {
const msg = this.format(entry);
switch (entry.level) {
case 0 /* Verbose */:
case 1 /* Info */:
console.log(msg);
break;
case 2 /* Warning */:
console.warn(msg);
break;
case 3 /* Error */:
console.error(msg);
break;
}
}
/**
* Formats the message
*
* @param entry The information to format into a string
*/
format(entry) {
const msg = [];
msg.push("Message: " + entry.message);
if (typeof entry.data !== "undefined") {
msg.push(" Data: " + JSON.stringify(entry.data));
}
return msg.join("");
}
}
/**
* Implementation of LogListener which logs to the supplied function
*
*/
class FunctionListener {
/**
* Creates a new instance of the FunctionListener class
*
* @constructor
* @param method The method to which any logging data will be passed
*/
constructor(method) {
this.method = method;
}
/**
* Any associated data that a given logging listener may choose to log or ignore
*
* @param entry The information to be logged
*/
log(entry) {
this.method(entry);
}
}
export { Logger, ConsoleListener, FunctionListener };
//# sourceMappingURL=logging.js.map
| LoggerImpl | identifier_name |
logging.js | /**
@license
* @pnp/logging v1.0.3 - pnp - light-weight, subscribable logging framework
* MIT (https://github.com/pnp/pnp/blob/master/LICENSE)
* Copyright (c) 2018 Microsoft
* docs: http://officedev.github.io/PnP-JS-Core
* source: https://github.com/pnp/pnp
* bugs: https://github.com/pnp/pnp/issues
*/
/**
* Class used to subscribe ILogListener and log messages throughout an application
*
*/
class Logger {
/**
* Gets or sets the active log level to apply for log filtering
*/
static get activeLogLevel() {
return Logger.instance.activeLogLevel;
}
static set activeLogLevel(value) {
Logger.instance.activeLogLevel = value;
}
static get instance() {
if (typeof Logger._instance === "undefined" || Logger._instance === null) |
return Logger._instance;
}
/**
* Adds ILogListener instances to the set of subscribed listeners
*
* @param listeners One or more listeners to subscribe to this log
*/
static subscribe(...listeners) {
listeners.map(listener => Logger.instance.subscribe(listener));
}
/**
* Clears the subscribers collection, returning the collection before modifiction
*/
static clearSubscribers() {
return Logger.instance.clearSubscribers();
}
/**
* Gets the current subscriber count
*/
static get count() {
return Logger.instance.count;
}
/**
* Writes the supplied string to the subscribed listeners
*
* @param message The message to write
* @param level [Optional] if supplied will be used as the level of the entry (Default: LogLevel.Verbose)
*/
static write(message, level = 0 /* Verbose */) {
Logger.instance.log({ level: level, message: message });
}
/**
* Writes the supplied string to the subscribed listeners
*
* @param json The json object to stringify and write
* @param level [Optional] if supplied will be used as the level of the entry (Default: LogLevel.Verbose)
*/
static writeJSON(json, level = 0 /* Verbose */) {
Logger.instance.log({ level: level, message: JSON.stringify(json) });
}
/**
* Logs the supplied entry to the subscribed listeners
*
* @param entry The message to log
*/
static log(entry) {
Logger.instance.log(entry);
}
/**
* Logs an error object to the subscribed listeners
*
* @param err The error object
*/
static error(err) {
Logger.instance.log({ data: err, level: 3 /* Error */, message: err.message });
}
}
class LoggerImpl {
constructor(activeLogLevel = 2 /* Warning */, subscribers = []) {
this.activeLogLevel = activeLogLevel;
this.subscribers = subscribers;
}
subscribe(listener) {
this.subscribers.push(listener);
}
clearSubscribers() {
const s = this.subscribers.slice(0);
this.subscribers.length = 0;
return s;
}
get count() {
return this.subscribers.length;
}
write(message, level = 0 /* Verbose */) {
this.log({ level: level, message: message });
}
log(entry) {
if (typeof entry !== "undefined" && this.activeLogLevel <= entry.level) {
this.subscribers.map(subscriber => subscriber.log(entry));
}
}
}
/**
* Implementation of LogListener which logs to the console
*
*/
class ConsoleListener {
/**
* Any associated data that a given logging listener may choose to log or ignore
*
* @param entry The information to be logged
*/
log(entry) {
const msg = this.format(entry);
switch (entry.level) {
case 0 /* Verbose */:
case 1 /* Info */:
console.log(msg);
break;
case 2 /* Warning */:
console.warn(msg);
break;
case 3 /* Error */:
console.error(msg);
break;
}
}
/**
* Formats the message
*
* @param entry The information to format into a string
*/
format(entry) {
const msg = [];
msg.push("Message: " + entry.message);
if (typeof entry.data !== "undefined") {
msg.push(" Data: " + JSON.stringify(entry.data));
}
return msg.join("");
}
}
/**
* Implementation of LogListener which logs to the supplied function
*
*/
class FunctionListener {
/**
* Creates a new instance of the FunctionListener class
*
* @constructor
* @param method The method to which any logging data will be passed
*/
constructor(method) {
this.method = method;
}
/**
* Any associated data that a given logging listener may choose to log or ignore
*
* @param entry The information to be logged
*/
log(entry) {
this.method(entry);
}
}
export { Logger, ConsoleListener, FunctionListener };
//# sourceMappingURL=logging.js.map
| {
Logger._instance = new LoggerImpl();
} | conditional_block |
config_handler.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::CoreError;
use directories::ProjectDirs;
use quic_p2p::Config as QuicP2pConfig;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[cfg(test)]
use std::fs;
use std::{
ffi::OsStr,
fs::File,
io::{self, BufReader},
path::PathBuf,
sync::Mutex,
};
const CONFIG_DIR_QUALIFIER: &str = "net";
const CONFIG_DIR_ORGANISATION: &str = "MaidSafe";
const CONFIG_DIR_APPLICATION: &str = "safe_core";
const CONFIG_FILE: &str = "safe_core.config";
const VAULT_CONFIG_DIR_APPLICATION: &str = "safe_vault";
const VAULT_CONNECTION_INFO_FILE: &str = "vault_connection_info.config";
lazy_static! {
static ref CONFIG_DIR_PATH: Mutex<Option<PathBuf>> = Mutex::new(None);
static ref DEFAULT_SAFE_CORE_PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from(
CONFIG_DIR_QUALIFIER,
CONFIG_DIR_ORGANISATION,
CONFIG_DIR_APPLICATION,
);
static ref DEFAULT_VAULT_PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from(
CONFIG_DIR_QUALIFIER,
CONFIG_DIR_ORGANISATION,
VAULT_CONFIG_DIR_APPLICATION,
);
}
/// Set a custom path for the config files.
// `OsStr` is platform-native.
pub fn set_config_dir_path<P: AsRef<OsStr> + ?Sized>(path: &P) {
*unwrap!(CONFIG_DIR_PATH.lock()) = Some(From::from(path));
}
/// Configuration for safe-core.
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
pub struct Config {
/// QuicP2p options.
pub quic_p2p: QuicP2pConfig,
/// Developer options.
pub dev: Option<DevConfig>,
}
#[cfg(any(target_os = "android", target_os = "androideabi", target_os = "ios"))]
fn check_config_path_set() -> Result<(), CoreError> {
if unwrap!(CONFIG_DIR_PATH.lock()).is_none() {
Err(CoreError::QuicP2p(quic_p2p::Error::Configuration(
"Boostrap cache directory not set".to_string(),
)))
} else {
Ok(())
}
}
impl Config {
/// Returns a new `Config` instance. Tries to read quic-p2p config from file.
pub fn new() -> Self {
let quic_p2p = Self::read_qp2p_from_file().unwrap_or_default();
Self {
quic_p2p,
dev: None,
}
}
fn read_qp2p_from_file() -> Result<QuicP2pConfig, CoreError> |
}
/// Extra configuration options intended for developers.
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
pub struct DevConfig {
/// Switch off mutations limit in mock-vault.
pub mock_unlimited_coins: bool,
/// Use memory store instead of file store in mock-vault.
pub mock_in_memory_storage: bool,
/// Set the mock-vault path if using file store (`mock_in_memory_storage` is `false`).
pub mock_vault_path: Option<String>,
}
/// Reads the `safe_core` config file and returns it or a default if this fails.
pub fn get_config() -> Config {
Config::new()
}
/// Returns the directory from which the config files are read
pub fn config_dir() -> Result<PathBuf, CoreError> {
Ok(dirs()?.config_dir().to_path_buf())
}
fn dirs() -> Result<ProjectDirs, CoreError> {
let project_dirs = if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
ProjectDirs::from_path(custom_path)
} else {
DEFAULT_SAFE_CORE_PROJECT_DIRS.clone()
};
project_dirs.ok_or_else(|| CoreError::from("Cannot determine project directory paths"))
}
fn vault_dirs() -> Result<ProjectDirs, CoreError> {
let project_dirs = if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
ProjectDirs::from_path(custom_path)
} else {
DEFAULT_VAULT_PROJECT_DIRS.clone()
};
project_dirs.ok_or_else(|| CoreError::from("Cannot determine vault directory paths"))
}
fn read_config_file<T>(dirs: ProjectDirs, file: &str) -> Result<T, CoreError>
where
T: DeserializeOwned,
{
let path = dirs.config_dir().join(file);
let file = match File::open(&path) {
Ok(file) => {
trace!("Reading: {}", path.display());
file
}
Err(error) => {
trace!("Not available: {}", path.display());
return Err(error.into());
}
};
let reader = BufReader::new(file);
serde_json::from_reader(reader).map_err(|err| {
info!("Could not parse: {} ({:?})", err, err);
err.into()
})
}
/// Writes a `safe_core` config file **for use by tests and examples**.
///
/// N.B. This method should only be used as a utility for test and examples. In normal use cases,
/// the config file should be created by the Vault's installer.
#[cfg(test)]
pub fn write_config_file(config: &Config) -> Result<PathBuf, CoreError> {
let dir = config_dir()?;
fs::create_dir_all(dir.clone())?;
let path = dir.join(CONFIG_FILE);
dbg!(&path);
let mut file = File::create(&path)?;
serde_json::to_writer_pretty(&mut file, config)?;
file.sync_all()?;
Ok(path)
}
#[cfg(all(test, feature = "mock-network"))]
mod test {
use super::*;
use std::env::temp_dir;
// 1. Write the default config file to temp directory.
// 2. Set the temp directory as the custom config directory path.
// 3. Assert that `Config::new()` reads the default config written to disk.
// 4. Verify that `Config::new()` generates the correct default config.
// The default config will have the custom config path in the
// `boostrap_cache_dir` field and `our_type` will be set to `Client`
#[test]
fn custom_config_path() {
let path = temp_dir();
let temp_dir_path = path.clone();
set_config_dir_path(&path);
// In the default config, `our_type` will be set to Node.
let config: Config = Default::default();
unwrap!(write_config_file(&config));
let read_cfg = Config::new();
assert_eq!(config, read_cfg);
let mut path = unwrap!(ProjectDirs::from_path(temp_dir_path.clone()))
.config_dir()
.to_path_buf();
path.push(CONFIG_FILE);
unwrap!(std::fs::remove_file(path));
// In the absence of a config file, the config handler
// should initialize the `our_type` field to Client.
let config = Config::new();
let expected_config = Config {
quic_p2p: QuicP2pConfig {
our_type: quic_p2p::OurType::Client,
bootstrap_cache_dir: Some(unwrap!(temp_dir_path.into_os_string().into_string())),
..Default::default()
},
..Default::default()
};
assert_eq!(config, expected_config);
}
}
| {
// First we read the default configuration file, and use a slightly modified default config
// if there is none.
let mut config: QuicP2pConfig = {
match read_config_file(dirs()?, CONFIG_FILE) {
Err(CoreError::IoError(ref err)) if err.kind() == io::ErrorKind::NotFound => {
// Bootstrap cache dir must be set on mobile platforms
// using set_config_dir_path
#[cfg(any(
target_os = "android",
target_os = "androideabi",
target_os = "ios"
))]
check_config_path_set()?;
let custom_dir =
if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
Some(custom_path.into_os_string().into_string().map_err(|_| {
CoreError::from("Config path is not a valid UTF-8 string")
})?)
} else {
None
};
// If there is no config file, assume we are a client
QuicP2pConfig {
our_type: quic_p2p::OurType::Client,
bootstrap_cache_dir: custom_dir,
..Default::default()
}
}
result => result?,
}
};
// Then if there is a locally running Vault we add it to the list of know contacts.
if let Ok(node_info) = read_config_file(vault_dirs()?, VAULT_CONNECTION_INFO_FILE) {
let _ = config.hard_coded_contacts.insert(node_info);
}
Ok(config)
} | identifier_body |
config_handler.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::CoreError;
use directories::ProjectDirs;
use quic_p2p::Config as QuicP2pConfig;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[cfg(test)]
use std::fs;
use std::{
ffi::OsStr,
fs::File,
io::{self, BufReader},
path::PathBuf,
sync::Mutex,
};
const CONFIG_DIR_QUALIFIER: &str = "net";
const CONFIG_DIR_ORGANISATION: &str = "MaidSafe";
const CONFIG_DIR_APPLICATION: &str = "safe_core";
const CONFIG_FILE: &str = "safe_core.config";
const VAULT_CONFIG_DIR_APPLICATION: &str = "safe_vault";
const VAULT_CONNECTION_INFO_FILE: &str = "vault_connection_info.config";
lazy_static! {
static ref CONFIG_DIR_PATH: Mutex<Option<PathBuf>> = Mutex::new(None);
static ref DEFAULT_SAFE_CORE_PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from(
CONFIG_DIR_QUALIFIER,
CONFIG_DIR_ORGANISATION,
CONFIG_DIR_APPLICATION,
);
static ref DEFAULT_VAULT_PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from(
CONFIG_DIR_QUALIFIER,
CONFIG_DIR_ORGANISATION,
VAULT_CONFIG_DIR_APPLICATION,
);
}
/// Set a custom path for the config files.
// `OsStr` is platform-native.
pub fn set_config_dir_path<P: AsRef<OsStr> + ?Sized>(path: &P) {
*unwrap!(CONFIG_DIR_PATH.lock()) = Some(From::from(path));
}
/// Configuration for safe-core.
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
pub struct Config {
/// QuicP2p options.
pub quic_p2p: QuicP2pConfig,
/// Developer options.
pub dev: Option<DevConfig>,
}
#[cfg(any(target_os = "android", target_os = "androideabi", target_os = "ios"))]
fn check_config_path_set() -> Result<(), CoreError> {
if unwrap!(CONFIG_DIR_PATH.lock()).is_none() {
Err(CoreError::QuicP2p(quic_p2p::Error::Configuration(
"Boostrap cache directory not set".to_string(),
)))
} else {
Ok(())
}
}
impl Config {
/// Returns a new `Config` instance. Tries to read quic-p2p config from file.
pub fn new() -> Self {
let quic_p2p = Self::read_qp2p_from_file().unwrap_or_default();
Self {
quic_p2p,
dev: None,
}
}
fn read_qp2p_from_file() -> Result<QuicP2pConfig, CoreError> {
// First we read the default configuration file, and use a slightly modified default config
// if there is none.
let mut config: QuicP2pConfig = {
match read_config_file(dirs()?, CONFIG_FILE) {
Err(CoreError::IoError(ref err)) if err.kind() == io::ErrorKind::NotFound => {
// Bootstrap cache dir must be set on mobile platforms
// using set_config_dir_path
#[cfg(any(
target_os = "android",
target_os = "androideabi",
target_os = "ios"
))]
check_config_path_set()?;
let custom_dir =
if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
Some(custom_path.into_os_string().into_string().map_err(|_| {
CoreError::from("Config path is not a valid UTF-8 string")
})?)
} else {
None
};
// If there is no config file, assume we are a client
QuicP2pConfig {
our_type: quic_p2p::OurType::Client,
bootstrap_cache_dir: custom_dir,
..Default::default()
}
}
result => result?,
}
};
// Then if there is a locally running Vault we add it to the list of know contacts.
if let Ok(node_info) = read_config_file(vault_dirs()?, VAULT_CONNECTION_INFO_FILE) {
let _ = config.hard_coded_contacts.insert(node_info);
}
Ok(config)
}
}
/// Extra configuration options intended for developers.
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
pub struct DevConfig {
/// Switch off mutations limit in mock-vault.
pub mock_unlimited_coins: bool,
/// Use memory store instead of file store in mock-vault.
pub mock_in_memory_storage: bool,
/// Set the mock-vault path if using file store (`mock_in_memory_storage` is `false`).
pub mock_vault_path: Option<String>,
}
/// Reads the `safe_core` config file and returns it or a default if this fails.
pub fn get_config() -> Config {
Config::new()
}
/// Returns the directory from which the config files are read
pub fn config_dir() -> Result<PathBuf, CoreError> {
Ok(dirs()?.config_dir().to_path_buf())
}
fn dirs() -> Result<ProjectDirs, CoreError> {
let project_dirs = if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
ProjectDirs::from_path(custom_path)
} else {
DEFAULT_SAFE_CORE_PROJECT_DIRS.clone()
};
project_dirs.ok_or_else(|| CoreError::from("Cannot determine project directory paths"))
}
fn vault_dirs() -> Result<ProjectDirs, CoreError> {
let project_dirs = if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
ProjectDirs::from_path(custom_path)
} else {
DEFAULT_VAULT_PROJECT_DIRS.clone()
};
project_dirs.ok_or_else(|| CoreError::from("Cannot determine vault directory paths"))
}
fn read_config_file<T>(dirs: ProjectDirs, file: &str) -> Result<T, CoreError>
where
T: DeserializeOwned,
{
let path = dirs.config_dir().join(file);
let file = match File::open(&path) {
Ok(file) => |
Err(error) => {
trace!("Not available: {}", path.display());
return Err(error.into());
}
};
let reader = BufReader::new(file);
serde_json::from_reader(reader).map_err(|err| {
info!("Could not parse: {} ({:?})", err, err);
err.into()
})
}
/// Writes a `safe_core` config file **for use by tests and examples**.
///
/// N.B. This method should only be used as a utility for test and examples. In normal use cases,
/// the config file should be created by the Vault's installer.
#[cfg(test)]
pub fn write_config_file(config: &Config) -> Result<PathBuf, CoreError> {
let dir = config_dir()?;
fs::create_dir_all(dir.clone())?;
let path = dir.join(CONFIG_FILE);
dbg!(&path);
let mut file = File::create(&path)?;
serde_json::to_writer_pretty(&mut file, config)?;
file.sync_all()?;
Ok(path)
}
#[cfg(all(test, feature = "mock-network"))]
mod test {
use super::*;
use std::env::temp_dir;
// 1. Write the default config file to temp directory.
// 2. Set the temp directory as the custom config directory path.
// 3. Assert that `Config::new()` reads the default config written to disk.
// 4. Verify that `Config::new()` generates the correct default config.
// The default config will have the custom config path in the
// `boostrap_cache_dir` field and `our_type` will be set to `Client`
#[test]
fn custom_config_path() {
let path = temp_dir();
let temp_dir_path = path.clone();
set_config_dir_path(&path);
// In the default config, `our_type` will be set to Node.
let config: Config = Default::default();
unwrap!(write_config_file(&config));
let read_cfg = Config::new();
assert_eq!(config, read_cfg);
let mut path = unwrap!(ProjectDirs::from_path(temp_dir_path.clone()))
.config_dir()
.to_path_buf();
path.push(CONFIG_FILE);
unwrap!(std::fs::remove_file(path));
// In the absence of a config file, the config handler
// should initialize the `our_type` field to Client.
let config = Config::new();
let expected_config = Config {
quic_p2p: QuicP2pConfig {
our_type: quic_p2p::OurType::Client,
bootstrap_cache_dir: Some(unwrap!(temp_dir_path.into_os_string().into_string())),
..Default::default()
},
..Default::default()
};
assert_eq!(config, expected_config);
}
}
| {
trace!("Reading: {}", path.display());
file
} | conditional_block |
config_handler.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::CoreError;
use directories::ProjectDirs;
use quic_p2p::Config as QuicP2pConfig;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[cfg(test)]
use std::fs;
use std::{
ffi::OsStr,
fs::File,
io::{self, BufReader},
path::PathBuf,
sync::Mutex,
};
const CONFIG_DIR_QUALIFIER: &str = "net";
const CONFIG_DIR_ORGANISATION: &str = "MaidSafe";
const CONFIG_DIR_APPLICATION: &str = "safe_core";
const CONFIG_FILE: &str = "safe_core.config";
const VAULT_CONFIG_DIR_APPLICATION: &str = "safe_vault";
const VAULT_CONNECTION_INFO_FILE: &str = "vault_connection_info.config";
lazy_static! {
static ref CONFIG_DIR_PATH: Mutex<Option<PathBuf>> = Mutex::new(None);
static ref DEFAULT_SAFE_CORE_PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from(
CONFIG_DIR_QUALIFIER,
CONFIG_DIR_ORGANISATION,
CONFIG_DIR_APPLICATION,
);
static ref DEFAULT_VAULT_PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from(
CONFIG_DIR_QUALIFIER,
CONFIG_DIR_ORGANISATION,
VAULT_CONFIG_DIR_APPLICATION,
);
}
/// Set a custom path for the config files.
// `OsStr` is platform-native.
pub fn set_config_dir_path<P: AsRef<OsStr> + ?Sized>(path: &P) {
*unwrap!(CONFIG_DIR_PATH.lock()) = Some(From::from(path));
}
/// Configuration for safe-core.
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
pub struct | {
/// QuicP2p options.
pub quic_p2p: QuicP2pConfig,
/// Developer options.
pub dev: Option<DevConfig>,
}
#[cfg(any(target_os = "android", target_os = "androideabi", target_os = "ios"))]
fn check_config_path_set() -> Result<(), CoreError> {
if unwrap!(CONFIG_DIR_PATH.lock()).is_none() {
Err(CoreError::QuicP2p(quic_p2p::Error::Configuration(
"Boostrap cache directory not set".to_string(),
)))
} else {
Ok(())
}
}
impl Config {
/// Returns a new `Config` instance. Tries to read quic-p2p config from file.
pub fn new() -> Self {
let quic_p2p = Self::read_qp2p_from_file().unwrap_or_default();
Self {
quic_p2p,
dev: None,
}
}
fn read_qp2p_from_file() -> Result<QuicP2pConfig, CoreError> {
// First we read the default configuration file, and use a slightly modified default config
// if there is none.
let mut config: QuicP2pConfig = {
match read_config_file(dirs()?, CONFIG_FILE) {
Err(CoreError::IoError(ref err)) if err.kind() == io::ErrorKind::NotFound => {
// Bootstrap cache dir must be set on mobile platforms
// using set_config_dir_path
#[cfg(any(
target_os = "android",
target_os = "androideabi",
target_os = "ios"
))]
check_config_path_set()?;
let custom_dir =
if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
Some(custom_path.into_os_string().into_string().map_err(|_| {
CoreError::from("Config path is not a valid UTF-8 string")
})?)
} else {
None
};
// If there is no config file, assume we are a client
QuicP2pConfig {
our_type: quic_p2p::OurType::Client,
bootstrap_cache_dir: custom_dir,
..Default::default()
}
}
result => result?,
}
};
// Then if there is a locally running Vault we add it to the list of know contacts.
if let Ok(node_info) = read_config_file(vault_dirs()?, VAULT_CONNECTION_INFO_FILE) {
let _ = config.hard_coded_contacts.insert(node_info);
}
Ok(config)
}
}
/// Extra configuration options intended for developers.
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
pub struct DevConfig {
/// Switch off mutations limit in mock-vault.
pub mock_unlimited_coins: bool,
/// Use memory store instead of file store in mock-vault.
pub mock_in_memory_storage: bool,
/// Set the mock-vault path if using file store (`mock_in_memory_storage` is `false`).
pub mock_vault_path: Option<String>,
}
/// Reads the `safe_core` config file and returns it or a default if this fails.
pub fn get_config() -> Config {
Config::new()
}
/// Returns the directory from which the config files are read
pub fn config_dir() -> Result<PathBuf, CoreError> {
Ok(dirs()?.config_dir().to_path_buf())
}
fn dirs() -> Result<ProjectDirs, CoreError> {
let project_dirs = if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
ProjectDirs::from_path(custom_path)
} else {
DEFAULT_SAFE_CORE_PROJECT_DIRS.clone()
};
project_dirs.ok_or_else(|| CoreError::from("Cannot determine project directory paths"))
}
fn vault_dirs() -> Result<ProjectDirs, CoreError> {
let project_dirs = if let Some(custom_path) = unwrap!(CONFIG_DIR_PATH.lock()).clone() {
ProjectDirs::from_path(custom_path)
} else {
DEFAULT_VAULT_PROJECT_DIRS.clone()
};
project_dirs.ok_or_else(|| CoreError::from("Cannot determine vault directory paths"))
}
fn read_config_file<T>(dirs: ProjectDirs, file: &str) -> Result<T, CoreError>
where
T: DeserializeOwned,
{
let path = dirs.config_dir().join(file);
let file = match File::open(&path) {
Ok(file) => {
trace!("Reading: {}", path.display());
file
}
Err(error) => {
trace!("Not available: {}", path.display());
return Err(error.into());
}
};
let reader = BufReader::new(file);
serde_json::from_reader(reader).map_err(|err| {
info!("Could not parse: {} ({:?})", err, err);
err.into()
})
}
/// Writes a `safe_core` config file **for use by tests and examples**.
///
/// N.B. This method should only be used as a utility for test and examples. In normal use cases,
/// the config file should be created by the Vault's installer.
#[cfg(test)]
pub fn write_config_file(config: &Config) -> Result<PathBuf, CoreError> {
let dir = config_dir()?;
fs::create_dir_all(dir.clone())?;
let path = dir.join(CONFIG_FILE);
dbg!(&path);
let mut file = File::create(&path)?;
serde_json::to_writer_pretty(&mut file, config)?;
file.sync_all()?;
Ok(path)
}
#[cfg(all(test, feature = "mock-network"))]
mod test {
use super::*;
use std::env::temp_dir;
// 1. Write the default config file to temp directory.
// 2. Set the temp directory as the custom config directory path.
// 3. Assert that `Config::new()` reads the default config written to disk.
// 4. Verify that `Config::new()` generates the correct default config.
// The default config will have the custom config path in the
// `boostrap_cache_dir` field and `our_type` will be set to `Client`
#[test]
fn custom_config_path() {
let path = temp_dir();
let temp_dir_path = path.clone();
set_config_dir_path(&path);
// In the default config, `our_type` will be set to Node.
let config: Config = Default::default();
unwrap!(write_config_file(&config));
let read_cfg = Config::new();
assert_eq!(config, read_cfg);
let mut path = unwrap!(ProjectDirs::from_path(temp_dir_path.clone()))
.config_dir()
.to_path_buf();
path.push(CONFIG_FILE);
unwrap!(std::fs::remove_file(path));
// In the absence of a config file, the config handler
// should initialize the `our_type` field to Client.
let config = Config::new();
let expected_config = Config {
quic_p2p: QuicP2pConfig {
our_type: quic_p2p::OurType::Client,
bootstrap_cache_dir: Some(unwrap!(temp_dir_path.into_os_string().into_string())),
..Default::default()
},
..Default::default()
};
assert_eq!(config, expected_config);
}
}
| Config | identifier_name |
app.js | (function(){
'use strict';
| }
);
var genres = ['fable', 'fantasy', 'fiction', 'folklore', 'horror', 'humor', 'legend', 'metafiction', 'mystery', 'mythology', 'non-fiction', 'poetry']
var books = [
{
title: 'A Game of Thrones: A Song of Ice and Fire',
author: 'George R.R. Martin',
isbn: '0553593714',
review: 'The most inventive and entertaining fantasy saga of our time—warrants one hell of an introduction. I loved this book!',
rating: 4,
genres: { 'non-fiction': true, fantasy: true }
},{
title: 'HTML for Babies',
author: 'John C Vanden-Heuvel Sr',
isbn: '0615487661',
review: "It's never too early to be standards compliant! I taught my little one mark-up in under one hour!",
rating: 5,
genres: { fiction: true }
},{
title: 'A is for Array',
author: 'Brandon J Hansen',
isbn: '1489522212',
review: 'A is for Array is the ABC book for future programmers. Filled with fun illustrations and simple real-world examples, my children loved seeing my world intertwined with theirs!',
rating: 4,
genres: { fiction: true }
},{
title: 'The Dragon Reborn',
author: 'Robert Jordan',
isbn: '0812513711',
review: 'The Wheel weaves as the Wheel wills, and we are only the thread of the Pattern. Moiraine',
rating: 4,
genres: { 'non-fiction': true, fantasy: true }
}
];
})(); | var app = angular.module('readingList', []);
app.controller = ('ReadingListController', function(){
this.books = books;
this.genres = genres; | random_line_split |
TestFastLength.rs | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
float __attribute__((kernel)) testFastLengthFloatFloat(float inV) {
return fast_length(inV);
}
| float __attribute__((kernel)) testFastLengthFloat2Float(float2 inV) {
return fast_length(inV);
}
float __attribute__((kernel)) testFastLengthFloat3Float(float3 inV) {
return fast_length(inV);
}
float __attribute__((kernel)) testFastLengthFloat4Float(float4 inV) {
return fast_length(inV);
} | random_line_split | |
functions_e.js | var searchData=
[
['randomdouble',['randomDouble',['../namespace_num_utils.html#a402b26db626888d32e79bfab0e6ba5c2',1,'NumUtils']]],
['randomint',['randomInt',['../namespace_num_utils.html#abb2bad9628db7cd63498680a6f84e13c',1,'NumUtils']]],
['read_5fkernels_5ffrom_5ffile',['read_kernels_from_file',['../class_c_l_helper.html#a14ace996406fabec7232dc354b0aee27',1,'CLHelper']]],
['readasstring',['readAsString',['../class_file_handle.html#a9ba334d8e171983edc20759217bb8558',1,'FileHandle']]],
['readasvector',['readAsVector',['../class_file_handle.html#a941b1e265de1520cb7e1ac26ece6d99b',1,'FileHandle']]],
['readfullfile',['readFullFile',['../namespace_f_s_utils.html#a716aadc6305897ef4710c2f261db73c9',1,'FSUtils']]],
['readlinebyline',['readLineByLine',['../namespace_f_s_utils.html#aa911eba466bd3ba571e86dc3ee024a97',1,'FSUtils']]], | ['removecharfromstr',['removeCharFromStr',['../namespace_str_utils.html#a1d9112fe1be09aeafa3539fb771dd7c8',1,'StrUtils']]],
['resumetimer',['resumeTimer',['../class_timer.html#ade1f50ac610d5fe49228f4e6dc3d4774',1,'Timer']]]
]; | random_line_split | |
cron_servlet.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
|
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def CreateGCSFileSystemProvider(self, object_store_creator):
return CloudStorageFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
# make it flush the log every time its used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider)
| _cronlog.error(error_message('response status %s' % response.status))
failure_count += 1 | conditional_block |
cron_servlet.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def | (self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def CreateGCSFileSystemProvider(self, object_store_creator):
return CloudStorageFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
# make it flush the log every time its used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider)
| error | identifier_name |
cron_servlet.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
|
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def CreateGCSFileSystemProvider(self, object_store_creator):
return CloudStorageFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
# make it flush the log every time its used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider)
| try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush() | identifier_body |
cron_servlet.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def CreateGCSFileSystemProvider(self, object_store_creator):
return CloudStorageFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
# make it flush the log every time its used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
| # ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider) | # This is returned every time RenderServlet wants to create a new | random_line_split |
disttest.py | # Copyright (c) 2014 Christian Schmitz <tynn.dev@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is the disttest python module. It is a fully functional replacement for
the distutils.core module. Only the setup function is extended to support a
test_suite keyword argument and to expose a test command.
Just replace all imports from distutils.core with the respective imports
from disttest.
Pull the gist to include the module:
git remote add disttest https://gist.github.com/9779576.git
git pull disttest master
"""
import importlib, inspect, os.path, sys, unittest
from distutils.core import *
def _import_module (module) :
if '/' in module :
if os.path.isabs(module) :
raise ImportError("Import by absolute filename is not supported")
module = os.path.splitext(module)[0]
if '.' in module :
raise ImportError("Import by filenames with '.' is not supported")
if module[-1] == '/' :
module = module[:-1]
module = module.replace(os.path.sep, '.')
return importlib.import_module(module)
def _load_source_imp (name, pathname) :
import imp
imp.acquire_lock()
try : return imp.load_compiled(name, pathname)
except : return imp.load_source(name, pathname)
finally : imp.release_lock()
def _load_source (path) :
pathname = os.path.abspath(path)
name = 'disttest._' + os.path.splitext(os.path.split(pathname)[1])[0]
try :
try : from importlib.machinery import SourceFileLoader
except : return _load_source_imp(name, pathname)
return SourceFileLoader(name, pathname).load_module()
except : raise ImportError("No module named {}".format(path))
class test (Command) :
description = "perform unit tests on extensions"
user_options = [
('test-suite=', 't', "test suite module to run"),
('check', 'c', "run check command"),
('skip-build', None, "skip the build steps"),
('build-dir=', None, "build directory (implies --skip-build)"),
]
boolean_options = ['check', 'skip-build']
def initialize_options (self) :
self.build_dir = None
self.check = None
self.skip_build = None
self.test_suite = None
def finalize_options (self) :
self.set_unittest_options()
if not self.test_suite and hasattr(self.distribution, 'test_suite') :
self.test_suite = getattr(self.distribution, 'test_suite')
if self.build_dir :
self.skip_build = True
self.set_undefined_options('build', ('build_lib', 'build_dir'))
self.set_undefined_options('install', ('skip_build', 'skip_build'))
def set_unittest_options (self) :
self.loader = unittest.defaultTestLoader
self.suiteType = unittest.TestSuite
def run (self) :
if self.check :
self.run_command('check')
if not self.skip_build :
self.run_command('build')
if self.test_suite :
self.run_tests()
def run_tests (self) :
sys.path.insert(0, self.build_dir)
try : tests = self.suiteType(self.load_tests(self.test_suite))
except ValueError :
tests = self.suiteType()
for _tests in self.test_suite :
|
if not self.dry_run :
unittest.TextTestRunner().run(tests)
sys.path.remove(self.build_dir)
def load_tests (self, tests) :
if isinstance(tests, (unittest.TestCase, unittest.TestSuite)) :
return tests
if isinstance(tests, str) :
try : tests = _import_module(tests)
except : tests = _load_source(tests)
if inspect.ismodule(tests) :
return self.loader.loadTestsFromModule(tests)
raise ValueError('Not a test suite: {}'.format(tests))
def _extend_setup (_setup = setup, _Distribution = Distribution) :
"""
Extend the default or any other compatible distutils setup function.
The Distribution class used with setup must be supplied, if setup
does not distutils.dist.Distribution as default.
"""
def setup (**attrs) :
"""Extended setup function"""
if 'cmdclass' not in attrs :
attrs['cmdclass'] = {'test': test}
elif 'test' not in attrs['cmdclass'] :
attrs['cmdclass']['test'] = test
if 'distclass' not in attrs :
attrs['distclass'] = type('Distribution', (_Distribution, object), {"test_suite": None})
elif not hasattr(attrs['distclass'], 'test_suite') :
attrs['distclass'] = type('Distribution', (attrs['distclass'], object), {"test_suite": None})
return _setup(**attrs)
return setup
setup = _extend_setup()
setup_keywords = setup_keywords + ('test_suite',)
| tests.addTest(self.load_tests(_tests)) | conditional_block |
disttest.py | # Copyright (c) 2014 Christian Schmitz <tynn.dev@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is the disttest python module. It is a fully functional replacement for
the distutils.core module. Only the setup function is extended to support a
test_suite keyword argument and to expose a test command.
Just replace all imports from distutils.core with the respective imports
from disttest.
Pull the gist to include the module:
git remote add disttest https://gist.github.com/9779576.git
git pull disttest master
"""
import importlib, inspect, os.path, sys, unittest
from distutils.core import *
def _import_module (module) :
if '/' in module :
if os.path.isabs(module) :
raise ImportError("Import by absolute filename is not supported")
module = os.path.splitext(module)[0]
if '.' in module :
raise ImportError("Import by filenames with '.' is not supported")
if module[-1] == '/' :
module = module[:-1]
module = module.replace(os.path.sep, '.')
return importlib.import_module(module)
def _load_source_imp (name, pathname) :
import imp
imp.acquire_lock()
try : return imp.load_compiled(name, pathname)
except : return imp.load_source(name, pathname)
finally : imp.release_lock()
def _load_source (path) :
pathname = os.path.abspath(path)
name = 'disttest._' + os.path.splitext(os.path.split(pathname)[1])[0]
try :
try : from importlib.machinery import SourceFileLoader
except : return _load_source_imp(name, pathname)
return SourceFileLoader(name, pathname).load_module()
except : raise ImportError("No module named {}".format(path))
class test (Command) :
description = "perform unit tests on extensions"
user_options = [
('test-suite=', 't', "test suite module to run"),
('check', 'c', "run check command"),
('skip-build', None, "skip the build steps"),
('build-dir=', None, "build directory (implies --skip-build)"),
]
boolean_options = ['check', 'skip-build']
def initialize_options (self) :
self.build_dir = None
self.check = None
self.skip_build = None
self.test_suite = None
def finalize_options (self) :
self.set_unittest_options()
if not self.test_suite and hasattr(self.distribution, 'test_suite') :
self.test_suite = getattr(self.distribution, 'test_suite')
if self.build_dir :
self.skip_build = True
self.set_undefined_options('build', ('build_lib', 'build_dir'))
self.set_undefined_options('install', ('skip_build', 'skip_build'))
def set_unittest_options (self) :
self.loader = unittest.defaultTestLoader
self.suiteType = unittest.TestSuite
def run (self) :
if self.check :
self.run_command('check')
if not self.skip_build :
self.run_command('build')
if self.test_suite :
self.run_tests()
def run_tests (self) :
sys.path.insert(0, self.build_dir)
try : tests = self.suiteType(self.load_tests(self.test_suite))
except ValueError :
tests = self.suiteType()
for _tests in self.test_suite :
tests.addTest(self.load_tests(_tests))
if not self.dry_run :
unittest.TextTestRunner().run(tests)
sys.path.remove(self.build_dir)
def load_tests (self, tests) :
|
def _extend_setup (_setup = setup, _Distribution = Distribution) :
"""
Extend the default or any other compatible distutils setup function.
The Distribution class used with setup must be supplied, if setup
does not distutils.dist.Distribution as default.
"""
def setup (**attrs) :
"""Extended setup function"""
if 'cmdclass' not in attrs :
attrs['cmdclass'] = {'test': test}
elif 'test' not in attrs['cmdclass'] :
attrs['cmdclass']['test'] = test
if 'distclass' not in attrs :
attrs['distclass'] = type('Distribution', (_Distribution, object), {"test_suite": None})
elif not hasattr(attrs['distclass'], 'test_suite') :
attrs['distclass'] = type('Distribution', (attrs['distclass'], object), {"test_suite": None})
return _setup(**attrs)
return setup
setup = _extend_setup()
setup_keywords = setup_keywords + ('test_suite',)
| if isinstance(tests, (unittest.TestCase, unittest.TestSuite)) :
return tests
if isinstance(tests, str) :
try : tests = _import_module(tests)
except : tests = _load_source(tests)
if inspect.ismodule(tests) :
return self.loader.loadTestsFromModule(tests)
raise ValueError('Not a test suite: {}'.format(tests)) | identifier_body |
disttest.py | # Copyright (c) 2014 Christian Schmitz <tynn.dev@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is the disttest python module. It is a fully functional replacement for
the distutils.core module. Only the setup function is extended to support a
test_suite keyword argument and to expose a test command.
Just replace all imports from distutils.core with the respective imports
from disttest.
Pull the gist to include the module:
git remote add disttest https://gist.github.com/9779576.git
git pull disttest master
"""
import importlib, inspect, os.path, sys, unittest
from distutils.core import *
def _import_module (module) :
if '/' in module :
if os.path.isabs(module) :
raise ImportError("Import by absolute filename is not supported")
module = os.path.splitext(module)[0]
if '.' in module :
raise ImportError("Import by filenames with '.' is not supported")
if module[-1] == '/' :
module = module[:-1]
module = module.replace(os.path.sep, '.')
return importlib.import_module(module)
def _load_source_imp (name, pathname) :
import imp
imp.acquire_lock()
try : return imp.load_compiled(name, pathname)
except : return imp.load_source(name, pathname)
finally : imp.release_lock()
def _load_source (path) :
pathname = os.path.abspath(path)
name = 'disttest._' + os.path.splitext(os.path.split(pathname)[1])[0]
try :
try : from importlib.machinery import SourceFileLoader
except : return _load_source_imp(name, pathname)
return SourceFileLoader(name, pathname).load_module()
except : raise ImportError("No module named {}".format(path))
class test (Command) :
description = "perform unit tests on extensions"
user_options = [
('test-suite=', 't', "test suite module to run"),
('check', 'c', "run check command"),
('skip-build', None, "skip the build steps"),
('build-dir=', None, "build directory (implies --skip-build)"),
]
boolean_options = ['check', 'skip-build']
def initialize_options (self) :
self.build_dir = None
self.check = None
self.skip_build = None
self.test_suite = None
def finalize_options (self) :
self.set_unittest_options()
if not self.test_suite and hasattr(self.distribution, 'test_suite') :
self.test_suite = getattr(self.distribution, 'test_suite')
if self.build_dir :
self.skip_build = True
self.set_undefined_options('build', ('build_lib', 'build_dir'))
self.set_undefined_options('install', ('skip_build', 'skip_build'))
def set_unittest_options (self) :
self.loader = unittest.defaultTestLoader
self.suiteType = unittest.TestSuite
def run (self) :
if self.check :
self.run_command('check')
if not self.skip_build :
self.run_command('build')
if self.test_suite :
self.run_tests()
def run_tests (self) :
sys.path.insert(0, self.build_dir)
try : tests = self.suiteType(self.load_tests(self.test_suite))
except ValueError :
tests = self.suiteType()
for _tests in self.test_suite :
tests.addTest(self.load_tests(_tests))
if not self.dry_run :
unittest.TextTestRunner().run(tests)
sys.path.remove(self.build_dir)
def load_tests (self, tests) :
if isinstance(tests, (unittest.TestCase, unittest.TestSuite)) :
return tests
if isinstance(tests, str) :
try : tests = _import_module(tests)
except : tests = _load_source(tests)
if inspect.ismodule(tests) :
return self.loader.loadTestsFromModule(tests)
raise ValueError('Not a test suite: {}'.format(tests))
def _extend_setup (_setup = setup, _Distribution = Distribution) : |
def setup (**attrs) :
"""Extended setup function"""
if 'cmdclass' not in attrs :
attrs['cmdclass'] = {'test': test}
elif 'test' not in attrs['cmdclass'] :
attrs['cmdclass']['test'] = test
if 'distclass' not in attrs :
attrs['distclass'] = type('Distribution', (_Distribution, object), {"test_suite": None})
elif not hasattr(attrs['distclass'], 'test_suite') :
attrs['distclass'] = type('Distribution', (attrs['distclass'], object), {"test_suite": None})
return _setup(**attrs)
return setup
setup = _extend_setup()
setup_keywords = setup_keywords + ('test_suite',) | """
Extend the default or any other compatible distutils setup function.
The Distribution class used with setup must be supplied, if setup
does not distutils.dist.Distribution as default.
""" | random_line_split |
disttest.py | # Copyright (c) 2014 Christian Schmitz <tynn.dev@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is the disttest python module. It is a fully functional replacement for
the distutils.core module. Only the setup function is extended to support a
test_suite keyword argument and to expose a test command.
Just replace all imports from distutils.core with the respective imports
from disttest.
Pull the gist to include the module:
git remote add disttest https://gist.github.com/9779576.git
git pull disttest master
"""
import importlib, inspect, os.path, sys, unittest
from distutils.core import *
def _import_module (module) :
if '/' in module :
if os.path.isabs(module) :
raise ImportError("Import by absolute filename is not supported")
module = os.path.splitext(module)[0]
if '.' in module :
raise ImportError("Import by filenames with '.' is not supported")
if module[-1] == '/' :
module = module[:-1]
module = module.replace(os.path.sep, '.')
return importlib.import_module(module)
def _load_source_imp (name, pathname) :
import imp
imp.acquire_lock()
try : return imp.load_compiled(name, pathname)
except : return imp.load_source(name, pathname)
finally : imp.release_lock()
def | (path) :
pathname = os.path.abspath(path)
name = 'disttest._' + os.path.splitext(os.path.split(pathname)[1])[0]
try :
try : from importlib.machinery import SourceFileLoader
except : return _load_source_imp(name, pathname)
return SourceFileLoader(name, pathname).load_module()
except : raise ImportError("No module named {}".format(path))
class test (Command) :
description = "perform unit tests on extensions"
user_options = [
('test-suite=', 't', "test suite module to run"),
('check', 'c', "run check command"),
('skip-build', None, "skip the build steps"),
('build-dir=', None, "build directory (implies --skip-build)"),
]
boolean_options = ['check', 'skip-build']
def initialize_options (self) :
self.build_dir = None
self.check = None
self.skip_build = None
self.test_suite = None
def finalize_options (self) :
self.set_unittest_options()
if not self.test_suite and hasattr(self.distribution, 'test_suite') :
self.test_suite = getattr(self.distribution, 'test_suite')
if self.build_dir :
self.skip_build = True
self.set_undefined_options('build', ('build_lib', 'build_dir'))
self.set_undefined_options('install', ('skip_build', 'skip_build'))
def set_unittest_options (self) :
self.loader = unittest.defaultTestLoader
self.suiteType = unittest.TestSuite
def run (self) :
if self.check :
self.run_command('check')
if not self.skip_build :
self.run_command('build')
if self.test_suite :
self.run_tests()
def run_tests (self) :
sys.path.insert(0, self.build_dir)
try : tests = self.suiteType(self.load_tests(self.test_suite))
except ValueError :
tests = self.suiteType()
for _tests in self.test_suite :
tests.addTest(self.load_tests(_tests))
if not self.dry_run :
unittest.TextTestRunner().run(tests)
sys.path.remove(self.build_dir)
def load_tests (self, tests) :
if isinstance(tests, (unittest.TestCase, unittest.TestSuite)) :
return tests
if isinstance(tests, str) :
try : tests = _import_module(tests)
except : tests = _load_source(tests)
if inspect.ismodule(tests) :
return self.loader.loadTestsFromModule(tests)
raise ValueError('Not a test suite: {}'.format(tests))
def _extend_setup (_setup = setup, _Distribution = Distribution) :
"""
Extend the default or any other compatible distutils setup function.
The Distribution class used with setup must be supplied, if setup
does not distutils.dist.Distribution as default.
"""
def setup (**attrs) :
"""Extended setup function"""
if 'cmdclass' not in attrs :
attrs['cmdclass'] = {'test': test}
elif 'test' not in attrs['cmdclass'] :
attrs['cmdclass']['test'] = test
if 'distclass' not in attrs :
attrs['distclass'] = type('Distribution', (_Distribution, object), {"test_suite": None})
elif not hasattr(attrs['distclass'], 'test_suite') :
attrs['distclass'] = type('Distribution', (attrs['distclass'], object), {"test_suite": None})
return _setup(**attrs)
return setup
setup = _extend_setup()
setup_keywords = setup_keywords + ('test_suite',)
| _load_source | identifier_name |
binary.rs | /*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::fmt::Write;
use byteorder::{BigEndian, ByteOrder};
pub fn to_byte_array(value: u32) -> [u8; 4] {
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, value);
raw
}
pub fn | (data: &[u8]) -> String {
let mut s = String::new();
for (i, &byte) in data.iter().enumerate() {
if i % 16 == 0 {
write!(&mut s, "\n").unwrap();
} else if i % 8 == 0 {
write!(&mut s, " ").unwrap();
}
write!(&mut s, "{:02X} ", byte).unwrap();
}
s
}
// only compare the data part for fat pointers (ignore the vtable part)
// for some (buggy) reason, the vtable part may be different even if the data reference the same
// object
// See <https://github.com/Genymobile/gnirehtet/issues/61#issuecomment-370933770>
pub fn ptr_data_eq<T: ?Sized>(lhs: *const T, rhs: *const T) -> bool {
// cast to thin pointers to ignore the vtable part
lhs as *const () == rhs as *const ()
}
| to_string | identifier_name |
binary.rs | /*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::fmt::Write;
use byteorder::{BigEndian, ByteOrder};
pub fn to_byte_array(value: u32) -> [u8; 4] {
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, value);
raw
}
pub fn to_string(data: &[u8]) -> String {
let mut s = String::new();
for (i, &byte) in data.iter().enumerate() {
if i % 16 == 0 | else if i % 8 == 0 {
write!(&mut s, " ").unwrap();
}
write!(&mut s, "{:02X} ", byte).unwrap();
}
s
}
// only compare the data part for fat pointers (ignore the vtable part)
// for some (buggy) reason, the vtable part may be different even if the data reference the same
// object
// See <https://github.com/Genymobile/gnirehtet/issues/61#issuecomment-370933770>
pub fn ptr_data_eq<T: ?Sized>(lhs: *const T, rhs: *const T) -> bool {
// cast to thin pointers to ignore the vtable part
lhs as *const () == rhs as *const ()
}
| {
write!(&mut s, "\n").unwrap();
} | conditional_block |
binary.rs | /*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::fmt::Write;
use byteorder::{BigEndian, ByteOrder};
pub fn to_byte_array(value: u32) -> [u8; 4] {
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, value);
raw
}
pub fn to_string(data: &[u8]) -> String {
let mut s = String::new();
for (i, &byte) in data.iter().enumerate() {
if i % 16 == 0 {
write!(&mut s, "\n").unwrap();
} else if i % 8 == 0 {
write!(&mut s, " ").unwrap();
} | write!(&mut s, "{:02X} ", byte).unwrap();
}
s
}
// only compare the data part for fat pointers (ignore the vtable part)
// for some (buggy) reason, the vtable part may be different even if the data reference the same
// object
// See <https://github.com/Genymobile/gnirehtet/issues/61#issuecomment-370933770>
pub fn ptr_data_eq<T: ?Sized>(lhs: *const T, rhs: *const T) -> bool {
// cast to thin pointers to ignore the vtable part
lhs as *const () == rhs as *const ()
} | random_line_split | |
binary.rs | /*
* Copyright (C) 2017 Genymobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::fmt::Write;
use byteorder::{BigEndian, ByteOrder};
pub fn to_byte_array(value: u32) -> [u8; 4] |
pub fn to_string(data: &[u8]) -> String {
let mut s = String::new();
for (i, &byte) in data.iter().enumerate() {
if i % 16 == 0 {
write!(&mut s, "\n").unwrap();
} else if i % 8 == 0 {
write!(&mut s, " ").unwrap();
}
write!(&mut s, "{:02X} ", byte).unwrap();
}
s
}
// only compare the data part for fat pointers (ignore the vtable part)
// for some (buggy) reason, the vtable part may be different even if the data reference the same
// object
// See <https://github.com/Genymobile/gnirehtet/issues/61#issuecomment-370933770>
pub fn ptr_data_eq<T: ?Sized>(lhs: *const T, rhs: *const T) -> bool {
// cast to thin pointers to ignore the vtable part
lhs as *const () == rhs as *const ()
}
| {
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, value);
raw
} | identifier_body |
borrowck-overloaded-index-autoderef.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we still see borrowck errors of various kinds when using
// indexing and autoderef in combination.
use std::ops::{Index, IndexMut};
struct Foo {
x: isize,
y: isize,
}
impl<'a> Index<&'a String> for Foo {
type Output = isize;
fn index(&self, z: &String) -> &isize {
if *z == "x" {
&self.x
} else {
&self.y
}
}
}
impl<'a> IndexMut<&'a String> for Foo {
fn index_mut(&mut self, z: &String) -> &mut isize {
if *z == "x" {
&mut self.x
} else |
}
}
fn test1(mut f: Box<Foo>, s: String) {
let p = &mut f[&s];
let q = &f[&s]; //~ ERROR cannot borrow
p.use_mut();
}
fn test2(mut f: Box<Foo>, s: String) {
let p = &mut f[&s];
let q = &mut f[&s]; //~ ERROR cannot borrow
p.use_mut();
}
struct Bar {
foo: Foo
}
fn test3(mut f: Box<Bar>, s: String) {
let p = &mut f.foo[&s];
let q = &mut f.foo[&s]; //~ ERROR cannot borrow
p.use_mut();
}
fn test4(mut f: Box<Bar>, s: String) {
let p = &f.foo[&s];
let q = &f.foo[&s];
p.use_ref();
}
fn test5(mut f: Box<Bar>, s: String) {
let p = &f.foo[&s];
let q = &mut f.foo[&s]; //~ ERROR cannot borrow
p.use_ref();
}
fn test6(mut f: Box<Bar>, g: Foo, s: String) {
let p = &f.foo[&s];
f.foo = g; //~ ERROR cannot assign
p.use_ref();
}
fn test7(mut f: Box<Bar>, g: Bar, s: String) {
let p = &f.foo[&s];
*f = g; //~ ERROR cannot assign
p.use_ref();
}
fn test8(mut f: Box<Bar>, g: Foo, s: String) {
let p = &mut f.foo[&s];
f.foo = g; //~ ERROR cannot assign
p.use_mut();
}
fn test9(mut f: Box<Bar>, g: Bar, s: String) {
let p = &mut f.foo[&s];
*f = g; //~ ERROR cannot assign
p.use_mut();
}
fn main() {
}
trait Fake { fn use_mut(&mut self) { } fn use_ref(&self) { } }
impl<T> Fake for T { }
| {
&mut self.y
} | conditional_block |
borrowck-overloaded-index-autoderef.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we still see borrowck errors of various kinds when using
// indexing and autoderef in combination.
use std::ops::{Index, IndexMut};
struct Foo {
x: isize,
y: isize,
}
impl<'a> Index<&'a String> for Foo {
type Output = isize;
fn index(&self, z: &String) -> &isize {
if *z == "x" {
&self.x
} else {
&self.y
}
}
}
impl<'a> IndexMut<&'a String> for Foo {
fn index_mut(&mut self, z: &String) -> &mut isize {
if *z == "x" {
&mut self.x
} else {
&mut self.y
}
}
}
fn test1(mut f: Box<Foo>, s: String) {
let p = &mut f[&s];
let q = &f[&s]; //~ ERROR cannot borrow
p.use_mut();
}
fn test2(mut f: Box<Foo>, s: String) {
let p = &mut f[&s];
let q = &mut f[&s]; //~ ERROR cannot borrow
p.use_mut();
}
struct Bar {
foo: Foo
}
fn | (mut f: Box<Bar>, s: String) {
let p = &mut f.foo[&s];
let q = &mut f.foo[&s]; //~ ERROR cannot borrow
p.use_mut();
}
fn test4(mut f: Box<Bar>, s: String) {
let p = &f.foo[&s];
let q = &f.foo[&s];
p.use_ref();
}
fn test5(mut f: Box<Bar>, s: String) {
let p = &f.foo[&s];
let q = &mut f.foo[&s]; //~ ERROR cannot borrow
p.use_ref();
}
fn test6(mut f: Box<Bar>, g: Foo, s: String) {
let p = &f.foo[&s];
f.foo = g; //~ ERROR cannot assign
p.use_ref();
}
fn test7(mut f: Box<Bar>, g: Bar, s: String) {
let p = &f.foo[&s];
*f = g; //~ ERROR cannot assign
p.use_ref();
}
fn test8(mut f: Box<Bar>, g: Foo, s: String) {
let p = &mut f.foo[&s];
f.foo = g; //~ ERROR cannot assign
p.use_mut();
}
fn test9(mut f: Box<Bar>, g: Bar, s: String) {
let p = &mut f.foo[&s];
*f = g; //~ ERROR cannot assign
p.use_mut();
}
fn main() {
}
trait Fake { fn use_mut(&mut self) { } fn use_ref(&self) { } }
impl<T> Fake for T { }
| test3 | identifier_name |
borrowck-overloaded-index-autoderef.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we still see borrowck errors of various kinds when using
// indexing and autoderef in combination.
use std::ops::{Index, IndexMut};
struct Foo {
x: isize,
y: isize,
}
impl<'a> Index<&'a String> for Foo {
type Output = isize;
fn index(&self, z: &String) -> &isize {
if *z == "x" {
&self.x
} else {
&self.y
}
}
}
impl<'a> IndexMut<&'a String> for Foo { | } else {
&mut self.y
}
}
}
fn test1(mut f: Box<Foo>, s: String) {
let p = &mut f[&s];
let q = &f[&s]; //~ ERROR cannot borrow
p.use_mut();
}
fn test2(mut f: Box<Foo>, s: String) {
let p = &mut f[&s];
let q = &mut f[&s]; //~ ERROR cannot borrow
p.use_mut();
}
struct Bar {
foo: Foo
}
fn test3(mut f: Box<Bar>, s: String) {
let p = &mut f.foo[&s];
let q = &mut f.foo[&s]; //~ ERROR cannot borrow
p.use_mut();
}
fn test4(mut f: Box<Bar>, s: String) {
let p = &f.foo[&s];
let q = &f.foo[&s];
p.use_ref();
}
fn test5(mut f: Box<Bar>, s: String) {
let p = &f.foo[&s];
let q = &mut f.foo[&s]; //~ ERROR cannot borrow
p.use_ref();
}
fn test6(mut f: Box<Bar>, g: Foo, s: String) {
let p = &f.foo[&s];
f.foo = g; //~ ERROR cannot assign
p.use_ref();
}
fn test7(mut f: Box<Bar>, g: Bar, s: String) {
let p = &f.foo[&s];
*f = g; //~ ERROR cannot assign
p.use_ref();
}
fn test8(mut f: Box<Bar>, g: Foo, s: String) {
let p = &mut f.foo[&s];
f.foo = g; //~ ERROR cannot assign
p.use_mut();
}
fn test9(mut f: Box<Bar>, g: Bar, s: String) {
let p = &mut f.foo[&s];
*f = g; //~ ERROR cannot assign
p.use_mut();
}
fn main() {
}
trait Fake { fn use_mut(&mut self) { } fn use_ref(&self) { } }
impl<T> Fake for T { } | fn index_mut(&mut self, z: &String) -> &mut isize {
if *z == "x" {
&mut self.x | random_line_split |
Storage_IOCP.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Greg Hazel
import os
import sys
import ctypes
import win32file
from bisect import bisect_right
from BTL.translation import _
from BTL import BTFailure
from BTL.defer import Deferred, ThreadedDeferred, Failure, wrap_task
from BTL.yielddefer import launch_coroutine
from BitTorrent.platform import get_allocated_regions
from BTL.sparse_set import SparseSet
from BTL.DictWithLists import DictWithLists, DictWithSets
from BitTorrent.Storage_base import make_file_sparse, bad_libc_workaround, is_open_for_write
from BitTorrent.Storage_base import open_sparse_file as open_sparse_file_base
from BitTorrent.Storage_base import UnregisteredFileException
# not needed, but it raises errors for platforms that don't support iocp
from twisted.internet.iocpreactor import _iocp
from twisted.internet.iocpreactor.proactor import Proactor
from twisted.internet import reactor
assert isinstance(reactor, Proactor), "You imported twisted.internet.reactor before RawServer_twisted!"
class OverlappedOp:
def initiateOp(self, handle, seekpos, buffer):
assert len(buffer) > 0
assert seekpos >= 0
df = Deferred()
try:
self.op(handle, seekpos, buffer,
self.ovDone, (handle, buffer))
except:
df.errback(Failure())
else:
self.df = df
return df
def op(self, *a, **kw):
raise NotImplementedError
def ovDone(self, ret, bytes, (handle, buffer)):
df = self.df
del self.df
if ret or not bytes:
try:
raise ctypes.WinError()
except:
df.errback(Failure())
else:
self.opComplete(df, bytes, buffer)
def opComplete(self, df, bytes, buffer):
raise NotImplementedError
class ReadFileOp(OverlappedOp):
op = reactor.issueReadFile
def opComplete(self, df, bytes, buffer):
df.callback(buffer[:bytes])
class WriteFileOp(OverlappedOp):
op = reactor.issueWriteFile
def opComplete(self, df, bytes, buffer):
df.callback(bytes)
class IOCPFile(object):
# standard block size by default
buffer_size = 16384
def __init__(self, handle):
from twisted.internet import reactor
self.reactor = reactor
self.handle = handle
self.osfhandle = win32file._get_osfhandle(self.handle.fileno())
self.mode = self.handle.mode
# CloseHandle automatically calls CancelIo
self.close = self.handle.close
self.fileno = self.handle.fileno
self.read_op = ReadFileOp()
self.write_op = WriteFileOp()
self.readbuf = self.reactor.AllocateReadBuffer(self.buffer_size)
def seek(self, offset):
self.seekpos = offset
def write(self, data):
return self.write_op.initiateOp(self.osfhandle, self.seekpos, data)
def read(self, bytes):
if bytes == self.buffer_size:
readbuf = self.readbuf
else:
# hmmmm, slow. but, readfile tries to fill the buffer,
# so maybe this is better than reading too much all the time.
readbuf = self.reactor.AllocateReadBuffer(bytes)
return self.read_op.initiateOp(self.osfhandle, self.seekpos, readbuf)
def open_sparse_file(path, mode, length=0, overlapped=True):
return IOCPFile(open_sparse_file_base(path, mode, length, overlapped))
class FilePool(object):
def __init__(self, doneflag, add_task, external_add_task, max_files_open, num_disk_threads):
self.add_task = add_task
self.file_to_torrent = {}
self.waiting_ops = []
self.active_file_to_handles = DictWithSets()
self.open_file_to_handles = DictWithLists()
self.set_max_files_open(max_files_open)
def close_all(self):
df = Deferred()
self._close_all(df)
return df
def _close_all(self, df):
failures = {}
while len(self.open_file_to_handles) > 0:
filename, handle = self.open_file_to_handles.popitem()
try:
handle.close()
except:
failures[self.file_to_torrent[filename]] = Failure()
for torrent, failure in failures.iteritems():
torrent.got_exception(failure)
if self.get_open_file_count() > 0:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_all, df)
else:
df.callback(True)
def close_files(self, file_set):
df = Deferred()
self._close_files(df, file_set)
return df
def | (self, df, file_set):
failure = None
done = False
filenames = self.open_file_to_handles.keys()
for filename in filenames:
if filename not in file_set:
continue
handles = self.open_file_to_handles.poprow(filename)
for handle in handles:
try:
handle.close()
except:
failure = Failure()
done = True
for filename in file_set.iterkeys():
if filename in self.active_file_to_handles:
done = False
break
if failure is not None:
df.errback(failure)
if not done:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_files, df, file_set)
else:
df.callback(True)
def set_max_files_open(self, max_files_open):
if max_files_open <= 0:
max_files_open = 1e100
self.max_files_open = max_files_open
self.close_all()
def add_files(self, files, torrent):
for filename in files:
if filename in self.file_to_torrent:
raise BTFailure(_("File %s belongs to another running torrent")
% filename)
for filename in files:
self.file_to_torrent[filename] = torrent
def remove_files(self, files):
for filename in files:
del self.file_to_torrent[filename]
def _ensure_exists(self, filename, length=0):
if not os.path.exists(filename):
f = os.path.split(filename)[0]
if f != '' and not os.path.exists(f):
os.makedirs(f)
f = file(filename, 'wb')
make_file_sparse(filename, f, length)
f.close()
def get_open_file_count(self):
t = self.open_file_to_handles.total_length()
t += self.active_file_to_handles.total_length()
return t
def free_handle_notify(self):
if self.waiting_ops:
args = self.waiting_ops.pop(0)
self._produce_handle(*args)
def acquire_handle(self, filename, for_write, length=0):
df = Deferred()
if filename not in self.file_to_torrent:
raise UnregisteredFileException()
if self.active_file_to_handles.total_length() == self.max_files_open:
self.waiting_ops.append((df, filename, for_write, length))
else:
self._produce_handle(df, filename, for_write, length)
return df
def _produce_handle(self, df, filename, for_write, length):
if filename in self.open_file_to_handles:
handle = self.open_file_to_handles.pop_from_row(filename)
if for_write and not is_open_for_write(handle.mode):
handle.close()
handle = open_sparse_file(filename, 'rb+', length=length)
#elif not for_write and is_open_for_write(handle.mode):
# handle.close()
# handle = file(filename, 'rb', 0)
else:
if self.get_open_file_count() == self.max_files_open:
oldfname, oldhandle = self.open_file_to_handles.popitem()
oldhandle.close()
self._ensure_exists(filename, length)
if for_write:
handle = open_sparse_file(filename, 'rb+', length=length)
else:
handle = open_sparse_file(filename, 'rb', length=length)
self.active_file_to_handles.push_to_row(filename, handle)
df.callback(handle)
def release_handle(self, filename, handle):
self.active_file_to_handles.remove_fom_row(filename, handle)
self.open_file_to_handles.push_to_row(filename, handle)
self.free_handle_notify()
class Storage(object):
def __init__(self, config, filepool, save_path, files, add_task,
external_add_task, doneflag):
self.filepool = filepool
self.config = config
self.doneflag = doneflag
self.add_task = add_task
self.external_add_task = external_add_task
self.initialize(save_path, files)
def initialize(self, save_path, files):
# a list of bytes ranges and filenames for window-based IO
self.ranges = []
# a dict of filename-to-ranges for piece priorities and filename lookup
self.range_by_name = {}
# a sparse set for smart allocation detection
self.allocated_regions = SparseSet()
# dict of filename-to-length on disk (for % complete in the file view)
self.undownloaded = {}
self.save_path = save_path
# Rather implement this as an ugly hack here than change all the
# individual calls. Affects all torrent instances using this module.
if self.config['bad_libc_workaround']:
bad_libc_workaround()
self.initialized = False
self.startup_df = ThreadedDeferred(wrap_task(self.external_add_task),
self._build_file_structs,
self.filepool, files)
return self.startup_df
def _build_file_structs(self, filepool, files):
total = 0
for filename, length in files:
# we're shutting down, abort.
if self.doneflag.isSet():
return False
self.undownloaded[filename] = length
if length > 0:
self.ranges.append((total, total + length, filename))
self.range_by_name[filename] = (total, total + length)
if os.path.exists(filename):
if not os.path.isfile(filename):
raise BTFailure(_("File %s already exists, but is not a "
"regular file") % filename)
l = os.path.getsize(filename)
if l > length:
# This is the truncation Bram was talking about that no one
# else thinks is a good idea.
#h = file(filename, 'rb+')
#make_file_sparse(filename, h, length)
#h.truncate(length)
#h.close()
l = length
a = get_allocated_regions(filename, begin=0, length=l)
if a is not None:
a.offset(total)
else:
a = SparseSet()
if l > 0:
a.add(total, total + l)
self.allocated_regions += a
total += length
self.total_length = total
self.initialized = True
return True
def get_byte_range_for_filename(self, filename):
if filename not in self.range_by_name:
filename = os.path.normpath(filename)
filename = os.path.join(self.save_path, filename)
return self.range_by_name[filename]
def was_preallocated(self, pos, length):
return self.allocated_regions.is_range_in(pos, pos+length)
def get_total_length(self):
return self.total_length
def _intervals(self, pos, amount):
r = []
stop = pos + amount
p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
for begin, end, filename in self.ranges[p:]:
if begin >= stop:
break
r.append((filename,
max(pos, begin) - begin, min(end, stop) - begin))
return r
def _file_op(self, filename, pos, param, write):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
hdf = self.filepool.acquire_handle(filename, for_write=write,
length=length)
def op(h):
h.seek(pos)
if write:
odf = h.write(param)
else:
odf = h.read(param)
def like_finally(r):
self.filepool.release_handle(filename, h)
return r
odf.addBoth(like_finally)
return odf
hdf.addCallback(op)
return hdf
def _batch_read(self, pos, amount):
dfs = []
r = []
# queue all the reads
for filename, pos, end in self._intervals(pos, amount):
df = self._file_op(filename, pos, end - pos, write=False)
dfs.append(df)
# yield on all the reads in order - they complete in any order
exc = None
for df in dfs:
yield df
try:
r.append(df.getResult())
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
r = ''.join(r)
if len(r) != amount:
raise BTFailure(_("Short read (%d of %d) - "
"something truncated files?") %
(len(r), amount))
yield r
def read(self, pos, amount):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_read, pos, amount)
return df
def _batch_write(self, pos, s):
dfs = []
total = 0
amount = len(s)
# queue all the writes
for filename, begin, end in self._intervals(pos, amount):
length = end - begin
assert length > 0, '%s %s' % (pos, amount)
d = buffer(s, total, length)
total += length
df = self._file_op(filename, begin, d, write=True)
dfs.append(df)
assert total == amount, '%s and %s' % (total, amount)
written = 0
# yield on all the writes - they complete in any order
exc = None
for df in dfs:
yield df
try:
written += df.getResult()
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
assert total == written, '%s and %s' % (total, written)
yield total
def write(self, pos, s):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_write, pos, s)
return df
def close(self):
if not self.initialized:
def post_init(r):
return self.filepool.close_files(self.range_by_name)
self.startup_df.addCallback(post_init)
return self.startup_df
df = self.filepool.close_files(self.range_by_name)
return df
def downloaded(self, pos, length):
for filename, begin, end in self._intervals(pos, length):
self.undownloaded[filename] -= end - begin
| _close_files | identifier_name |
Storage_IOCP.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Greg Hazel
import os
import sys
import ctypes
import win32file
from bisect import bisect_right
from BTL.translation import _
from BTL import BTFailure
from BTL.defer import Deferred, ThreadedDeferred, Failure, wrap_task
from BTL.yielddefer import launch_coroutine
from BitTorrent.platform import get_allocated_regions
from BTL.sparse_set import SparseSet
from BTL.DictWithLists import DictWithLists, DictWithSets
from BitTorrent.Storage_base import make_file_sparse, bad_libc_workaround, is_open_for_write
from BitTorrent.Storage_base import open_sparse_file as open_sparse_file_base
from BitTorrent.Storage_base import UnregisteredFileException
# not needed, but it raises errors for platforms that don't support iocp
from twisted.internet.iocpreactor import _iocp
from twisted.internet.iocpreactor.proactor import Proactor
from twisted.internet import reactor
assert isinstance(reactor, Proactor), "You imported twisted.internet.reactor before RawServer_twisted!"
class OverlappedOp:
def initiateOp(self, handle, seekpos, buffer):
assert len(buffer) > 0
assert seekpos >= 0
df = Deferred()
try:
self.op(handle, seekpos, buffer,
self.ovDone, (handle, buffer))
except:
df.errback(Failure())
else:
self.df = df
return df
def op(self, *a, **kw):
raise NotImplementedError
def ovDone(self, ret, bytes, (handle, buffer)):
df = self.df
del self.df
if ret or not bytes:
try:
raise ctypes.WinError()
except:
df.errback(Failure())
else:
self.opComplete(df, bytes, buffer)
def opComplete(self, df, bytes, buffer):
raise NotImplementedError
class ReadFileOp(OverlappedOp):
op = reactor.issueReadFile
def opComplete(self, df, bytes, buffer):
df.callback(buffer[:bytes])
class WriteFileOp(OverlappedOp):
op = reactor.issueWriteFile
def opComplete(self, df, bytes, buffer):
df.callback(bytes)
class IOCPFile(object):
# standard block size by default
buffer_size = 16384
def __init__(self, handle):
from twisted.internet import reactor
self.reactor = reactor
self.handle = handle
self.osfhandle = win32file._get_osfhandle(self.handle.fileno())
self.mode = self.handle.mode
# CloseHandle automatically calls CancelIo
self.close = self.handle.close
self.fileno = self.handle.fileno
self.read_op = ReadFileOp()
self.write_op = WriteFileOp()
self.readbuf = self.reactor.AllocateReadBuffer(self.buffer_size)
def seek(self, offset):
self.seekpos = offset
def write(self, data):
return self.write_op.initiateOp(self.osfhandle, self.seekpos, data)
def read(self, bytes):
if bytes == self.buffer_size:
readbuf = self.readbuf
else:
# hmmmm, slow. but, readfile tries to fill the buffer,
# so maybe this is better than reading too much all the time.
readbuf = self.reactor.AllocateReadBuffer(bytes)
return self.read_op.initiateOp(self.osfhandle, self.seekpos, readbuf)
def open_sparse_file(path, mode, length=0, overlapped=True):
return IOCPFile(open_sparse_file_base(path, mode, length, overlapped))
class FilePool(object):
def __init__(self, doneflag, add_task, external_add_task, max_files_open, num_disk_threads):
self.add_task = add_task
self.file_to_torrent = {}
self.waiting_ops = []
self.active_file_to_handles = DictWithSets()
self.open_file_to_handles = DictWithLists()
self.set_max_files_open(max_files_open)
def close_all(self):
df = Deferred()
self._close_all(df)
return df
def _close_all(self, df):
failures = {}
while len(self.open_file_to_handles) > 0:
filename, handle = self.open_file_to_handles.popitem()
try:
handle.close()
except:
failures[self.file_to_torrent[filename]] = Failure()
for torrent, failure in failures.iteritems():
torrent.got_exception(failure)
if self.get_open_file_count() > 0:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_all, df)
else:
df.callback(True)
def close_files(self, file_set):
df = Deferred()
self._close_files(df, file_set)
return df
def _close_files(self, df, file_set):
failure = None
done = False
filenames = self.open_file_to_handles.keys()
for filename in filenames:
if filename not in file_set:
continue
handles = self.open_file_to_handles.poprow(filename)
for handle in handles:
try:
handle.close()
except:
failure = Failure()
done = True
for filename in file_set.iterkeys():
if filename in self.active_file_to_handles:
done = False
break
if failure is not None:
df.errback(failure)
if not done:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_files, df, file_set)
else:
df.callback(True)
def set_max_files_open(self, max_files_open):
if max_files_open <= 0:
max_files_open = 1e100
self.max_files_open = max_files_open
self.close_all()
def add_files(self, files, torrent):
for filename in files:
if filename in self.file_to_torrent:
raise BTFailure(_("File %s belongs to another running torrent")
% filename)
for filename in files:
self.file_to_torrent[filename] = torrent
def remove_files(self, files):
for filename in files:
del self.file_to_torrent[filename]
def _ensure_exists(self, filename, length=0):
if not os.path.exists(filename):
f = os.path.split(filename)[0]
if f != '' and not os.path.exists(f):
os.makedirs(f)
f = file(filename, 'wb')
make_file_sparse(filename, f, length)
f.close()
def get_open_file_count(self):
t = self.open_file_to_handles.total_length()
t += self.active_file_to_handles.total_length()
return t
def free_handle_notify(self):
if self.waiting_ops:
args = self.waiting_ops.pop(0)
self._produce_handle(*args)
def acquire_handle(self, filename, for_write, length=0):
df = Deferred()
if filename not in self.file_to_torrent:
raise UnregisteredFileException()
if self.active_file_to_handles.total_length() == self.max_files_open:
self.waiting_ops.append((df, filename, for_write, length))
else:
self._produce_handle(df, filename, for_write, length)
return df
def _produce_handle(self, df, filename, for_write, length):
if filename in self.open_file_to_handles:
handle = self.open_file_to_handles.pop_from_row(filename)
if for_write and not is_open_for_write(handle.mode):
handle.close()
handle = open_sparse_file(filename, 'rb+', length=length)
#elif not for_write and is_open_for_write(handle.mode):
# handle.close()
# handle = file(filename, 'rb', 0)
else:
if self.get_open_file_count() == self.max_files_open:
oldfname, oldhandle = self.open_file_to_handles.popitem()
oldhandle.close()
self._ensure_exists(filename, length)
if for_write:
handle = open_sparse_file(filename, 'rb+', length=length)
else:
handle = open_sparse_file(filename, 'rb', length=length)
self.active_file_to_handles.push_to_row(filename, handle)
df.callback(handle)
def release_handle(self, filename, handle):
self.active_file_to_handles.remove_fom_row(filename, handle)
self.open_file_to_handles.push_to_row(filename, handle)
self.free_handle_notify()
class Storage(object):
def __init__(self, config, filepool, save_path, files, add_task,
external_add_task, doneflag):
self.filepool = filepool
self.config = config
self.doneflag = doneflag
self.add_task = add_task
self.external_add_task = external_add_task
self.initialize(save_path, files)
def initialize(self, save_path, files):
# a list of bytes ranges and filenames for window-based IO
self.ranges = []
# a dict of filename-to-ranges for piece priorities and filename lookup
self.range_by_name = {}
# a sparse set for smart allocation detection
self.allocated_regions = SparseSet()
# dict of filename-to-length on disk (for % complete in the file view)
self.undownloaded = {}
self.save_path = save_path
# Rather implement this as an ugly hack here than change all the
# individual calls. Affects all torrent instances using this module.
if self.config['bad_libc_workaround']:
bad_libc_workaround()
self.initialized = False
self.startup_df = ThreadedDeferred(wrap_task(self.external_add_task),
self._build_file_structs,
self.filepool, files)
return self.startup_df
def _build_file_structs(self, filepool, files):
total = 0
for filename, length in files:
# we're shutting down, abort.
if self.doneflag.isSet():
return False
self.undownloaded[filename] = length
if length > 0:
self.ranges.append((total, total + length, filename))
self.range_by_name[filename] = (total, total + length)
if os.path.exists(filename):
if not os.path.isfile(filename):
raise BTFailure(_("File %s already exists, but is not a "
"regular file") % filename)
l = os.path.getsize(filename)
if l > length:
# This is the truncation Bram was talking about that no one
# else thinks is a good idea.
#h = file(filename, 'rb+')
#make_file_sparse(filename, h, length)
#h.truncate(length)
#h.close()
l = length
a = get_allocated_regions(filename, begin=0, length=l)
if a is not None:
a.offset(total)
else:
a = SparseSet()
if l > 0:
a.add(total, total + l)
self.allocated_regions += a
total += length
self.total_length = total
self.initialized = True
return True
def get_byte_range_for_filename(self, filename):
if filename not in self.range_by_name:
filename = os.path.normpath(filename)
filename = os.path.join(self.save_path, filename)
return self.range_by_name[filename]
def was_preallocated(self, pos, length):
return self.allocated_regions.is_range_in(pos, pos+length)
def get_total_length(self):
return self.total_length
def _intervals(self, pos, amount):
r = []
stop = pos + amount
p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
for begin, end, filename in self.ranges[p:]:
|
return r
def _file_op(self, filename, pos, param, write):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
hdf = self.filepool.acquire_handle(filename, for_write=write,
length=length)
def op(h):
h.seek(pos)
if write:
odf = h.write(param)
else:
odf = h.read(param)
def like_finally(r):
self.filepool.release_handle(filename, h)
return r
odf.addBoth(like_finally)
return odf
hdf.addCallback(op)
return hdf
def _batch_read(self, pos, amount):
dfs = []
r = []
# queue all the reads
for filename, pos, end in self._intervals(pos, amount):
df = self._file_op(filename, pos, end - pos, write=False)
dfs.append(df)
# yield on all the reads in order - they complete in any order
exc = None
for df in dfs:
yield df
try:
r.append(df.getResult())
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
r = ''.join(r)
if len(r) != amount:
raise BTFailure(_("Short read (%d of %d) - "
"something truncated files?") %
(len(r), amount))
yield r
def read(self, pos, amount):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_read, pos, amount)
return df
def _batch_write(self, pos, s):
dfs = []
total = 0
amount = len(s)
# queue all the writes
for filename, begin, end in self._intervals(pos, amount):
length = end - begin
assert length > 0, '%s %s' % (pos, amount)
d = buffer(s, total, length)
total += length
df = self._file_op(filename, begin, d, write=True)
dfs.append(df)
assert total == amount, '%s and %s' % (total, amount)
written = 0
# yield on all the writes - they complete in any order
exc = None
for df in dfs:
yield df
try:
written += df.getResult()
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
assert total == written, '%s and %s' % (total, written)
yield total
def write(self, pos, s):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_write, pos, s)
return df
def close(self):
if not self.initialized:
def post_init(r):
return self.filepool.close_files(self.range_by_name)
self.startup_df.addCallback(post_init)
return self.startup_df
df = self.filepool.close_files(self.range_by_name)
return df
def downloaded(self, pos, length):
for filename, begin, end in self._intervals(pos, length):
self.undownloaded[filename] -= end - begin
| if begin >= stop:
break
r.append((filename,
max(pos, begin) - begin, min(end, stop) - begin)) | conditional_block |
Storage_IOCP.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Greg Hazel
import os
import sys
import ctypes
import win32file
from bisect import bisect_right
from BTL.translation import _
from BTL import BTFailure
from BTL.defer import Deferred, ThreadedDeferred, Failure, wrap_task
from BTL.yielddefer import launch_coroutine
from BitTorrent.platform import get_allocated_regions
from BTL.sparse_set import SparseSet
from BTL.DictWithLists import DictWithLists, DictWithSets
from BitTorrent.Storage_base import make_file_sparse, bad_libc_workaround, is_open_for_write
from BitTorrent.Storage_base import open_sparse_file as open_sparse_file_base
from BitTorrent.Storage_base import UnregisteredFileException
# not needed, but it raises errors for platforms that don't support iocp
from twisted.internet.iocpreactor import _iocp
from twisted.internet.iocpreactor.proactor import Proactor
from twisted.internet import reactor
assert isinstance(reactor, Proactor), "You imported twisted.internet.reactor before RawServer_twisted!"
class OverlappedOp:
def initiateOp(self, handle, seekpos, buffer):
assert len(buffer) > 0
assert seekpos >= 0
df = Deferred()
try:
self.op(handle, seekpos, buffer,
self.ovDone, (handle, buffer))
except:
df.errback(Failure())
else:
self.df = df
return df
def op(self, *a, **kw):
raise NotImplementedError
def ovDone(self, ret, bytes, (handle, buffer)):
|
def opComplete(self, df, bytes, buffer):
raise NotImplementedError
class ReadFileOp(OverlappedOp):
op = reactor.issueReadFile
def opComplete(self, df, bytes, buffer):
df.callback(buffer[:bytes])
class WriteFileOp(OverlappedOp):
op = reactor.issueWriteFile
def opComplete(self, df, bytes, buffer):
df.callback(bytes)
class IOCPFile(object):
# standard block size by default
buffer_size = 16384
def __init__(self, handle):
from twisted.internet import reactor
self.reactor = reactor
self.handle = handle
self.osfhandle = win32file._get_osfhandle(self.handle.fileno())
self.mode = self.handle.mode
# CloseHandle automatically calls CancelIo
self.close = self.handle.close
self.fileno = self.handle.fileno
self.read_op = ReadFileOp()
self.write_op = WriteFileOp()
self.readbuf = self.reactor.AllocateReadBuffer(self.buffer_size)
def seek(self, offset):
self.seekpos = offset
def write(self, data):
return self.write_op.initiateOp(self.osfhandle, self.seekpos, data)
def read(self, bytes):
if bytes == self.buffer_size:
readbuf = self.readbuf
else:
# hmmmm, slow. but, readfile tries to fill the buffer,
# so maybe this is better than reading too much all the time.
readbuf = self.reactor.AllocateReadBuffer(bytes)
return self.read_op.initiateOp(self.osfhandle, self.seekpos, readbuf)
def open_sparse_file(path, mode, length=0, overlapped=True):
return IOCPFile(open_sparse_file_base(path, mode, length, overlapped))
class FilePool(object):
def __init__(self, doneflag, add_task, external_add_task, max_files_open, num_disk_threads):
self.add_task = add_task
self.file_to_torrent = {}
self.waiting_ops = []
self.active_file_to_handles = DictWithSets()
self.open_file_to_handles = DictWithLists()
self.set_max_files_open(max_files_open)
def close_all(self):
df = Deferred()
self._close_all(df)
return df
def _close_all(self, df):
failures = {}
while len(self.open_file_to_handles) > 0:
filename, handle = self.open_file_to_handles.popitem()
try:
handle.close()
except:
failures[self.file_to_torrent[filename]] = Failure()
for torrent, failure in failures.iteritems():
torrent.got_exception(failure)
if self.get_open_file_count() > 0:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_all, df)
else:
df.callback(True)
def close_files(self, file_set):
df = Deferred()
self._close_files(df, file_set)
return df
def _close_files(self, df, file_set):
failure = None
done = False
filenames = self.open_file_to_handles.keys()
for filename in filenames:
if filename not in file_set:
continue
handles = self.open_file_to_handles.poprow(filename)
for handle in handles:
try:
handle.close()
except:
failure = Failure()
done = True
for filename in file_set.iterkeys():
if filename in self.active_file_to_handles:
done = False
break
if failure is not None:
df.errback(failure)
if not done:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_files, df, file_set)
else:
df.callback(True)
def set_max_files_open(self, max_files_open):
if max_files_open <= 0:
max_files_open = 1e100
self.max_files_open = max_files_open
self.close_all()
def add_files(self, files, torrent):
for filename in files:
if filename in self.file_to_torrent:
raise BTFailure(_("File %s belongs to another running torrent")
% filename)
for filename in files:
self.file_to_torrent[filename] = torrent
def remove_files(self, files):
for filename in files:
del self.file_to_torrent[filename]
def _ensure_exists(self, filename, length=0):
if not os.path.exists(filename):
f = os.path.split(filename)[0]
if f != '' and not os.path.exists(f):
os.makedirs(f)
f = file(filename, 'wb')
make_file_sparse(filename, f, length)
f.close()
def get_open_file_count(self):
t = self.open_file_to_handles.total_length()
t += self.active_file_to_handles.total_length()
return t
def free_handle_notify(self):
if self.waiting_ops:
args = self.waiting_ops.pop(0)
self._produce_handle(*args)
def acquire_handle(self, filename, for_write, length=0):
df = Deferred()
if filename not in self.file_to_torrent:
raise UnregisteredFileException()
if self.active_file_to_handles.total_length() == self.max_files_open:
self.waiting_ops.append((df, filename, for_write, length))
else:
self._produce_handle(df, filename, for_write, length)
return df
def _produce_handle(self, df, filename, for_write, length):
if filename in self.open_file_to_handles:
handle = self.open_file_to_handles.pop_from_row(filename)
if for_write and not is_open_for_write(handle.mode):
handle.close()
handle = open_sparse_file(filename, 'rb+', length=length)
#elif not for_write and is_open_for_write(handle.mode):
# handle.close()
# handle = file(filename, 'rb', 0)
else:
if self.get_open_file_count() == self.max_files_open:
oldfname, oldhandle = self.open_file_to_handles.popitem()
oldhandle.close()
self._ensure_exists(filename, length)
if for_write:
handle = open_sparse_file(filename, 'rb+', length=length)
else:
handle = open_sparse_file(filename, 'rb', length=length)
self.active_file_to_handles.push_to_row(filename, handle)
df.callback(handle)
def release_handle(self, filename, handle):
self.active_file_to_handles.remove_fom_row(filename, handle)
self.open_file_to_handles.push_to_row(filename, handle)
self.free_handle_notify()
class Storage(object):
def __init__(self, config, filepool, save_path, files, add_task,
external_add_task, doneflag):
self.filepool = filepool
self.config = config
self.doneflag = doneflag
self.add_task = add_task
self.external_add_task = external_add_task
self.initialize(save_path, files)
def initialize(self, save_path, files):
# a list of bytes ranges and filenames for window-based IO
self.ranges = []
# a dict of filename-to-ranges for piece priorities and filename lookup
self.range_by_name = {}
# a sparse set for smart allocation detection
self.allocated_regions = SparseSet()
# dict of filename-to-length on disk (for % complete in the file view)
self.undownloaded = {}
self.save_path = save_path
# Rather implement this as an ugly hack here than change all the
# individual calls. Affects all torrent instances using this module.
if self.config['bad_libc_workaround']:
bad_libc_workaround()
self.initialized = False
self.startup_df = ThreadedDeferred(wrap_task(self.external_add_task),
self._build_file_structs,
self.filepool, files)
return self.startup_df
def _build_file_structs(self, filepool, files):
total = 0
for filename, length in files:
# we're shutting down, abort.
if self.doneflag.isSet():
return False
self.undownloaded[filename] = length
if length > 0:
self.ranges.append((total, total + length, filename))
self.range_by_name[filename] = (total, total + length)
if os.path.exists(filename):
if not os.path.isfile(filename):
raise BTFailure(_("File %s already exists, but is not a "
"regular file") % filename)
l = os.path.getsize(filename)
if l > length:
# This is the truncation Bram was talking about that no one
# else thinks is a good idea.
#h = file(filename, 'rb+')
#make_file_sparse(filename, h, length)
#h.truncate(length)
#h.close()
l = length
a = get_allocated_regions(filename, begin=0, length=l)
if a is not None:
a.offset(total)
else:
a = SparseSet()
if l > 0:
a.add(total, total + l)
self.allocated_regions += a
total += length
self.total_length = total
self.initialized = True
return True
def get_byte_range_for_filename(self, filename):
if filename not in self.range_by_name:
filename = os.path.normpath(filename)
filename = os.path.join(self.save_path, filename)
return self.range_by_name[filename]
def was_preallocated(self, pos, length):
return self.allocated_regions.is_range_in(pos, pos+length)
def get_total_length(self):
return self.total_length
def _intervals(self, pos, amount):
r = []
stop = pos + amount
p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
for begin, end, filename in self.ranges[p:]:
if begin >= stop:
break
r.append((filename,
max(pos, begin) - begin, min(end, stop) - begin))
return r
def _file_op(self, filename, pos, param, write):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
hdf = self.filepool.acquire_handle(filename, for_write=write,
length=length)
def op(h):
h.seek(pos)
if write:
odf = h.write(param)
else:
odf = h.read(param)
def like_finally(r):
self.filepool.release_handle(filename, h)
return r
odf.addBoth(like_finally)
return odf
hdf.addCallback(op)
return hdf
def _batch_read(self, pos, amount):
dfs = []
r = []
# queue all the reads
for filename, pos, end in self._intervals(pos, amount):
df = self._file_op(filename, pos, end - pos, write=False)
dfs.append(df)
# yield on all the reads in order - they complete in any order
exc = None
for df in dfs:
yield df
try:
r.append(df.getResult())
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
r = ''.join(r)
if len(r) != amount:
raise BTFailure(_("Short read (%d of %d) - "
"something truncated files?") %
(len(r), amount))
yield r
def read(self, pos, amount):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_read, pos, amount)
return df
def _batch_write(self, pos, s):
dfs = []
total = 0
amount = len(s)
# queue all the writes
for filename, begin, end in self._intervals(pos, amount):
length = end - begin
assert length > 0, '%s %s' % (pos, amount)
d = buffer(s, total, length)
total += length
df = self._file_op(filename, begin, d, write=True)
dfs.append(df)
assert total == amount, '%s and %s' % (total, amount)
written = 0
# yield on all the writes - they complete in any order
exc = None
for df in dfs:
yield df
try:
written += df.getResult()
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
assert total == written, '%s and %s' % (total, written)
yield total
def write(self, pos, s):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_write, pos, s)
return df
def close(self):
if not self.initialized:
def post_init(r):
return self.filepool.close_files(self.range_by_name)
self.startup_df.addCallback(post_init)
return self.startup_df
df = self.filepool.close_files(self.range_by_name)
return df
def downloaded(self, pos, length):
for filename, begin, end in self._intervals(pos, length):
self.undownloaded[filename] -= end - begin
| df = self.df
del self.df
if ret or not bytes:
try:
raise ctypes.WinError()
except:
df.errback(Failure())
else:
self.opComplete(df, bytes, buffer) | identifier_body |
Storage_IOCP.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Greg Hazel
import os
import sys
import ctypes
import win32file
from bisect import bisect_right
from BTL.translation import _
from BTL import BTFailure
from BTL.defer import Deferred, ThreadedDeferred, Failure, wrap_task
from BTL.yielddefer import launch_coroutine
from BitTorrent.platform import get_allocated_regions
from BTL.sparse_set import SparseSet
from BTL.DictWithLists import DictWithLists, DictWithSets
from BitTorrent.Storage_base import make_file_sparse, bad_libc_workaround, is_open_for_write
from BitTorrent.Storage_base import open_sparse_file as open_sparse_file_base
from BitTorrent.Storage_base import UnregisteredFileException
# not needed, but it raises errors for platforms that don't support iocp
from twisted.internet.iocpreactor import _iocp
from twisted.internet.iocpreactor.proactor import Proactor
from twisted.internet import reactor
assert isinstance(reactor, Proactor), "You imported twisted.internet.reactor before RawServer_twisted!"
class OverlappedOp:
def initiateOp(self, handle, seekpos, buffer):
assert len(buffer) > 0
assert seekpos >= 0
df = Deferred()
try:
self.op(handle, seekpos, buffer,
self.ovDone, (handle, buffer))
except:
df.errback(Failure())
else:
self.df = df
return df
def op(self, *a, **kw):
raise NotImplementedError
def ovDone(self, ret, bytes, (handle, buffer)):
df = self.df
del self.df
if ret or not bytes:
try:
raise ctypes.WinError()
except:
df.errback(Failure())
else:
self.opComplete(df, bytes, buffer)
def opComplete(self, df, bytes, buffer):
raise NotImplementedError
class ReadFileOp(OverlappedOp):
op = reactor.issueReadFile
def opComplete(self, df, bytes, buffer):
df.callback(buffer[:bytes])
class WriteFileOp(OverlappedOp):
op = reactor.issueWriteFile
def opComplete(self, df, bytes, buffer):
df.callback(bytes)
class IOCPFile(object):
# standard block size by default
buffer_size = 16384
def __init__(self, handle):
from twisted.internet import reactor
self.reactor = reactor
self.handle = handle
self.osfhandle = win32file._get_osfhandle(self.handle.fileno())
self.mode = self.handle.mode
# CloseHandle automatically calls CancelIo
self.close = self.handle.close
self.fileno = self.handle.fileno
self.read_op = ReadFileOp()
self.write_op = WriteFileOp()
self.readbuf = self.reactor.AllocateReadBuffer(self.buffer_size)
def seek(self, offset):
self.seekpos = offset
def write(self, data):
return self.write_op.initiateOp(self.osfhandle, self.seekpos, data)
def read(self, bytes):
if bytes == self.buffer_size:
readbuf = self.readbuf
else:
# hmmmm, slow. but, readfile tries to fill the buffer,
# so maybe this is better than reading too much all the time.
readbuf = self.reactor.AllocateReadBuffer(bytes)
return self.read_op.initiateOp(self.osfhandle, self.seekpos, readbuf)
def open_sparse_file(path, mode, length=0, overlapped=True):
return IOCPFile(open_sparse_file_base(path, mode, length, overlapped))
class FilePool(object):
def __init__(self, doneflag, add_task, external_add_task, max_files_open, num_disk_threads):
self.add_task = add_task
self.file_to_torrent = {}
self.waiting_ops = []
self.active_file_to_handles = DictWithSets()
self.open_file_to_handles = DictWithLists()
self.set_max_files_open(max_files_open)
def close_all(self):
df = Deferred()
self._close_all(df)
return df
def _close_all(self, df):
failures = {}
while len(self.open_file_to_handles) > 0:
filename, handle = self.open_file_to_handles.popitem()
try:
handle.close()
except:
failures[self.file_to_torrent[filename]] = Failure()
for torrent, failure in failures.iteritems():
torrent.got_exception(failure)
if self.get_open_file_count() > 0:
# it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_all, df)
else:
df.callback(True)
def close_files(self, file_set):
df = Deferred()
self._close_files(df, file_set)
return df
def _close_files(self, df, file_set):
failure = None
done = False
filenames = self.open_file_to_handles.keys()
for filename in filenames:
if filename not in file_set:
continue
handles = self.open_file_to_handles.poprow(filename)
for handle in handles:
try:
handle.close()
except:
failure = Failure()
done = True
for filename in file_set.iterkeys():
if filename in self.active_file_to_handles:
done = False
break | # it would be nice to wait on the deferred for the outstanding ops
self.add_task(0.5, self._close_files, df, file_set)
else:
df.callback(True)
def set_max_files_open(self, max_files_open):
if max_files_open <= 0:
max_files_open = 1e100
self.max_files_open = max_files_open
self.close_all()
def add_files(self, files, torrent):
for filename in files:
if filename in self.file_to_torrent:
raise BTFailure(_("File %s belongs to another running torrent")
% filename)
for filename in files:
self.file_to_torrent[filename] = torrent
def remove_files(self, files):
for filename in files:
del self.file_to_torrent[filename]
def _ensure_exists(self, filename, length=0):
if not os.path.exists(filename):
f = os.path.split(filename)[0]
if f != '' and not os.path.exists(f):
os.makedirs(f)
f = file(filename, 'wb')
make_file_sparse(filename, f, length)
f.close()
def get_open_file_count(self):
t = self.open_file_to_handles.total_length()
t += self.active_file_to_handles.total_length()
return t
def free_handle_notify(self):
if self.waiting_ops:
args = self.waiting_ops.pop(0)
self._produce_handle(*args)
def acquire_handle(self, filename, for_write, length=0):
df = Deferred()
if filename not in self.file_to_torrent:
raise UnregisteredFileException()
if self.active_file_to_handles.total_length() == self.max_files_open:
self.waiting_ops.append((df, filename, for_write, length))
else:
self._produce_handle(df, filename, for_write, length)
return df
def _produce_handle(self, df, filename, for_write, length):
if filename in self.open_file_to_handles:
handle = self.open_file_to_handles.pop_from_row(filename)
if for_write and not is_open_for_write(handle.mode):
handle.close()
handle = open_sparse_file(filename, 'rb+', length=length)
#elif not for_write and is_open_for_write(handle.mode):
# handle.close()
# handle = file(filename, 'rb', 0)
else:
if self.get_open_file_count() == self.max_files_open:
oldfname, oldhandle = self.open_file_to_handles.popitem()
oldhandle.close()
self._ensure_exists(filename, length)
if for_write:
handle = open_sparse_file(filename, 'rb+', length=length)
else:
handle = open_sparse_file(filename, 'rb', length=length)
self.active_file_to_handles.push_to_row(filename, handle)
df.callback(handle)
def release_handle(self, filename, handle):
self.active_file_to_handles.remove_fom_row(filename, handle)
self.open_file_to_handles.push_to_row(filename, handle)
self.free_handle_notify()
class Storage(object):
def __init__(self, config, filepool, save_path, files, add_task,
external_add_task, doneflag):
self.filepool = filepool
self.config = config
self.doneflag = doneflag
self.add_task = add_task
self.external_add_task = external_add_task
self.initialize(save_path, files)
def initialize(self, save_path, files):
# a list of bytes ranges and filenames for window-based IO
self.ranges = []
# a dict of filename-to-ranges for piece priorities and filename lookup
self.range_by_name = {}
# a sparse set for smart allocation detection
self.allocated_regions = SparseSet()
# dict of filename-to-length on disk (for % complete in the file view)
self.undownloaded = {}
self.save_path = save_path
# Rather implement this as an ugly hack here than change all the
# individual calls. Affects all torrent instances using this module.
if self.config['bad_libc_workaround']:
bad_libc_workaround()
self.initialized = False
self.startup_df = ThreadedDeferred(wrap_task(self.external_add_task),
self._build_file_structs,
self.filepool, files)
return self.startup_df
def _build_file_structs(self, filepool, files):
total = 0
for filename, length in files:
# we're shutting down, abort.
if self.doneflag.isSet():
return False
self.undownloaded[filename] = length
if length > 0:
self.ranges.append((total, total + length, filename))
self.range_by_name[filename] = (total, total + length)
if os.path.exists(filename):
if not os.path.isfile(filename):
raise BTFailure(_("File %s already exists, but is not a "
"regular file") % filename)
l = os.path.getsize(filename)
if l > length:
# This is the truncation Bram was talking about that no one
# else thinks is a good idea.
#h = file(filename, 'rb+')
#make_file_sparse(filename, h, length)
#h.truncate(length)
#h.close()
l = length
a = get_allocated_regions(filename, begin=0, length=l)
if a is not None:
a.offset(total)
else:
a = SparseSet()
if l > 0:
a.add(total, total + l)
self.allocated_regions += a
total += length
self.total_length = total
self.initialized = True
return True
def get_byte_range_for_filename(self, filename):
if filename not in self.range_by_name:
filename = os.path.normpath(filename)
filename = os.path.join(self.save_path, filename)
return self.range_by_name[filename]
def was_preallocated(self, pos, length):
return self.allocated_regions.is_range_in(pos, pos+length)
def get_total_length(self):
return self.total_length
def _intervals(self, pos, amount):
r = []
stop = pos + amount
p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
for begin, end, filename in self.ranges[p:]:
if begin >= stop:
break
r.append((filename,
max(pos, begin) - begin, min(end, stop) - begin))
return r
def _file_op(self, filename, pos, param, write):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
hdf = self.filepool.acquire_handle(filename, for_write=write,
length=length)
def op(h):
h.seek(pos)
if write:
odf = h.write(param)
else:
odf = h.read(param)
def like_finally(r):
self.filepool.release_handle(filename, h)
return r
odf.addBoth(like_finally)
return odf
hdf.addCallback(op)
return hdf
def _batch_read(self, pos, amount):
dfs = []
r = []
# queue all the reads
for filename, pos, end in self._intervals(pos, amount):
df = self._file_op(filename, pos, end - pos, write=False)
dfs.append(df)
# yield on all the reads in order - they complete in any order
exc = None
for df in dfs:
yield df
try:
r.append(df.getResult())
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
r = ''.join(r)
if len(r) != amount:
raise BTFailure(_("Short read (%d of %d) - "
"something truncated files?") %
(len(r), amount))
yield r
def read(self, pos, amount):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_read, pos, amount)
return df
def _batch_write(self, pos, s):
dfs = []
total = 0
amount = len(s)
# queue all the writes
for filename, begin, end in self._intervals(pos, amount):
length = end - begin
assert length > 0, '%s %s' % (pos, amount)
d = buffer(s, total, length)
total += length
df = self._file_op(filename, begin, d, write=True)
dfs.append(df)
assert total == amount, '%s and %s' % (total, amount)
written = 0
# yield on all the writes - they complete in any order
exc = None
for df in dfs:
yield df
try:
written += df.getResult()
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
assert total == written, '%s and %s' % (total, written)
yield total
def write(self, pos, s):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_write, pos, s)
return df
def close(self):
if not self.initialized:
def post_init(r):
return self.filepool.close_files(self.range_by_name)
self.startup_df.addCallback(post_init)
return self.startup_df
df = self.filepool.close_files(self.range_by_name)
return df
def downloaded(self, pos, length):
for filename, begin, end in self._intervals(pos, length):
self.undownloaded[filename] -= end - begin |
if failure is not None:
df.errback(failure)
if not done: | random_line_split |
fastTabSwitch.ts | describe('fast tab switch', () => {
let assert = chai.assert;
window.HTMLImports.whenReady(() => {
let tb = d3.select('tf-tensorboard'); | // This test will select the events tab. Once the events tab
// renders, will select the graph tab, and immediately select
// the images tab wihout waiting for the graph tab to finish
// rendering. Finally, it finishes when the images tab
// has rendered and no errors were thrown.
let eventsTabIndex = TF.Globals.TABS.indexOf('events');
let imagesTabIndex = TF.Globals.TABS.indexOf('images');
let graphTabIndex = TF.Globals.TABS.indexOf('graphs');
// Listen for when the events tab rendered.
tb.on('rendered', () => {
it('switching to graph tab and immediately to images', done => {
// Select the graph tab.
tabs.set('selected', graphTabIndex);
// Interrupt graph rendering by immediately selecting the images tab
// and finish when the images tab has rendered.
tb.on('rendered', () => done());
tabs.set('selected', imagesTabIndex);
});
});
// Select the events tab.
tabs.set('selected', eventsTabIndex);
});
}); | var tabs = (<any>tb.node()).$.tabs;
| random_line_split |
test_bm_interface.py | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-metal DB testcase for BareMetalInterface
"""
| from nova.tests.baremetal.db import base
from nova.virt.baremetal import db
class BareMetalInterfaceTestCase(base.BMDBTestCase):
def test_unique_address(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
db.bm_interface_destroy(self.context, pif1_id)
pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
'0x2', 2)
self.assertTrue(pif2_id is not None)
def test_unique_vif_uuid(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
'0x2', 2)
db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif2_id, 'AAAA')
def test_vif_not_found(self):
pif_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif_id + 1, 'AAAA') | from nova import exception | random_line_split |
test_bm_interface.py | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-metal DB testcase for BareMetalInterface
"""
from nova import exception
from nova.tests.baremetal.db import base
from nova.virt.baremetal import db
class BareMetalInterfaceTestCase(base.BMDBTestCase):
| def test_unique_address(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
db.bm_interface_destroy(self.context, pif1_id)
pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
'0x2', 2)
self.assertTrue(pif2_id is not None)
def test_unique_vif_uuid(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
'0x2', 2)
db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif2_id, 'AAAA')
def test_vif_not_found(self):
pif_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif_id + 1, 'AAAA') | identifier_body | |
test_bm_interface.py | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-metal DB testcase for BareMetalInterface
"""
from nova import exception
from nova.tests.baremetal.db import base
from nova.virt.baremetal import db
class BareMetalInterfaceTestCase(base.BMDBTestCase):
def test_unique_address(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
db.bm_interface_destroy(self.context, pif1_id)
pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
'0x2', 2)
self.assertTrue(pif2_id is not None)
def test_unique_vif_uuid(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
'0x2', 2)
db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif2_id, 'AAAA')
def | (self):
pif_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif_id + 1, 'AAAA')
| test_vif_not_found | identifier_name |
generator.rs | // a lot of the source of this file is the same as main.rs
// but instead of tweeting, it just prints to stdout
extern crate rand;
use rand::{thread_rng, ThreadRng};
// these files are paths to plain-text lists of the various word types
// these paths assume you're running with *cargo run* from the root of the crate
// for production, i replace these with absolute paths
const NOUNS_FILENAME: &str = "resources/nouns.txt";
const TEMPLATE_FILENAME: &str = "resources/templates.txt";
const ADJECTIVES_FILENAME: &str = "resources/adjectives.txt";
const ADVERBS_FILENAME: &str = "resources/adverbs.txt";
const ABSTRACTS_FILENAME: &str = "resources/abstracts.txt";
// this module does the heavy lifting of generating the text for tweets
// because i'm lazy, we do templating and replacement instead of markov chaining
mod template_manager;
use template_manager::TemplateManager;
fn | () {
// TemplateManager::new returns a Result because it will fail if there's a problem loading any of the files
match TemplateManager::new(
TEMPLATE_FILENAME,
NOUNS_FILENAME,
ADJECTIVES_FILENAME,
ADVERBS_FILENAME,
ABSTRACTS_FILENAME
) {
Ok(manager) => {
// i feel like there's probably a way to have the TemplateManager own ThreadRng
// but i'm not sure what it is - mutability in structs is hard
let mut rng: ThreadRng = thread_rng();
// TemplateManager.make_formatted_quote returns an Option
// because it doesn't work if any of the lists of phrases are empty
match manager.make_formatted_quote(&mut rng) {
Some(quote) => {
println!("{}", quote);
},
None => {
println!("Couldn't generate a tweet");
},
}
},
Err(err) => {
println!("Failed to create a TemplateManager with err: {}", err);
},
}
}
| main | identifier_name |
generator.rs | // a lot of the source of this file is the same as main.rs
// but instead of tweeting, it just prints to stdout
extern crate rand;
use rand::{thread_rng, ThreadRng};
// these files are paths to plain-text lists of the various word types
// these paths assume you're running with *cargo run* from the root of the crate
// for production, i replace these with absolute paths
const NOUNS_FILENAME: &str = "resources/nouns.txt";
const TEMPLATE_FILENAME: &str = "resources/templates.txt";
const ADJECTIVES_FILENAME: &str = "resources/adjectives.txt";
const ADVERBS_FILENAME: &str = "resources/adverbs.txt";
const ABSTRACTS_FILENAME: &str = "resources/abstracts.txt";
// this module does the heavy lifting of generating the text for tweets
// because i'm lazy, we do templating and replacement instead of markov chaining
mod template_manager;
use template_manager::TemplateManager;
fn main() {
// TemplateManager::new returns a Result because it will fail if there's a problem loading any of the files
match TemplateManager::new(
TEMPLATE_FILENAME,
NOUNS_FILENAME,
ADJECTIVES_FILENAME,
ADVERBS_FILENAME,
ABSTRACTS_FILENAME
) {
Ok(manager) => {
// i feel like there's probably a way to have the TemplateManager own ThreadRng
// but i'm not sure what it is - mutability in structs is hard
let mut rng: ThreadRng = thread_rng();
// TemplateManager.make_formatted_quote returns an Option
// because it doesn't work if any of the lists of phrases are empty
match manager.make_formatted_quote(&mut rng) {
Some(quote) => {
println!("{}", quote);
},
None => | ,
}
},
Err(err) => {
println!("Failed to create a TemplateManager with err: {}", err);
},
}
}
| {
println!("Couldn't generate a tweet");
} | conditional_block |
generator.rs | // a lot of the source of this file is the same as main.rs
// but instead of tweeting, it just prints to stdout
extern crate rand;
use rand::{thread_rng, ThreadRng};
// these files are paths to plain-text lists of the various word types
// these paths assume you're running with *cargo run* from the root of the crate
// for production, i replace these with absolute paths
const NOUNS_FILENAME: &str = "resources/nouns.txt";
const TEMPLATE_FILENAME: &str = "resources/templates.txt";
const ADJECTIVES_FILENAME: &str = "resources/adjectives.txt";
const ADVERBS_FILENAME: &str = "resources/adverbs.txt";
const ABSTRACTS_FILENAME: &str = "resources/abstracts.txt";
// this module does the heavy lifting of generating the text for tweets
// because i'm lazy, we do templating and replacement instead of markov chaining
mod template_manager;
use template_manager::TemplateManager;
fn main() | {
// TemplateManager::new returns a Result because it will fail if there's a problem loading any of the files
match TemplateManager::new(
TEMPLATE_FILENAME,
NOUNS_FILENAME,
ADJECTIVES_FILENAME,
ADVERBS_FILENAME,
ABSTRACTS_FILENAME
) {
Ok(manager) => {
// i feel like there's probably a way to have the TemplateManager own ThreadRng
// but i'm not sure what it is - mutability in structs is hard
let mut rng: ThreadRng = thread_rng();
// TemplateManager.make_formatted_quote returns an Option
// because it doesn't work if any of the lists of phrases are empty
match manager.make_formatted_quote(&mut rng) {
Some(quote) => {
println!("{}", quote);
},
None => {
println!("Couldn't generate a tweet");
},
}
},
Err(err) => {
println!("Failed to create a TemplateManager with err: {}", err);
},
}
} | identifier_body | |
generator.rs | // a lot of the source of this file is the same as main.rs
// but instead of tweeting, it just prints to stdout
extern crate rand;
use rand::{thread_rng, ThreadRng};
// these files are paths to plain-text lists of the various word types
// these paths assume you're running with *cargo run* from the root of the crate
// for production, i replace these with absolute paths
const NOUNS_FILENAME: &str = "resources/nouns.txt";
const TEMPLATE_FILENAME: &str = "resources/templates.txt";
const ADJECTIVES_FILENAME: &str = "resources/adjectives.txt";
const ADVERBS_FILENAME: &str = "resources/adverbs.txt";
const ABSTRACTS_FILENAME: &str = "resources/abstracts.txt";
// this module does the heavy lifting of generating the text for tweets
// because i'm lazy, we do templating and replacement instead of markov chaining
mod template_manager;
use template_manager::TemplateManager;
fn main() {
// TemplateManager::new returns a Result because it will fail if there's a problem loading any of the files
match TemplateManager::new(
TEMPLATE_FILENAME,
NOUNS_FILENAME,
ADJECTIVES_FILENAME,
ADVERBS_FILENAME,
ABSTRACTS_FILENAME
) {
Ok(manager) => {
// i feel like there's probably a way to have the TemplateManager own ThreadRng
// but i'm not sure what it is - mutability in structs is hard
let mut rng: ThreadRng = thread_rng();
// TemplateManager.make_formatted_quote returns an Option
// because it doesn't work if any of the lists of phrases are empty
match manager.make_formatted_quote(&mut rng) {
Some(quote) => {
println!("{}", quote);
},
None => {
println!("Couldn't generate a tweet");
},
}
},
Err(err) => { | println!("Failed to create a TemplateManager with err: {}", err);
},
}
} | random_line_split | |
wizard-base.component.ts | // ============================================================================
// Wizard Base - COMPONENT
//
// This component creates the Wizard landing page
// ============================================================================
// ----------------------------------------------------------------------------
// Imports
// ----------------------------------------------------------------------------
// Angular | import { ConfigService } from "../../shared/config/config.service";
import { DavisService } from "../../shared/davis.service";
import * as _ from "lodash";
// ----------------------------------------------------------------------------
// Class
// ----------------------------------------------------------------------------
@Component({
selector: "wizard-base",
templateUrl: "./wizard-base.component.html",
})
export class WizardBaseComponent implements OnInit {
// ------------------------------------------------------
// Inject services
// ------------------------------------------------------
constructor(public iConfig: ConfigService, public iDavis: DavisService) {}
ngOnInit() {
this.iDavis.isBreadcrumbsVisible = true;
this.iConfig.getDavisConfiguration()
.then(response => {
if (!response.success) {
throw new Error(response.message);
}
this.iConfig.values.dynatrace.url = response.config.dynatrace.url;
this.iConfig.values.dynatrace.token = response.config.dynatrace.token;
this.iConfig.values.slack.clientId = response.config.slack.clientId;
this.iConfig.values.slack.clientSecret = response.config.slack.clientSecret;
this.iConfig.values.slack.redirectUri = response.config.slack.redirectUri;
if (this.iConfig.values.slack.redirectUri.length < 1) {
this.iConfig.values.slack.redirectUri = `${window.location.protocol}//${window.location.host}/oauth`;
}
})
.catch(err => {
this.iConfig.displayError(err, 'dynatrace-connect');
});
}
} | import { Component, OnInit } from "@angular/core";
// Services | random_line_split |
wizard-base.component.ts | // ============================================================================
// Wizard Base - COMPONENT
//
// This component creates the Wizard landing page
// ============================================================================
// ----------------------------------------------------------------------------
// Imports
// ----------------------------------------------------------------------------
// Angular
import { Component, OnInit } from "@angular/core";
// Services
import { ConfigService } from "../../shared/config/config.service";
import { DavisService } from "../../shared/davis.service";
import * as _ from "lodash";
// ----------------------------------------------------------------------------
// Class
// ----------------------------------------------------------------------------
@Component({
selector: "wizard-base",
templateUrl: "./wizard-base.component.html",
})
export class WizardBaseComponent implements OnInit {
// ------------------------------------------------------
// Inject services
// ------------------------------------------------------
constructor(public iConfig: ConfigService, public iDavis: DavisService) |
ngOnInit() {
this.iDavis.isBreadcrumbsVisible = true;
this.iConfig.getDavisConfiguration()
.then(response => {
if (!response.success) {
throw new Error(response.message);
}
this.iConfig.values.dynatrace.url = response.config.dynatrace.url;
this.iConfig.values.dynatrace.token = response.config.dynatrace.token;
this.iConfig.values.slack.clientId = response.config.slack.clientId;
this.iConfig.values.slack.clientSecret = response.config.slack.clientSecret;
this.iConfig.values.slack.redirectUri = response.config.slack.redirectUri;
if (this.iConfig.values.slack.redirectUri.length < 1) {
this.iConfig.values.slack.redirectUri = `${window.location.protocol}//${window.location.host}/oauth`;
}
})
.catch(err => {
this.iConfig.displayError(err, 'dynatrace-connect');
});
}
}
| {} | identifier_body |
wizard-base.component.ts | // ============================================================================
// Wizard Base - COMPONENT
//
// This component creates the Wizard landing page
// ============================================================================
// ----------------------------------------------------------------------------
// Imports
// ----------------------------------------------------------------------------
// Angular
import { Component, OnInit } from "@angular/core";
// Services
import { ConfigService } from "../../shared/config/config.service";
import { DavisService } from "../../shared/davis.service";
import * as _ from "lodash";
// ----------------------------------------------------------------------------
// Class
// ----------------------------------------------------------------------------
@Component({
selector: "wizard-base",
templateUrl: "./wizard-base.component.html",
})
export class WizardBaseComponent implements OnInit {
// ------------------------------------------------------
// Inject services
// ------------------------------------------------------
constructor(public iConfig: ConfigService, public iDavis: DavisService) {}
| () {
this.iDavis.isBreadcrumbsVisible = true;
this.iConfig.getDavisConfiguration()
.then(response => {
if (!response.success) {
throw new Error(response.message);
}
this.iConfig.values.dynatrace.url = response.config.dynatrace.url;
this.iConfig.values.dynatrace.token = response.config.dynatrace.token;
this.iConfig.values.slack.clientId = response.config.slack.clientId;
this.iConfig.values.slack.clientSecret = response.config.slack.clientSecret;
this.iConfig.values.slack.redirectUri = response.config.slack.redirectUri;
if (this.iConfig.values.slack.redirectUri.length < 1) {
this.iConfig.values.slack.redirectUri = `${window.location.protocol}//${window.location.host}/oauth`;
}
})
.catch(err => {
this.iConfig.displayError(err, 'dynatrace-connect');
});
}
}
| ngOnInit | identifier_name |
wizard-base.component.ts | // ============================================================================
// Wizard Base - COMPONENT
//
// This component creates the Wizard landing page
// ============================================================================
// ----------------------------------------------------------------------------
// Imports
// ----------------------------------------------------------------------------
// Angular
import { Component, OnInit } from "@angular/core";
// Services
import { ConfigService } from "../../shared/config/config.service";
import { DavisService } from "../../shared/davis.service";
import * as _ from "lodash";
// ----------------------------------------------------------------------------
// Class
// ----------------------------------------------------------------------------
@Component({
selector: "wizard-base",
templateUrl: "./wizard-base.component.html",
})
export class WizardBaseComponent implements OnInit {
// ------------------------------------------------------
// Inject services
// ------------------------------------------------------
constructor(public iConfig: ConfigService, public iDavis: DavisService) {}
ngOnInit() {
this.iDavis.isBreadcrumbsVisible = true;
this.iConfig.getDavisConfiguration()
.then(response => {
if (!response.success) |
this.iConfig.values.dynatrace.url = response.config.dynatrace.url;
this.iConfig.values.dynatrace.token = response.config.dynatrace.token;
this.iConfig.values.slack.clientId = response.config.slack.clientId;
this.iConfig.values.slack.clientSecret = response.config.slack.clientSecret;
this.iConfig.values.slack.redirectUri = response.config.slack.redirectUri;
if (this.iConfig.values.slack.redirectUri.length < 1) {
this.iConfig.values.slack.redirectUri = `${window.location.protocol}//${window.location.host}/oauth`;
}
})
.catch(err => {
this.iConfig.displayError(err, 'dynatrace-connect');
});
}
}
| {
throw new Error(response.message);
} | conditional_block |
derive_input_object.rs | use fnv::FnvHashMap;
use juniper::{
marker, DefaultScalarValue, FromInputValue, GraphQLInputObject, GraphQLType, GraphQLValue,
InputValue, Registry, ToInputValue,
};
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(
name = "MyInput",
description = "input descr",
scalar = DefaultScalarValue
)]
struct Input {
regular_field: String,
#[graphql(name = "haha", default = "33", description = "haha descr")]
c: i32,
#[graphql(default)]
other: Option<bool>,
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(rename = "none")]
struct NoRenameInput {
regular_field: String,
}
/// Object comment.
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct DocComment {
/// Field comment.
regular_field: bool,
}
/// Doc 1.\
/// Doc 2.
///
/// Doc 4.
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct MultiDocComment {
/// Field 1.
/// Field 2.
regular_field: bool,
}
/// This is not used as the description.
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(description = "obj override")]
struct OverrideDocComment {
/// This is not used as the description.
#[graphql(description = "field override")]
regular_field: bool,
}
#[derive(Debug, PartialEq)]
struct Fake;
impl<'a> marker::IsInputType<DefaultScalarValue> for &'a Fake {}
impl<'a> FromInputValue for &'a Fake {
fn from_input_value(_v: &InputValue) -> Option<&'a Fake> {
None
}
}
impl<'a> ToInputValue for &'a Fake {
fn to_input_value(&self) -> InputValue {
InputValue::scalar("this is fake")
}
}
impl<'a> GraphQLType<DefaultScalarValue> for &'a Fake {
fn name(_: &()) -> Option<&'static str> {
None
}
fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> juniper::meta::MetaType<'r>
where
DefaultScalarValue: 'r,
{
let meta = registry.build_enum_type::<&'a Fake>(
&(),
&[juniper::meta::EnumValue {
name: "fake".to_string(),
description: None,
deprecation_status: juniper::meta::DeprecationStatus::Current,
}],
);
meta.into_meta()
}
}
impl<'a> GraphQLValue<DefaultScalarValue> for &'a Fake {
type Context = ();
type TypeInfo = ();
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str> {
<Self as GraphQLType>::name(info)
}
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(scalar = DefaultScalarValue)]
struct WithLifetime<'a> {
regular_field: &'a Fake,
}
#[test]
fn test_derived_input_object() {
assert_eq!(
<Input as GraphQLType<DefaultScalarValue>>::name(&()),
Some("MyInput")
);
// Validate meta info.
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = Input::meta(&(), &mut registry);
assert_eq!(meta.name(), Some("MyInput"));
assert_eq!(meta.description(), Some(&"input descr".to_string()));
// Test default value injection.
let input_no_defaults: InputValue = ::serde_json::from_value(serde_json::json!({
"regularField": "a",
}))
.unwrap();
let output_no_defaults: Input = FromInputValue::from_input_value(&input_no_defaults).unwrap();
assert_eq!(
output_no_defaults,
Input {
regular_field: "a".into(),
c: 33,
other: None,
}
);
// Test with all values supplied.
let input: InputValue = ::serde_json::from_value(serde_json::json!({
"regularField": "a",
"haha": 55,
"other": true,
}))
.unwrap();
let output: Input = FromInputValue::from_input_value(&input).unwrap();
assert_eq!(
output,
Input {
regular_field: "a".into(),
c: 55,
other: Some(true),
}
);
// Test disable renaming
let input: InputValue = ::serde_json::from_value(serde_json::json!({
"regular_field": "hello",
}))
.unwrap();
let output: NoRenameInput = FromInputValue::from_input_value(&input).unwrap();
assert_eq!(
output,
NoRenameInput {
regular_field: "hello".into(),
}
);
}
#[test]
fn | () {
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = DocComment::meta(&(), &mut registry);
assert_eq!(meta.description(), Some(&"Object comment.".to_string()));
}
#[test]
fn test_multi_doc_comment() {
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = MultiDocComment::meta(&(), &mut registry);
assert_eq!(
meta.description(),
Some(&"Doc 1. Doc 2.\n\nDoc 4.".to_string())
);
}
#[test]
fn test_doc_comment_override() {
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = OverrideDocComment::meta(&(), &mut registry);
assert_eq!(meta.description(), Some(&"obj override".to_string()));
}
| test_doc_comment | identifier_name |
derive_input_object.rs | use fnv::FnvHashMap;
use juniper::{
marker, DefaultScalarValue, FromInputValue, GraphQLInputObject, GraphQLType, GraphQLValue,
InputValue, Registry, ToInputValue,
};
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(
name = "MyInput",
description = "input descr",
scalar = DefaultScalarValue
)]
struct Input {
regular_field: String,
#[graphql(name = "haha", default = "33", description = "haha descr")]
c: i32,
#[graphql(default)]
other: Option<bool>,
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(rename = "none")]
struct NoRenameInput {
regular_field: String,
}
/// Object comment.
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct DocComment {
/// Field comment.
regular_field: bool,
}
/// Doc 1.\
/// Doc 2.
///
/// Doc 4.
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct MultiDocComment {
/// Field 1.
/// Field 2.
regular_field: bool,
}
/// This is not used as the description.
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(description = "obj override")]
struct OverrideDocComment {
/// This is not used as the description.
#[graphql(description = "field override")]
regular_field: bool,
}
#[derive(Debug, PartialEq)]
struct Fake;
impl<'a> marker::IsInputType<DefaultScalarValue> for &'a Fake {}
impl<'a> FromInputValue for &'a Fake {
fn from_input_value(_v: &InputValue) -> Option<&'a Fake> {
None
}
}
impl<'a> ToInputValue for &'a Fake {
fn to_input_value(&self) -> InputValue {
InputValue::scalar("this is fake")
}
}
impl<'a> GraphQLType<DefaultScalarValue> for &'a Fake {
fn name(_: &()) -> Option<&'static str> {
None
}
fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> juniper::meta::MetaType<'r>
where
DefaultScalarValue: 'r,
{
let meta = registry.build_enum_type::<&'a Fake>(
&(),
&[juniper::meta::EnumValue {
name: "fake".to_string(),
description: None,
deprecation_status: juniper::meta::DeprecationStatus::Current,
}],
);
meta.into_meta()
}
}
impl<'a> GraphQLValue<DefaultScalarValue> for &'a Fake {
type Context = ();
type TypeInfo = ();
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str> {
<Self as GraphQLType>::name(info)
}
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
#[graphql(scalar = DefaultScalarValue)]
struct WithLifetime<'a> {
regular_field: &'a Fake,
}
#[test]
fn test_derived_input_object() {
assert_eq!(
<Input as GraphQLType<DefaultScalarValue>>::name(&()),
Some("MyInput")
);
// Validate meta info.
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = Input::meta(&(), &mut registry);
assert_eq!(meta.name(), Some("MyInput"));
assert_eq!(meta.description(), Some(&"input descr".to_string()));
// Test default value injection.
let input_no_defaults: InputValue = ::serde_json::from_value(serde_json::json!({
"regularField": "a",
}))
.unwrap();
let output_no_defaults: Input = FromInputValue::from_input_value(&input_no_defaults).unwrap();
assert_eq!(
output_no_defaults,
Input {
regular_field: "a".into(),
c: 33,
other: None,
}
);
// Test with all values supplied.
let input: InputValue = ::serde_json::from_value(serde_json::json!({
"regularField": "a",
"haha": 55,
"other": true,
}))
.unwrap();
let output: Input = FromInputValue::from_input_value(&input).unwrap();
assert_eq!(
output,
Input {
regular_field: "a".into(),
c: 55,
other: Some(true),
}
);
// Test disable renaming
let input: InputValue = ::serde_json::from_value(serde_json::json!({
"regular_field": "hello",
}))
.unwrap();
let output: NoRenameInput = FromInputValue::from_input_value(&input).unwrap();
assert_eq!(
output,
NoRenameInput {
regular_field: "hello".into(),
}
);
}
#[test]
fn test_doc_comment() {
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = DocComment::meta(&(), &mut registry);
assert_eq!(meta.description(), Some(&"Object comment.".to_string()));
}
#[test]
fn test_multi_doc_comment() {
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = MultiDocComment::meta(&(), &mut registry);
assert_eq!(
meta.description(),
Some(&"Doc 1. Doc 2.\n\nDoc 4.".to_string())
);
}
#[test]
fn test_doc_comment_override() {
let mut registry: Registry = Registry::new(FnvHashMap::default());
let meta = OverrideDocComment::meta(&(), &mut registry);
assert_eq!(meta.description(), Some(&"obj override".to_string())); | } | random_line_split | |
MessageQueueBackend.py | """
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
log records to a Message Queue server. You can find it in: FrameworkSys./private/standardlogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
"""
Initialization of the MessageQueueBackend
"""
super(MessageQueueBackend, self).__init__(None, JsonFormatter)
self.__queue = ''
def createHandler(self, parameters=None):
|
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass
| """
Each backend can initialize its attributes and create its handler with them.
:params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__queue = parameters.get("MsgQueue", self.__queue)
self._handler = MessageQueueHandler(self.__queue) | identifier_body |
MessageQueueBackend.py | """
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
log records to a Message Queue server. You can find it in: FrameworkSys./private/standardlogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
"""
Initialization of the MessageQueueBackend
"""
super(MessageQueueBackend, self).__init__(None, JsonFormatter)
self.__queue = ''
def createHandler(self, parameters=None):
""" | :params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__queue = parameters.get("MsgQueue", self.__queue)
self._handler = MessageQueueHandler(self.__queue)
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass | Each backend can initialize its attributes and create its handler with them.
| random_line_split |
MessageQueueBackend.py | """
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
log records to a Message Queue server. You can find it in: FrameworkSys./private/standardlogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def | (self):
"""
Initialization of the MessageQueueBackend
"""
super(MessageQueueBackend, self).__init__(None, JsonFormatter)
self.__queue = ''
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
:params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__queue = parameters.get("MsgQueue", self.__queue)
self._handler = MessageQueueHandler(self.__queue)
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass
| __init__ | identifier_name |
MessageQueueBackend.py | """
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
log records to a Message Queue server. You can find it in: FrameworkSys./private/standardlogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
"""
Initialization of the MessageQueueBackend
"""
super(MessageQueueBackend, self).__init__(None, JsonFormatter)
self.__queue = ''
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
:params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
|
self._handler = MessageQueueHandler(self.__queue)
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass
| self.__queue = parameters.get("MsgQueue", self.__queue) | conditional_block |
stdio.py | # Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Standard input/out/err support.
API Stability: semi-stable
Future Plans: support for stderr, perhaps
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# system imports
import sys, os, select, errno
# Sibling Imports
import abstract, fdesc, protocol
from main import CONNECTION_LOST
_stdio_in_use = 0
class | (abstract.FileDescriptor):
connected = 1
ic = 0
def __init__(self):
abstract.FileDescriptor.__init__(self)
self.fileno = sys.__stdout__.fileno
fdesc.setNonBlocking(self.fileno())
def writeSomeData(self, data):
try:
return os.write(self.fileno(), data)
return rv
except IOError, io:
if io.args[0] == errno.EAGAIN:
return 0
elif io.args[0] == errno.EPERM:
return 0
return CONNECTION_LOST
except OSError, ose:
if ose.errno == errno.EPIPE:
return CONNECTION_LOST
if ose.errno == errno.EAGAIN:
return 0
raise
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
os.close(self.fileno())
class StandardIO(abstract.FileDescriptor):
"""I can connect Standard IO to a twisted.protocol
I act as a selectable for sys.stdin, and provide a write method that writes
to stdout.
"""
def __init__(self, protocol):
"""Create me with a protocol.
This will fail if a StandardIO has already been instantiated.
"""
abstract.FileDescriptor.__init__(self)
global _stdio_in_use
if _stdio_in_use:
raise RuntimeError, "Standard IO already in use."
_stdio_in_use = 1
self.fileno = sys.__stdin__.fileno
fdesc.setNonBlocking(self.fileno())
self.protocol = protocol
self.startReading()
self.writer = StandardIOWriter()
self.protocol.makeConnection(self)
def write(self, data):
"""Write some data to standard output.
"""
self.writer.write(data)
def doRead(self):
"""Some data's readable from standard input.
"""
return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
def closeStdin(self):
"""Close standard input.
"""
self.writer.loseConnection()
def connectionLost(self, reason):
"""The connection was lost.
"""
self.protocol.connectionLost()
| StandardIOWriter | identifier_name |
stdio.py | # Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Standard input/out/err support.
API Stability: semi-stable
Future Plans: support for stderr, perhaps
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# system imports
import sys, os, select, errno
# Sibling Imports
import abstract, fdesc, protocol
from main import CONNECTION_LOST
_stdio_in_use = 0
class StandardIOWriter(abstract.FileDescriptor):
connected = 1
ic = 0
def __init__(self):
abstract.FileDescriptor.__init__(self)
self.fileno = sys.__stdout__.fileno
fdesc.setNonBlocking(self.fileno())
def writeSomeData(self, data):
try:
return os.write(self.fileno(), data)
return rv
except IOError, io:
if io.args[0] == errno.EAGAIN:
|
elif io.args[0] == errno.EPERM:
return 0
return CONNECTION_LOST
except OSError, ose:
if ose.errno == errno.EPIPE:
return CONNECTION_LOST
if ose.errno == errno.EAGAIN:
return 0
raise
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
os.close(self.fileno())
class StandardIO(abstract.FileDescriptor):
"""I can connect Standard IO to a twisted.protocol
I act as a selectable for sys.stdin, and provide a write method that writes
to stdout.
"""
def __init__(self, protocol):
"""Create me with a protocol.
This will fail if a StandardIO has already been instantiated.
"""
abstract.FileDescriptor.__init__(self)
global _stdio_in_use
if _stdio_in_use:
raise RuntimeError, "Standard IO already in use."
_stdio_in_use = 1
self.fileno = sys.__stdin__.fileno
fdesc.setNonBlocking(self.fileno())
self.protocol = protocol
self.startReading()
self.writer = StandardIOWriter()
self.protocol.makeConnection(self)
def write(self, data):
"""Write some data to standard output.
"""
self.writer.write(data)
def doRead(self):
"""Some data's readable from standard input.
"""
return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
def closeStdin(self):
"""Close standard input.
"""
self.writer.loseConnection()
def connectionLost(self, reason):
"""The connection was lost.
"""
self.protocol.connectionLost()
| return 0 | conditional_block |
stdio.py | # Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Standard input/out/err support.
API Stability: semi-stable
Future Plans: support for stderr, perhaps
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# system imports
import sys, os, select, errno
# Sibling Imports
import abstract, fdesc, protocol
from main import CONNECTION_LOST
_stdio_in_use = 0
class StandardIOWriter(abstract.FileDescriptor):
connected = 1
ic = 0
def __init__(self):
abstract.FileDescriptor.__init__(self)
self.fileno = sys.__stdout__.fileno
fdesc.setNonBlocking(self.fileno())
def writeSomeData(self, data):
try:
return os.write(self.fileno(), data)
return rv
except IOError, io:
if io.args[0] == errno.EAGAIN:
return 0
elif io.args[0] == errno.EPERM:
return 0
return CONNECTION_LOST
except OSError, ose:
if ose.errno == errno.EPIPE:
return CONNECTION_LOST
if ose.errno == errno.EAGAIN:
return 0
raise
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
os.close(self.fileno())
class StandardIO(abstract.FileDescriptor):
"""I can connect Standard IO to a twisted.protocol
I act as a selectable for sys.stdin, and provide a write method that writes
to stdout.
"""
def __init__(self, protocol):
"""Create me with a protocol.
This will fail if a StandardIO has already been instantiated.
"""
abstract.FileDescriptor.__init__(self)
global _stdio_in_use
if _stdio_in_use:
raise RuntimeError, "Standard IO already in use."
_stdio_in_use = 1
self.fileno = sys.__stdin__.fileno
fdesc.setNonBlocking(self.fileno())
self.protocol = protocol
self.startReading()
self.writer = StandardIOWriter()
self.protocol.makeConnection(self)
def write(self, data):
"""Write some data to standard output. |
def doRead(self):
"""Some data's readable from standard input.
"""
return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
def closeStdin(self):
"""Close standard input.
"""
self.writer.loseConnection()
def connectionLost(self, reason):
"""The connection was lost.
"""
self.protocol.connectionLost() | """
self.writer.write(data) | random_line_split |
stdio.py | # Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Standard input/out/err support.
API Stability: semi-stable
Future Plans: support for stderr, perhaps
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# system imports
import sys, os, select, errno
# Sibling Imports
import abstract, fdesc, protocol
from main import CONNECTION_LOST
_stdio_in_use = 0
class StandardIOWriter(abstract.FileDescriptor):
connected = 1
ic = 0
def __init__(self):
|
def writeSomeData(self, data):
try:
return os.write(self.fileno(), data)
return rv
except IOError, io:
if io.args[0] == errno.EAGAIN:
return 0
elif io.args[0] == errno.EPERM:
return 0
return CONNECTION_LOST
except OSError, ose:
if ose.errno == errno.EPIPE:
return CONNECTION_LOST
if ose.errno == errno.EAGAIN:
return 0
raise
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
os.close(self.fileno())
class StandardIO(abstract.FileDescriptor):
"""I can connect Standard IO to a twisted.protocol
I act as a selectable for sys.stdin, and provide a write method that writes
to stdout.
"""
def __init__(self, protocol):
"""Create me with a protocol.
This will fail if a StandardIO has already been instantiated.
"""
abstract.FileDescriptor.__init__(self)
global _stdio_in_use
if _stdio_in_use:
raise RuntimeError, "Standard IO already in use."
_stdio_in_use = 1
self.fileno = sys.__stdin__.fileno
fdesc.setNonBlocking(self.fileno())
self.protocol = protocol
self.startReading()
self.writer = StandardIOWriter()
self.protocol.makeConnection(self)
def write(self, data):
"""Write some data to standard output.
"""
self.writer.write(data)
def doRead(self):
"""Some data's readable from standard input.
"""
return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
def closeStdin(self):
"""Close standard input.
"""
self.writer.loseConnection()
def connectionLost(self, reason):
"""The connection was lost.
"""
self.protocol.connectionLost()
| abstract.FileDescriptor.__init__(self)
self.fileno = sys.__stdout__.fileno
fdesc.setNonBlocking(self.fileno()) | identifier_body |
BaseAssignmentDataList.js | define([
'underscore',
'shared/views/BaseView',
'shared/views/datalist/DeadSimpleDataList'
], function (_, BaseView, DeadSimpleDataList) {
var DoneButton = BaseView.extend({
className: 'simplemodal-button-bar o-form-button-bar',
template: '<input type="button" data-type="close" \
class="button button-primary js-done-button" \
value="{{i18n code="simplemodal.done"}}">',
events: {
'click .js-done-button': 'doneAssignment'
},
doneAssignment: function () {
this.state.trigger('doneAssignment');
}
});
return DeadSimpleDataList.extend({
resizeSelector: '.data-list-content-wrap',
toolbar: [
{
type: 'search',
align: 'left',
minChars: 1,
field: 'unassignedSearchQuery'
}
],
collectionEvents: {
sync: 'balance',
itemAssigned: 'itemAssigned'
},
postRender: function () {
this.add(DoneButton);
},
itemAssigned: function (assignmentId) {
var assignment = this.collection.findWhere({id: assignmentId});
// safe check to avoid nullpoint exception.
if (assignment) |
},
balance: _.debounce(function () {
DeadSimpleDataList.prototype.balance.apply(this, arguments);
this.trigger('resize');
}, this.debounceTime)
});
});
| {
assignment.set('__isAssigned__', true);
} | conditional_block |
BaseAssignmentDataList.js | define([
'underscore',
'shared/views/BaseView',
'shared/views/datalist/DeadSimpleDataList'
], function (_, BaseView, DeadSimpleDataList) {
var DoneButton = BaseView.extend({
className: 'simplemodal-button-bar o-form-button-bar',
template: '<input type="button" data-type="close" \
class="button button-primary js-done-button" \
value="{{i18n code="simplemodal.done"}}">',
events: {
'click .js-done-button': 'doneAssignment'
},
doneAssignment: function () {
this.state.trigger('doneAssignment');
}
});
return DeadSimpleDataList.extend({
resizeSelector: '.data-list-content-wrap',
toolbar: [
{
type: 'search',
align: 'left',
minChars: 1,
field: 'unassignedSearchQuery'
}
],
collectionEvents: {
sync: 'balance',
itemAssigned: 'itemAssigned'
},
postRender: function () {
this.add(DoneButton);
},
itemAssigned: function (assignmentId) {
var assignment = this.collection.findWhere({id: assignmentId});
// safe check to avoid nullpoint exception.
if (assignment) { | },
balance: _.debounce(function () {
DeadSimpleDataList.prototype.balance.apply(this, arguments);
this.trigger('resize');
}, this.debounceTime)
});
}); | assignment.set('__isAssigned__', true);
} | random_line_split |
builtin-superkinds-self-type.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests (negatively) the ability for the Self type in default methods
// to use capabilities granted by builtin kinds as supertraits.
use std::sync::mpsc::{channel, Sender};
trait Foo : Sync+'static {
fn foo(self, mut chan: Sender<Self>) |
}
impl <T: Sync> Foo for T { }
//~^ ERROR the parameter type `T` may not live long enough
fn main() {
let (tx, rx) = channel();
1193182is.foo(tx);
assert!(rx.recv() == 1193182is);
}
| { } | identifier_body |
builtin-superkinds-self-type.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests (negatively) the ability for the Self type in default methods
// to use capabilities granted by builtin kinds as supertraits.
use std::sync::mpsc::{channel, Sender};
trait Foo : Sync+'static {
fn foo(self, mut chan: Sender<Self>) { }
}
impl <T: Sync> Foo for T { }
//~^ ERROR the parameter type `T` may not live long enough
fn main() {
let (tx, rx) = channel();
1193182is.foo(tx);
assert!(rx.recv() == 1193182is);
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
builtin-superkinds-self-type.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests (negatively) the ability for the Self type in default methods
// to use capabilities granted by builtin kinds as supertraits.
use std::sync::mpsc::{channel, Sender};
trait Foo : Sync+'static {
fn | (self, mut chan: Sender<Self>) { }
}
impl <T: Sync> Foo for T { }
//~^ ERROR the parameter type `T` may not live long enough
fn main() {
let (tx, rx) = channel();
1193182is.foo(tx);
assert!(rx.recv() == 1193182is);
}
| foo | identifier_name |
Searchbar.ts | /// <reference path="../launchpad/notice/notice.ts" />
/// <reference path="../basecontroller.ts" />
module sap.sbo.ng4c.header {
import BaseController = sap.sbo.ng4c.BaseController;
import Notice = sap.sbo.ng4c.launchpad.notice.Notice;
export interface SearchbarScope extends Scope {
meta: SearchbarMeta;
extendSearch: Function;
}
export interface SearchbarMeta {
placeholder: string;
search: boolean;
}
export class Searchbar extends BaseController {
private scope: SearchbarScope;
public co | scope: Scope, $element: JQuery, $attrs: ng.IAttributes) {
super($scope, $element, $attrs, "sap.sbo.ng4c.header.Searchbar");
this.scope = <SearchbarScope>this.$scope;
this.scope.meta = { placeholder: "Look up data", search: false };
this.scope.extendSearch = $.proxy(this.extendSearch, this);
this.scope.$on("bodyClickBroadcast", $.proxy(this.onClickOutside, this));
}
private extendSearch(event: ng.IAngularEvent): void {
this.scope.meta.search = true;
}
private onClickOutside(event: ng.IAngularEvent, clickEvent: JQueryEventObject): void {
var target: JQuery = $(clickEvent.target);
if (target.parents("." + this.scope.className).length === 0) {
this.scope.meta.search = false;
}
}
}
} | nstructor($ | identifier_name |
Searchbar.ts | /// <reference path="../launchpad/notice/notice.ts" />
/// <reference path="../basecontroller.ts" />
module sap.sbo.ng4c.header {
import BaseController = sap.sbo.ng4c.BaseController;
import Notice = sap.sbo.ng4c.launchpad.notice.Notice;
export interface SearchbarScope extends Scope {
meta: SearchbarMeta;
extendSearch: Function;
}
export interface SearchbarMeta {
placeholder: string;
search: boolean; |
export class Searchbar extends BaseController {
private scope: SearchbarScope;
public constructor($scope: Scope, $element: JQuery, $attrs: ng.IAttributes) {
super($scope, $element, $attrs, "sap.sbo.ng4c.header.Searchbar");
this.scope = <SearchbarScope>this.$scope;
this.scope.meta = { placeholder: "Look up data", search: false };
this.scope.extendSearch = $.proxy(this.extendSearch, this);
this.scope.$on("bodyClickBroadcast", $.proxy(this.onClickOutside, this));
}
private extendSearch(event: ng.IAngularEvent): void {
this.scope.meta.search = true;
}
private onClickOutside(event: ng.IAngularEvent, clickEvent: JQueryEventObject): void {
var target: JQuery = $(clickEvent.target);
if (target.parents("." + this.scope.className).length === 0) {
this.scope.meta.search = false;
}
}
}
} | } | random_line_split |
Searchbar.ts | /// <reference path="../launchpad/notice/notice.ts" />
/// <reference path="../basecontroller.ts" />
module sap.sbo.ng4c.header {
import BaseController = sap.sbo.ng4c.BaseController;
import Notice = sap.sbo.ng4c.launchpad.notice.Notice;
export interface SearchbarScope extends Scope {
meta: SearchbarMeta;
extendSearch: Function;
}
export interface SearchbarMeta {
placeholder: string;
search: boolean;
}
export class Searchbar extends BaseController {
private scope: SearchbarScope;
public constructor($scope: Scope, $element: JQuery, $attrs: ng.IAttributes) {
super($scope, $element, $attrs, "sap.sbo.ng4c.header.Searchbar");
this.scope = <SearchbarScope>this.$scope;
this.scope.meta = { placeholder: "Look up data", search: false };
this.scope.extendSearch = $.proxy(this.extendSearch, this);
this.scope.$on("bodyClickBroadcast", $.proxy(this.onClickOutside, this));
}
private extendSearch(event: ng.IAngularEvent): void {
this.scope.meta.search = true;
}
private onClickOutside(event: ng.IAngularEvent, clickEvent: JQueryEventObject): void {
var target: JQuery = $(clickEvent.target);
if (target.parents("." + this.scope.className).length === 0) {
| }
}
} | this.scope.meta.search = false;
}
| conditional_block |
DefaultFormatter.ts | import { AbstractFormatter } from './AbstractFormatter';
import * as vscode from 'vscode';
import { FileCoverage } from 'istanbul-lib-coverage';
import { isValidLocation } from './helpers';
const uncoveredBranch = vscode.window.createTextEditorDecorationType({
backgroundColor: 'rgba(216,134,123,0.4)',
overviewRulerColor: 'rgba(216,134,123,0.8)',
overviewRulerLane: vscode.OverviewRulerLane.Left,
});
const uncoveredLine = vscode.window.createTextEditorDecorationType({
isWholeLine: true,
backgroundColor: 'rgba(216,134,123,0.4)',
overviewRulerColor: 'rgba(216,134,123,0.8)',
overviewRulerLane: vscode.OverviewRulerLane.Left,
});
export class DefaultFormatter extends AbstractFormatter {
format(editor: vscode.TextEditor) {
const fileCoverage = this.coverageMapProvider.getFileCoverage(editor.document.fileName);
if (!fileCoverage) {
return;
}
this.formatBranches(editor, fileCoverage);
this.formatUncoveredLines(editor, fileCoverage);
}
formatBranches(editor: vscode.TextEditor, fileCoverage: FileCoverage) {
const ranges = [];
Object.keys(fileCoverage.b).forEach((branchIndex) => {
fileCoverage.b[branchIndex].forEach((hitCount, locationIndex) => {
if (hitCount > 0) {
return;
}
const branch = fileCoverage.branchMap[branchIndex].locations[locationIndex];
if (!isValidLocation(branch)) {
return;
}
// If the value is `null`, then set it to the first character on its
// line.
const endColumn = branch.end.column || 0;
ranges.push(
new vscode.Range(
branch.start.line - 1,
branch.start.column,
branch.end.line - 1,
endColumn
)
);
});
});
editor.setDecorations(uncoveredBranch, ranges);
}
formatUncoveredLines(editor: vscode.TextEditor, fileCoverage: FileCoverage) {
const lines = fileCoverage.getUncoveredLines();
const ranges = [];
for (const oneBasedLineNumber of lines) {
const zeroBasedLineNumber = Number(oneBasedLineNumber) - 1;
ranges.push(new vscode.Range(zeroBasedLineNumber, 0, zeroBasedLineNumber, 0));
}
editor.setDecorations(uncoveredLine, ranges);
}
| (editor: vscode.TextEditor) {
editor.setDecorations(uncoveredLine, []);
editor.setDecorations(uncoveredBranch, []);
}
}
| clear | identifier_name |
DefaultFormatter.ts | import { AbstractFormatter } from './AbstractFormatter';
import * as vscode from 'vscode';
import { FileCoverage } from 'istanbul-lib-coverage';
import { isValidLocation } from './helpers';
const uncoveredBranch = vscode.window.createTextEditorDecorationType({
backgroundColor: 'rgba(216,134,123,0.4)',
overviewRulerColor: 'rgba(216,134,123,0.8)',
overviewRulerLane: vscode.OverviewRulerLane.Left,
});
const uncoveredLine = vscode.window.createTextEditorDecorationType({
isWholeLine: true,
backgroundColor: 'rgba(216,134,123,0.4)',
overviewRulerColor: 'rgba(216,134,123,0.8)',
overviewRulerLane: vscode.OverviewRulerLane.Left,
});
export class DefaultFormatter extends AbstractFormatter {
format(editor: vscode.TextEditor) {
const fileCoverage = this.coverageMapProvider.getFileCoverage(editor.document.fileName);
if (!fileCoverage) |
this.formatBranches(editor, fileCoverage);
this.formatUncoveredLines(editor, fileCoverage);
}
formatBranches(editor: vscode.TextEditor, fileCoverage: FileCoverage) {
const ranges = [];
Object.keys(fileCoverage.b).forEach((branchIndex) => {
fileCoverage.b[branchIndex].forEach((hitCount, locationIndex) => {
if (hitCount > 0) {
return;
}
const branch = fileCoverage.branchMap[branchIndex].locations[locationIndex];
if (!isValidLocation(branch)) {
return;
}
// If the value is `null`, then set it to the first character on its
// line.
const endColumn = branch.end.column || 0;
ranges.push(
new vscode.Range(
branch.start.line - 1,
branch.start.column,
branch.end.line - 1,
endColumn
)
);
});
});
editor.setDecorations(uncoveredBranch, ranges);
}
formatUncoveredLines(editor: vscode.TextEditor, fileCoverage: FileCoverage) {
const lines = fileCoverage.getUncoveredLines();
const ranges = [];
for (const oneBasedLineNumber of lines) {
const zeroBasedLineNumber = Number(oneBasedLineNumber) - 1;
ranges.push(new vscode.Range(zeroBasedLineNumber, 0, zeroBasedLineNumber, 0));
}
editor.setDecorations(uncoveredLine, ranges);
}
clear(editor: vscode.TextEditor) {
editor.setDecorations(uncoveredLine, []);
editor.setDecorations(uncoveredBranch, []);
}
}
| {
return;
} | conditional_block |
DefaultFormatter.ts | import { AbstractFormatter } from './AbstractFormatter';
import * as vscode from 'vscode';
import { FileCoverage } from 'istanbul-lib-coverage';
import { isValidLocation } from './helpers';
const uncoveredBranch = vscode.window.createTextEditorDecorationType({
backgroundColor: 'rgba(216,134,123,0.4)',
overviewRulerColor: 'rgba(216,134,123,0.8)',
overviewRulerLane: vscode.OverviewRulerLane.Left,
});
const uncoveredLine = vscode.window.createTextEditorDecorationType({
isWholeLine: true,
backgroundColor: 'rgba(216,134,123,0.4)',
overviewRulerColor: 'rgba(216,134,123,0.8)',
overviewRulerLane: vscode.OverviewRulerLane.Left,
});
export class DefaultFormatter extends AbstractFormatter {
format(editor: vscode.TextEditor) {
const fileCoverage = this.coverageMapProvider.getFileCoverage(editor.document.fileName);
if (!fileCoverage) {
return;
}
this.formatBranches(editor, fileCoverage);
this.formatUncoveredLines(editor, fileCoverage);
}
formatBranches(editor: vscode.TextEditor, fileCoverage: FileCoverage) {
const ranges = [];
Object.keys(fileCoverage.b).forEach((branchIndex) => {
fileCoverage.b[branchIndex].forEach((hitCount, locationIndex) => {
if (hitCount > 0) {
return;
}
const branch = fileCoverage.branchMap[branchIndex].locations[locationIndex];
if (!isValidLocation(branch)) {
return;
}
// If the value is `null`, then set it to the first character on its
// line. | branch.start.line - 1,
branch.start.column,
branch.end.line - 1,
endColumn
)
);
});
});
editor.setDecorations(uncoveredBranch, ranges);
}
formatUncoveredLines(editor: vscode.TextEditor, fileCoverage: FileCoverage) {
const lines = fileCoverage.getUncoveredLines();
const ranges = [];
for (const oneBasedLineNumber of lines) {
const zeroBasedLineNumber = Number(oneBasedLineNumber) - 1;
ranges.push(new vscode.Range(zeroBasedLineNumber, 0, zeroBasedLineNumber, 0));
}
editor.setDecorations(uncoveredLine, ranges);
}
clear(editor: vscode.TextEditor) {
editor.setDecorations(uncoveredLine, []);
editor.setDecorations(uncoveredBranch, []);
}
} | const endColumn = branch.end.column || 0;
ranges.push(
new vscode.Range( | random_line_split |
__init__.py | import platform
import glob
from .io import DxlIO, Dxl320IO, DxlError
from .error import BaseErrorHandler
from .controller import BaseDxlController
from .motor import DxlMXMotor, DxlAXRXMotor, DxlXL320Motor
from ..robot import Robot
def _get_available_ports():
""" Tries to find the available usb2serial port on your system. """
if platform.system() == 'Darwin':
return glob.glob('/dev/tty.usb*')
elif platform.system() == 'Linux':
return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*')
elif platform.system() == 'Windows':
import _winreg
import itertools
ports = []
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
for i in itertools.count():
try:
ports.append(str(_winreg.EnumValue(key, i)[1]))
except WindowsError:
return ports
return []
def get_available_ports(only_free=False):
ports = _get_available_ports()
if only_free:
ports = list(set(ports) - set(DxlIO.get_used_ports()))
return ports
def find_port(ids, strict=True):
|
def autodetect_robot():
""" Creates a :class:`~pypot.robot.robot.Robot` by detecting dynamixel motors on all available ports. """
motor_controllers = []
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
dxl_io = DxlIOCls(port)
ids = dxl_io.scan()
if not ids:
dxl_io.close()
continue
models = dxl_io.get_model(ids)
motorcls = {
'MX': DxlMXMotor,
'RX': DxlAXRXMotor,
'AX': DxlAXRXMotor,
'XL': DxlXL320Motor
}
motors = [motorcls[model[:2]](id, model=model)
for id, model in zip(ids, models)]
c = BaseDxlController(dxl_io, motors)
motor_controllers.append(c)
return Robot(motor_controllers)
| """ Find the port with the specified attached motor ids.
:param list ids: list of motor ids to find
:param bool strict: specify if all ids should be find (when set to False, only half motor must be found)
.. warning:: If two (or more) ports are attached to the same list of motor ids the first match will be returned.
"""
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
try:
with DxlIOCls(port) as dxl:
founds = len(dxl.scan(ids))
if strict and founds == len(ids):
return port
if not strict and founds >= len(ids) / 2:
return port
except DxlError:
continue
raise IndexError('No suitable port found for ids {}!'.format(ids)) | identifier_body |
__init__.py | import platform
import glob
from .io import DxlIO, Dxl320IO, DxlError
from .error import BaseErrorHandler
from .controller import BaseDxlController
from .motor import DxlMXMotor, DxlAXRXMotor, DxlXL320Motor
from ..robot import Robot
def _get_available_ports():
""" Tries to find the available usb2serial port on your system. """
if platform.system() == 'Darwin':
return glob.glob('/dev/tty.usb*')
elif platform.system() == 'Linux':
return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*')
elif platform.system() == 'Windows':
import _winreg
import itertools
ports = []
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
for i in itertools.count():
try:
ports.append(str(_winreg.EnumValue(key, i)[1]))
except WindowsError:
return ports
return []
def get_available_ports(only_free=False):
ports = _get_available_ports()
if only_free:
ports = list(set(ports) - set(DxlIO.get_used_ports()))
return ports
def find_port(ids, strict=True):
""" Find the port with the specified attached motor ids.
:param list ids: list of motor ids to find
:param bool strict: specify if all ids should be find (when set to False, only half motor must be found)
.. warning:: If two (or more) ports are attached to the same list of motor ids the first match will be returned.
"""
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
try:
with DxlIOCls(port) as dxl:
founds = len(dxl.scan(ids))
if strict and founds == len(ids):
return port
| raise IndexError('No suitable port found for ids {}!'.format(ids))
def autodetect_robot():
""" Creates a :class:`~pypot.robot.robot.Robot` by detecting dynamixel motors on all available ports. """
motor_controllers = []
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
dxl_io = DxlIOCls(port)
ids = dxl_io.scan()
if not ids:
dxl_io.close()
continue
models = dxl_io.get_model(ids)
motorcls = {
'MX': DxlMXMotor,
'RX': DxlAXRXMotor,
'AX': DxlAXRXMotor,
'XL': DxlXL320Motor
}
motors = [motorcls[model[:2]](id, model=model)
for id, model in zip(ids, models)]
c = BaseDxlController(dxl_io, motors)
motor_controllers.append(c)
return Robot(motor_controllers) | if not strict and founds >= len(ids) / 2:
return port
except DxlError:
continue
| random_line_split |
__init__.py | import platform
import glob
from .io import DxlIO, Dxl320IO, DxlError
from .error import BaseErrorHandler
from .controller import BaseDxlController
from .motor import DxlMXMotor, DxlAXRXMotor, DxlXL320Motor
from ..robot import Robot
def _get_available_ports():
""" Tries to find the available usb2serial port on your system. """
if platform.system() == 'Darwin':
return glob.glob('/dev/tty.usb*')
elif platform.system() == 'Linux':
return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*')
elif platform.system() == 'Windows':
import _winreg
import itertools
ports = []
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
for i in itertools.count():
|
return []
def get_available_ports(only_free=False):
ports = _get_available_ports()
if only_free:
ports = list(set(ports) - set(DxlIO.get_used_ports()))
return ports
def find_port(ids, strict=True):
""" Find the port with the specified attached motor ids.
:param list ids: list of motor ids to find
:param bool strict: specify if all ids should be find (when set to False, only half motor must be found)
.. warning:: If two (or more) ports are attached to the same list of motor ids the first match will be returned.
"""
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
try:
with DxlIOCls(port) as dxl:
founds = len(dxl.scan(ids))
if strict and founds == len(ids):
return port
if not strict and founds >= len(ids) / 2:
return port
except DxlError:
continue
raise IndexError('No suitable port found for ids {}!'.format(ids))
def autodetect_robot():
""" Creates a :class:`~pypot.robot.robot.Robot` by detecting dynamixel motors on all available ports. """
motor_controllers = []
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
dxl_io = DxlIOCls(port)
ids = dxl_io.scan()
if not ids:
dxl_io.close()
continue
models = dxl_io.get_model(ids)
motorcls = {
'MX': DxlMXMotor,
'RX': DxlAXRXMotor,
'AX': DxlAXRXMotor,
'XL': DxlXL320Motor
}
motors = [motorcls[model[:2]](id, model=model)
for id, model in zip(ids, models)]
c = BaseDxlController(dxl_io, motors)
motor_controllers.append(c)
return Robot(motor_controllers)
| try:
ports.append(str(_winreg.EnumValue(key, i)[1]))
except WindowsError:
return ports | conditional_block |
__init__.py | import platform
import glob
from .io import DxlIO, Dxl320IO, DxlError
from .error import BaseErrorHandler
from .controller import BaseDxlController
from .motor import DxlMXMotor, DxlAXRXMotor, DxlXL320Motor
from ..robot import Robot
def _get_available_ports():
""" Tries to find the available usb2serial port on your system. """
if platform.system() == 'Darwin':
return glob.glob('/dev/tty.usb*')
elif platform.system() == 'Linux':
return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*')
elif platform.system() == 'Windows':
import _winreg
import itertools
ports = []
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
for i in itertools.count():
try:
ports.append(str(_winreg.EnumValue(key, i)[1]))
except WindowsError:
return ports
return []
def get_available_ports(only_free=False):
ports = _get_available_ports()
if only_free:
ports = list(set(ports) - set(DxlIO.get_used_ports()))
return ports
def find_port(ids, strict=True):
""" Find the port with the specified attached motor ids.
:param list ids: list of motor ids to find
:param bool strict: specify if all ids should be find (when set to False, only half motor must be found)
.. warning:: If two (or more) ports are attached to the same list of motor ids the first match will be returned.
"""
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
try:
with DxlIOCls(port) as dxl:
founds = len(dxl.scan(ids))
if strict and founds == len(ids):
return port
if not strict and founds >= len(ids) / 2:
return port
except DxlError:
continue
raise IndexError('No suitable port found for ids {}!'.format(ids))
def | ():
""" Creates a :class:`~pypot.robot.robot.Robot` by detecting dynamixel motors on all available ports. """
motor_controllers = []
for port in get_available_ports():
for DxlIOCls in (DxlIO, Dxl320IO):
dxl_io = DxlIOCls(port)
ids = dxl_io.scan()
if not ids:
dxl_io.close()
continue
models = dxl_io.get_model(ids)
motorcls = {
'MX': DxlMXMotor,
'RX': DxlAXRXMotor,
'AX': DxlAXRXMotor,
'XL': DxlXL320Motor
}
motors = [motorcls[model[:2]](id, model=model)
for id, model in zip(ids, models)]
c = BaseDxlController(dxl_io, motors)
motor_controllers.append(c)
return Robot(motor_controllers)
| autodetect_robot | identifier_name |
file_to_gcs.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class FileToGoogleCloudStorageOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage
:param src: Path to the local file
:type src: string
:param dst: Destination path within the specified bucket
:type dst: string
:param bucket: The bucket to upload to
:type bucket: string
:param google_cloud_storage_conn_id: The Airflow connection ID to upload with
:type google_cloud_storage_conn_id: string
:param mime_type: The mime-type string
:type mime_type: string
:param delegate_to: The account to impersonate, if any
:type delegate_to: string
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
google_cloud_storage_conn_id='google_cloud_storage_default',
mime_type='application/octet-stream',
delegate_to=None,
*args,
**kwargs):
super(FileToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.src = src
self.dst = dst
self.bucket = bucket
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
def | (self, context):
"""
Uploads the file to Google cloud storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket=self.bucket,
object=self.dst,
mime_type=self.mime_type,
filename=self.src)
| execute | identifier_name |
file_to_gcs.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and | from airflow.utils.decorators import apply_defaults
class FileToGoogleCloudStorageOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage
:param src: Path to the local file
:type src: string
:param dst: Destination path within the specified bucket
:type dst: string
:param bucket: The bucket to upload to
:type bucket: string
:param google_cloud_storage_conn_id: The Airflow connection ID to upload with
:type google_cloud_storage_conn_id: string
:param mime_type: The mime-type string
:type mime_type: string
:param delegate_to: The account to impersonate, if any
:type delegate_to: string
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
google_cloud_storage_conn_id='google_cloud_storage_default',
mime_type='application/octet-stream',
delegate_to=None,
*args,
**kwargs):
super(FileToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.src = src
self.dst = dst
self.bucket = bucket
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
def execute(self, context):
"""
Uploads the file to Google cloud storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket=self.bucket,
object=self.dst,
mime_type=self.mime_type,
filename=self.src) | # limitations under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator | random_line_split |
file_to_gcs.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class FileToGoogleCloudStorageOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage
:param src: Path to the local file
:type src: string
:param dst: Destination path within the specified bucket
:type dst: string
:param bucket: The bucket to upload to
:type bucket: string
:param google_cloud_storage_conn_id: The Airflow connection ID to upload with
:type google_cloud_storage_conn_id: string
:param mime_type: The mime-type string
:type mime_type: string
:param delegate_to: The account to impersonate, if any
:type delegate_to: string
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
google_cloud_storage_conn_id='google_cloud_storage_default',
mime_type='application/octet-stream',
delegate_to=None,
*args,
**kwargs):
super(FileToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.src = src
self.dst = dst
self.bucket = bucket
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
def execute(self, context):
| """
Uploads the file to Google cloud storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket=self.bucket,
object=self.dst,
mime_type=self.mime_type,
filename=self.src) | identifier_body | |
SimplePrompt.tsx | import * as React from 'react';
import '../App.css';
import * as jsutil from '../../../shared/src/util/jsutil';
import { SimplePromptModel } from '../models/SimplePromptModel';
import {
Modal,
ModalHeader,
ModalBody,
ModalTitle,
ModalFooter,
Button,
FormGroup,
FormControl,
ControlLabel,
HelpBlock
} from 'react-bootstrap';
interface SimplePromptState {
currentValue: string;
}
export class SimplePrompt extends React.Component<SimplePromptModel, SimplePromptState>{
constructor(props:SimplePromptModel){
super(props);
this.state={currentValue:''};
}
onClickSubmit() {
this.props.okCallback(this.state.currentValue);
}
onClickCancel() {
this.props.cancelCallback();
}
onTextChange(value: string) {
this.setState({ currentValue: value });
}
render() {
// SPROMPT : add some markup for validation message
return (
<Modal show={this.props.isActive} onHide={() => this.onClickCancel()}>
<ModalHeader closeButton>
<ModalTitle>{this.props.title}</ModalTitle>
</ModalHeader>
<ModalBody>
<label className='control-label' htmlFor='inputControl'>{this.props.prompt}</label>
<input type='text'
className='form-control'
value={this.state.currentValue}
onChange={(ev) => this.onTextChange(ev.target.value)}
/>
</ModalBody>
<ModalFooter>
<Button onClick={() => this.onClickSubmit()}>OK</Button>
</ModalFooter>
</Modal >
);
{/*<div id="promptModal" className="modal fade" tabIndex={-1} role="dialog">
<div className="modal-dialog" role="document">
<div className="modal-content">
<div className="modal-header">
<button type="button" className="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
<h4 className="modal-title">Modal title</h4>
</div>
<div className="modal-body">
<label className='control-label' htmlFor='inputControl'>YOURPROMPT</label>
<input type='text' className='form-control' id='inputControl' placeholder='YOURPLACEHOLDER' />
</div>
<div className="modal-footer"> | </div>*/}
}
} | <button type="button" className="btn btn-default" data-dismiss="modal">Close</button>
<button type="button" className="btn btn-primary">Save changes</button>
</div>
</div>
</div> | random_line_split |
SimplePrompt.tsx | import * as React from 'react';
import '../App.css';
import * as jsutil from '../../../shared/src/util/jsutil';
import { SimplePromptModel } from '../models/SimplePromptModel';
import {
Modal,
ModalHeader,
ModalBody,
ModalTitle,
ModalFooter,
Button,
FormGroup,
FormControl,
ControlLabel,
HelpBlock
} from 'react-bootstrap';
interface SimplePromptState {
currentValue: string;
}
export class SimplePrompt extends React.Component<SimplePromptModel, SimplePromptState>{
constructor(props:SimplePromptModel){
super(props);
this.state={currentValue:''};
}
| () {
this.props.okCallback(this.state.currentValue);
}
onClickCancel() {
this.props.cancelCallback();
}
onTextChange(value: string) {
this.setState({ currentValue: value });
}
render() {
// SPROMPT : add some markup for validation message
return (
<Modal show={this.props.isActive} onHide={() => this.onClickCancel()}>
<ModalHeader closeButton>
<ModalTitle>{this.props.title}</ModalTitle>
</ModalHeader>
<ModalBody>
<label className='control-label' htmlFor='inputControl'>{this.props.prompt}</label>
<input type='text'
className='form-control'
value={this.state.currentValue}
onChange={(ev) => this.onTextChange(ev.target.value)}
/>
</ModalBody>
<ModalFooter>
<Button onClick={() => this.onClickSubmit()}>OK</Button>
</ModalFooter>
</Modal >
);
{/*<div id="promptModal" className="modal fade" tabIndex={-1} role="dialog">
<div className="modal-dialog" role="document">
<div className="modal-content">
<div className="modal-header">
<button type="button" className="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
<h4 className="modal-title">Modal title</h4>
</div>
<div className="modal-body">
<label className='control-label' htmlFor='inputControl'>YOURPROMPT</label>
<input type='text' className='form-control' id='inputControl' placeholder='YOURPLACEHOLDER' />
</div>
<div className="modal-footer">
<button type="button" className="btn btn-default" data-dismiss="modal">Close</button>
<button type="button" className="btn btn-primary">Save changes</button>
</div>
</div>
</div>
</div>*/}
}
} | onClickSubmit | identifier_name |
euler50.rs | pub fn main() {
let primes = || (2..).filter(|&n| (2..(n as f32).sqrt() as u32 + 1).all(|i| n % i != 0));
let isprime = |n: &u32| (2..(*n as f32).sqrt() as u32 + 1).all(|i| n % i != 0);
let mut sum = 0;
let mut primesums = Vec::new();
for p in primes() {
sum += p;
if sum > 1_000_000 {
break;
};
primesums.push(sum);
}
let mut max = 0;
for &n in &primesums {
for &&p in &primesums.iter().take_while(|&&p| p < n).collect::<Vec<_>>() {
let candidate = n - p;
if isprime(&candidate) && candidate > max |
}
}
println!("{}", max);
}
| {
max = candidate;
} | conditional_block |
euler50.rs | let isprime = |n: &u32| (2..(*n as f32).sqrt() as u32 + 1).all(|i| n % i != 0);
let mut sum = 0;
let mut primesums = Vec::new();
for p in primes() {
sum += p;
if sum > 1_000_000 {
break;
};
primesums.push(sum);
}
let mut max = 0;
for &n in &primesums {
for &&p in &primesums.iter().take_while(|&&p| p < n).collect::<Vec<_>>() {
let candidate = n - p;
if isprime(&candidate) && candidate > max {
max = candidate;
}
}
}
println!("{}", max);
} | pub fn main() {
let primes = || (2..).filter(|&n| (2..(n as f32).sqrt() as u32 + 1).all(|i| n % i != 0)); | random_line_split | |
euler50.rs | pub fn | () {
let primes = || (2..).filter(|&n| (2..(n as f32).sqrt() as u32 + 1).all(|i| n % i != 0));
let isprime = |n: &u32| (2..(*n as f32).sqrt() as u32 + 1).all(|i| n % i != 0);
let mut sum = 0;
let mut primesums = Vec::new();
for p in primes() {
sum += p;
if sum > 1_000_000 {
break;
};
primesums.push(sum);
}
let mut max = 0;
for &n in &primesums {
for &&p in &primesums.iter().take_while(|&&p| p < n).collect::<Vec<_>>() {
let candidate = n - p;
if isprime(&candidate) && candidate > max {
max = candidate;
}
}
}
println!("{}", max);
}
| main | identifier_name |
euler50.rs | pub fn main() | {
let primes = || (2..).filter(|&n| (2..(n as f32).sqrt() as u32 + 1).all(|i| n % i != 0));
let isprime = |n: &u32| (2..(*n as f32).sqrt() as u32 + 1).all(|i| n % i != 0);
let mut sum = 0;
let mut primesums = Vec::new();
for p in primes() {
sum += p;
if sum > 1_000_000 {
break;
};
primesums.push(sum);
}
let mut max = 0;
for &n in &primesums {
for &&p in &primesums.iter().take_while(|&&p| p < n).collect::<Vec<_>>() {
let candidate = n - p;
if isprime(&candidate) && candidate > max {
max = candidate;
}
}
}
println!("{}", max);
} | identifier_body | |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "style_traits"]
#![crate_type = "rlib"]
#![deny(unsafe_code, missing_docs)]
extern crate app_units;
#[macro_use] extern crate bitflags;
#[macro_use] extern crate cssparser;
extern crate euclid;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
#[cfg(feature = "servo")] extern crate webrender_api;
extern crate servo_arc;
#[cfg(feature = "servo")] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_url;
#[cfg(feature = "servo")] pub use webrender_api::DevicePixel;
use cssparser::{CowRcStr, Token};
use selectors::parser::SelectorParseErrorKind;
#[cfg(feature = "servo")] use servo_atoms::Atom;
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
#[cfg(not(feature = "servo"))]
#[derive(Clone, Copy, Debug)]
pub enum DevicePixel {}
/// Represents a mobile style pinch zoom factor.
/// TODO(gw): Once WR supports pinch zoom, use a type directly from webrender_api.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize, MallocSizeOf))]
pub struct PinchZoomFactor(f32);
impl PinchZoomFactor {
/// Construct a new pinch zoom factor.
pub fn new(scale: f32) -> PinchZoomFactor {
PinchZoomFactor(scale)
}
/// Get the pinch zoom factor as an untyped float.
pub fn get(&self) -> f32 {
self.0
}
}
/// One CSS "px" in the coordinate system of the "initial viewport":
/// <http://www.w3.org/TR/css-device-adapt/#initial-viewport>
///
/// `CSSPixel` is equal to `DeviceIndependentPixel` times a "page zoom" factor controlled by the user. This is
/// the desktop-style "full page" zoom that enlarges content but then reflows the layout viewport
/// so it still exactly fits the visible area.
///
/// At the default zoom level of 100%, one `CSSPixel` is equal to one `DeviceIndependentPixel`. However, if the
/// document is zoomed in or out then this scale may be larger or smaller.
#[derive(Clone, Copy, Debug)]
pub enum CSSPixel {}
// In summary, the hierarchy of pixel units and the factors to convert from one to the next:
//
// DevicePixel
// / hidpi_ratio => DeviceIndependentPixel
// / desktop_zoom => CSSPixel
pub mod cursor;
pub mod specified_value_info;
#[macro_use]
pub mod values;
#[macro_use]
pub mod viewport;
pub use specified_value_info::{CssType, KeywordsCollectFn, SpecifiedValueInfo};
pub use values::{Comma, CommaWithSpace, CssWriter, OneOrMoreSeparated, Separator, Space, ToCss};
/// The error type for all CSS parsing routines.
pub type ParseError<'i> = cssparser::ParseError<'i, StyleParseErrorKind<'i>>;
/// Error in property value parsing
pub type ValueParseError<'i> = cssparser::ParseError<'i, ValueParseErrorKind<'i>>;
#[derive(Clone, Debug, PartialEq)]
/// Errors that can be encountered while parsing CSS values.
pub enum StyleParseErrorKind<'i> {
/// A bad URL token in a DVB.
BadUrlInDeclarationValueBlock(CowRcStr<'i>),
/// A bad string token in a DVB.
BadStringInDeclarationValueBlock(CowRcStr<'i>),
/// Unexpected closing parenthesis in a DVB.
UnbalancedCloseParenthesisInDeclarationValueBlock,
/// Unexpected closing bracket in a DVB.
UnbalancedCloseSquareBracketInDeclarationValueBlock,
/// Unexpected closing curly bracket in a DVB.
UnbalancedCloseCurlyBracketInDeclarationValueBlock,
/// A property declaration value had input remaining after successfully parsing.
PropertyDeclarationValueNotExhausted,
/// An unexpected dimension token was encountered.
UnexpectedDimension(CowRcStr<'i>),
/// Expected identifier not found.
ExpectedIdentifier(Token<'i>),
/// Missing or invalid media feature name.
MediaQueryExpectedFeatureName(CowRcStr<'i>),
/// Missing or invalid media feature value.
MediaQueryExpectedFeatureValue,
/// min- or max- properties must have a value.
RangedExpressionWithNoValue,
/// A function was encountered that was not expected.
UnexpectedFunction(CowRcStr<'i>),
/// @namespace must be before any rule but @charset and @import
UnexpectedNamespaceRule,
/// @import must be before any rule but @charset
UnexpectedImportRule,
/// Unexpected @charset rule encountered.
UnexpectedCharsetRule,
/// Unsupported @ rule
UnsupportedAtRule(CowRcStr<'i>),
/// A placeholder for many sources of errors that require more specific variants.
UnspecifiedError,
/// An unexpected token was found within a namespace rule.
UnexpectedTokenWithinNamespace(Token<'i>),
/// An error was encountered while parsing a property value.
ValueError(ValueParseErrorKind<'i>),
/// An error was encountered while parsing a selector
SelectorError(SelectorParseErrorKind<'i>),
/// The property declaration was for an unknown property.
UnknownProperty(CowRcStr<'i>),
/// An unknown vendor-specific identifier was encountered.
UnknownVendorProperty,
/// The property declaration was for a disabled experimental property.
ExperimentalProperty,
/// The property declaration contained an invalid color value.
InvalidColor(CowRcStr<'i>, Token<'i>),
/// The property declaration contained an invalid filter value.
InvalidFilter(CowRcStr<'i>, Token<'i>),
/// The property declaration contained an invalid value.
OtherInvalidValue(CowRcStr<'i>),
/// The declaration contained an animation property, and we were parsing
/// this as a keyframe block (so that property should be ignored).
///
/// See: https://drafts.csswg.org/css-animations/#keyframes
AnimationPropertyInKeyframeBlock,
/// The property is not allowed within a page rule.
NotAllowedInPageRule,
}
impl<'i> From<ValueParseErrorKind<'i>> for StyleParseErrorKind<'i> {
fn from(this: ValueParseErrorKind<'i>) -> Self {
StyleParseErrorKind::ValueError(this)
}
}
impl<'i> From<SelectorParseErrorKind<'i>> for StyleParseErrorKind<'i> {
fn from(this: SelectorParseErrorKind<'i>) -> Self {
StyleParseErrorKind::SelectorError(this)
}
}
/// Specific errors that can be encountered while parsing property values.
#[derive(Clone, Debug, PartialEq)]
pub enum ValueParseErrorKind<'i> {
/// An invalid token was encountered while parsing a color value.
InvalidColor(Token<'i>),
/// An invalid filter value was encountered.
InvalidFilter(Token<'i>),
}
impl<'i> StyleParseErrorKind<'i> {
/// Create an InvalidValue parse error
pub fn new_invalid(name: CowRcStr<'i>, value_error: ParseError<'i>) -> ParseError<'i> {
let variant = match value_error.kind {
cssparser::ParseErrorKind::Custom(StyleParseErrorKind::ValueError(e)) => {
match e {
ValueParseErrorKind::InvalidColor(token) => |
ValueParseErrorKind::InvalidFilter(token) => {
StyleParseErrorKind::InvalidFilter(name, token)
}
}
}
_ => StyleParseErrorKind::OtherInvalidValue(name),
};
cssparser::ParseError {
kind: cssparser::ParseErrorKind::Custom(variant),
location: value_error.location,
}
}
}
bitflags! {
/// The mode to use when parsing values.
pub struct ParsingMode: u8 {
/// In CSS; lengths must have units, except for zero values, where the unit can be omitted.
/// <https://www.w3.org/TR/css3-values/#lengths>
const DEFAULT = 0x00;
/// In SVG; a coordinate or length value without a unit identifier (e.g., "25") is assumed
/// to be in user units (px).
/// <https://www.w3.org/TR/SVG/coords.html#Units>
const ALLOW_UNITLESS_LENGTH = 0x01;
/// In SVG; out-of-range values are not treated as an error in parsing.
/// <https://www.w3.org/TR/SVG/implnote.html#RangeClamping>
const ALLOW_ALL_NUMERIC_VALUES = 0x02;
}
}
impl ParsingMode {
/// Whether the parsing mode allows unitless lengths for non-zero values to be intpreted as px.
#[inline]
pub fn allows_unitless_lengths(&self) -> bool {
self.intersects(ParsingMode::ALLOW_UNITLESS_LENGTH)
}
/// Whether the parsing mode allows all numeric values.
#[inline]
pub fn allows_all_numeric_values(&self) -> bool {
self.intersects(ParsingMode::ALLOW_ALL_NUMERIC_VALUES)
}
}
#[cfg(feature = "servo")]
/// Speculatively execute paint code in the worklet thread pool.
pub trait SpeculativePainter: Send + Sync {
/// <https://drafts.css-houdini.org/css-paint-api/#draw-a-paint-image>
fn speculatively_draw_a_paint_image(&self, properties: Vec<(Atom, String)>, arguments: Vec<String>);
}
| {
StyleParseErrorKind::InvalidColor(name, token)
} | conditional_block |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "style_traits"]
#![crate_type = "rlib"]
#![deny(unsafe_code, missing_docs)]
extern crate app_units;
#[macro_use] extern crate bitflags;
#[macro_use] extern crate cssparser;
extern crate euclid;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
#[cfg(feature = "servo")] extern crate webrender_api;
extern crate servo_arc;
#[cfg(feature = "servo")] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_url;
#[cfg(feature = "servo")] pub use webrender_api::DevicePixel;
use cssparser::{CowRcStr, Token};
use selectors::parser::SelectorParseErrorKind;
#[cfg(feature = "servo")] use servo_atoms::Atom;
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
#[cfg(not(feature = "servo"))]
#[derive(Clone, Copy, Debug)]
pub enum DevicePixel {}
/// Represents a mobile style pinch zoom factor.
/// TODO(gw): Once WR supports pinch zoom, use a type directly from webrender_api.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize, MallocSizeOf))]
pub struct PinchZoomFactor(f32);
impl PinchZoomFactor {
/// Construct a new pinch zoom factor.
pub fn new(scale: f32) -> PinchZoomFactor |
/// Get the pinch zoom factor as an untyped float.
pub fn get(&self) -> f32 {
self.0
}
}
/// One CSS "px" in the coordinate system of the "initial viewport":
/// <http://www.w3.org/TR/css-device-adapt/#initial-viewport>
///
/// `CSSPixel` is equal to `DeviceIndependentPixel` times a "page zoom" factor controlled by the user. This is
/// the desktop-style "full page" zoom that enlarges content but then reflows the layout viewport
/// so it still exactly fits the visible area.
///
/// At the default zoom level of 100%, one `CSSPixel` is equal to one `DeviceIndependentPixel`. However, if the
/// document is zoomed in or out then this scale may be larger or smaller.
#[derive(Clone, Copy, Debug)]
pub enum CSSPixel {}
// In summary, the hierarchy of pixel units and the factors to convert from one to the next:
//
// DevicePixel
// / hidpi_ratio => DeviceIndependentPixel
// / desktop_zoom => CSSPixel
pub mod cursor;
pub mod specified_value_info;
#[macro_use]
pub mod values;
#[macro_use]
pub mod viewport;
pub use specified_value_info::{CssType, KeywordsCollectFn, SpecifiedValueInfo};
pub use values::{Comma, CommaWithSpace, CssWriter, OneOrMoreSeparated, Separator, Space, ToCss};
/// The error type for all CSS parsing routines.
pub type ParseError<'i> = cssparser::ParseError<'i, StyleParseErrorKind<'i>>;
/// Error in property value parsing
pub type ValueParseError<'i> = cssparser::ParseError<'i, ValueParseErrorKind<'i>>;
#[derive(Clone, Debug, PartialEq)]
/// Errors that can be encountered while parsing CSS values.
pub enum StyleParseErrorKind<'i> {
/// A bad URL token in a DVB.
BadUrlInDeclarationValueBlock(CowRcStr<'i>),
/// A bad string token in a DVB.
BadStringInDeclarationValueBlock(CowRcStr<'i>),
/// Unexpected closing parenthesis in a DVB.
UnbalancedCloseParenthesisInDeclarationValueBlock,
/// Unexpected closing bracket in a DVB.
UnbalancedCloseSquareBracketInDeclarationValueBlock,
/// Unexpected closing curly bracket in a DVB.
UnbalancedCloseCurlyBracketInDeclarationValueBlock,
/// A property declaration value had input remaining after successfully parsing.
PropertyDeclarationValueNotExhausted,
/// An unexpected dimension token was encountered.
UnexpectedDimension(CowRcStr<'i>),
/// Expected identifier not found.
ExpectedIdentifier(Token<'i>),
/// Missing or invalid media feature name.
MediaQueryExpectedFeatureName(CowRcStr<'i>),
/// Missing or invalid media feature value.
MediaQueryExpectedFeatureValue,
/// min- or max- properties must have a value.
RangedExpressionWithNoValue,
/// A function was encountered that was not expected.
UnexpectedFunction(CowRcStr<'i>),
/// @namespace must be before any rule but @charset and @import
UnexpectedNamespaceRule,
/// @import must be before any rule but @charset
UnexpectedImportRule,
/// Unexpected @charset rule encountered.
UnexpectedCharsetRule,
/// Unsupported @ rule
UnsupportedAtRule(CowRcStr<'i>),
/// A placeholder for many sources of errors that require more specific variants.
UnspecifiedError,
/// An unexpected token was found within a namespace rule.
UnexpectedTokenWithinNamespace(Token<'i>),
/// An error was encountered while parsing a property value.
ValueError(ValueParseErrorKind<'i>),
/// An error was encountered while parsing a selector
SelectorError(SelectorParseErrorKind<'i>),
/// The property declaration was for an unknown property.
UnknownProperty(CowRcStr<'i>),
/// An unknown vendor-specific identifier was encountered.
UnknownVendorProperty,
/// The property declaration was for a disabled experimental property.
ExperimentalProperty,
/// The property declaration contained an invalid color value.
InvalidColor(CowRcStr<'i>, Token<'i>),
/// The property declaration contained an invalid filter value.
InvalidFilter(CowRcStr<'i>, Token<'i>),
/// The property declaration contained an invalid value.
OtherInvalidValue(CowRcStr<'i>),
/// The declaration contained an animation property, and we were parsing
/// this as a keyframe block (so that property should be ignored).
///
/// See: https://drafts.csswg.org/css-animations/#keyframes
AnimationPropertyInKeyframeBlock,
/// The property is not allowed within a page rule.
NotAllowedInPageRule,
}
impl<'i> From<ValueParseErrorKind<'i>> for StyleParseErrorKind<'i> {
fn from(this: ValueParseErrorKind<'i>) -> Self {
StyleParseErrorKind::ValueError(this)
}
}
impl<'i> From<SelectorParseErrorKind<'i>> for StyleParseErrorKind<'i> {
fn from(this: SelectorParseErrorKind<'i>) -> Self {
StyleParseErrorKind::SelectorError(this)
}
}
/// Specific errors that can be encountered while parsing property values.
#[derive(Clone, Debug, PartialEq)]
pub enum ValueParseErrorKind<'i> {
/// An invalid token was encountered while parsing a color value.
InvalidColor(Token<'i>),
/// An invalid filter value was encountered.
InvalidFilter(Token<'i>),
}
impl<'i> StyleParseErrorKind<'i> {
/// Create an InvalidValue parse error
pub fn new_invalid(name: CowRcStr<'i>, value_error: ParseError<'i>) -> ParseError<'i> {
let variant = match value_error.kind {
cssparser::ParseErrorKind::Custom(StyleParseErrorKind::ValueError(e)) => {
match e {
ValueParseErrorKind::InvalidColor(token) => {
StyleParseErrorKind::InvalidColor(name, token)
}
ValueParseErrorKind::InvalidFilter(token) => {
StyleParseErrorKind::InvalidFilter(name, token)
}
}
}
_ => StyleParseErrorKind::OtherInvalidValue(name),
};
cssparser::ParseError {
kind: cssparser::ParseErrorKind::Custom(variant),
location: value_error.location,
}
}
}
bitflags! {
/// The mode to use when parsing values.
pub struct ParsingMode: u8 {
/// In CSS; lengths must have units, except for zero values, where the unit can be omitted.
/// <https://www.w3.org/TR/css3-values/#lengths>
const DEFAULT = 0x00;
/// In SVG; a coordinate or length value without a unit identifier (e.g., "25") is assumed
/// to be in user units (px).
/// <https://www.w3.org/TR/SVG/coords.html#Units>
const ALLOW_UNITLESS_LENGTH = 0x01;
/// In SVG; out-of-range values are not treated as an error in parsing.
/// <https://www.w3.org/TR/SVG/implnote.html#RangeClamping>
const ALLOW_ALL_NUMERIC_VALUES = 0x02;
}
}
impl ParsingMode {
/// Whether the parsing mode allows unitless lengths for non-zero values to be intpreted as px.
#[inline]
pub fn allows_unitless_lengths(&self) -> bool {
self.intersects(ParsingMode::ALLOW_UNITLESS_LENGTH)
}
/// Whether the parsing mode allows all numeric values.
#[inline]
pub fn allows_all_numeric_values(&self) -> bool {
self.intersects(ParsingMode::ALLOW_ALL_NUMERIC_VALUES)
}
}
#[cfg(feature = "servo")]
/// Speculatively execute paint code in the worklet thread pool.
pub trait SpeculativePainter: Send + Sync {
/// <https://drafts.css-houdini.org/css-paint-api/#draw-a-paint-image>
fn speculatively_draw_a_paint_image(&self, properties: Vec<(Atom, String)>, arguments: Vec<String>);
}
| {
PinchZoomFactor(scale)
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "style_traits"]
#![crate_type = "rlib"]
#![deny(unsafe_code, missing_docs)]
extern crate app_units;
#[macro_use] extern crate bitflags;
#[macro_use] extern crate cssparser;
extern crate euclid;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
#[cfg(feature = "servo")] extern crate webrender_api;
extern crate servo_arc;
#[cfg(feature = "servo")] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_url;
#[cfg(feature = "servo")] pub use webrender_api::DevicePixel;
use cssparser::{CowRcStr, Token};
use selectors::parser::SelectorParseErrorKind;
#[cfg(feature = "servo")] use servo_atoms::Atom;
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
#[cfg(not(feature = "servo"))]
#[derive(Clone, Copy, Debug)]
pub enum DevicePixel {}
/// Represents a mobile style pinch zoom factor.
/// TODO(gw): Once WR supports pinch zoom, use a type directly from webrender_api.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize, MallocSizeOf))]
pub struct PinchZoomFactor(f32);
impl PinchZoomFactor {
/// Construct a new pinch zoom factor.
pub fn new(scale: f32) -> PinchZoomFactor {
PinchZoomFactor(scale)
}
/// Get the pinch zoom factor as an untyped float.
pub fn get(&self) -> f32 {
self.0
}
}
/// One CSS "px" in the coordinate system of the "initial viewport":
/// <http://www.w3.org/TR/css-device-adapt/#initial-viewport>
///
/// `CSSPixel` is equal to `DeviceIndependentPixel` times a "page zoom" factor controlled by the user. This is
/// the desktop-style "full page" zoom that enlarges content but then reflows the layout viewport
/// so it still exactly fits the visible area.
///
/// At the default zoom level of 100%, one `CSSPixel` is equal to one `DeviceIndependentPixel`. However, if the
/// document is zoomed in or out then this scale may be larger or smaller.
#[derive(Clone, Copy, Debug)]
pub enum CSSPixel {}
// In summary, the hierarchy of pixel units and the factors to convert from one to the next:
//
// DevicePixel
// / hidpi_ratio => DeviceIndependentPixel
// / desktop_zoom => CSSPixel
pub mod cursor;
pub mod specified_value_info;
#[macro_use]
pub mod values;
#[macro_use]
pub mod viewport;
pub use specified_value_info::{CssType, KeywordsCollectFn, SpecifiedValueInfo};
pub use values::{Comma, CommaWithSpace, CssWriter, OneOrMoreSeparated, Separator, Space, ToCss};
/// The error type for all CSS parsing routines.
pub type ParseError<'i> = cssparser::ParseError<'i, StyleParseErrorKind<'i>>;
/// Error in property value parsing
pub type ValueParseError<'i> = cssparser::ParseError<'i, ValueParseErrorKind<'i>>;
#[derive(Clone, Debug, PartialEq)]
/// Errors that can be encountered while parsing CSS values.
pub enum StyleParseErrorKind<'i> {
/// A bad URL token in a DVB.
BadUrlInDeclarationValueBlock(CowRcStr<'i>),
/// A bad string token in a DVB.
BadStringInDeclarationValueBlock(CowRcStr<'i>),
/// Unexpected closing parenthesis in a DVB.
UnbalancedCloseParenthesisInDeclarationValueBlock,
/// Unexpected closing bracket in a DVB.
UnbalancedCloseSquareBracketInDeclarationValueBlock,
/// Unexpected closing curly bracket in a DVB.
UnbalancedCloseCurlyBracketInDeclarationValueBlock,
/// A property declaration value had input remaining after successfully parsing.
PropertyDeclarationValueNotExhausted,
/// An unexpected dimension token was encountered.
UnexpectedDimension(CowRcStr<'i>),
/// Expected identifier not found.
ExpectedIdentifier(Token<'i>),
/// Missing or invalid media feature name.
MediaQueryExpectedFeatureName(CowRcStr<'i>),
/// Missing or invalid media feature value.
MediaQueryExpectedFeatureValue,
/// min- or max- properties must have a value.
RangedExpressionWithNoValue,
/// A function was encountered that was not expected.
UnexpectedFunction(CowRcStr<'i>),
/// @namespace must be before any rule but @charset and @import
UnexpectedNamespaceRule,
/// @import must be before any rule but @charset
UnexpectedImportRule,
/// Unexpected @charset rule encountered.
UnexpectedCharsetRule,
/// Unsupported @ rule
UnsupportedAtRule(CowRcStr<'i>),
/// A placeholder for many sources of errors that require more specific variants.
UnspecifiedError,
/// An unexpected token was found within a namespace rule.
UnexpectedTokenWithinNamespace(Token<'i>),
/// An error was encountered while parsing a property value.
ValueError(ValueParseErrorKind<'i>),
/// An error was encountered while parsing a selector
SelectorError(SelectorParseErrorKind<'i>),
/// The property declaration was for an unknown property.
UnknownProperty(CowRcStr<'i>),
/// An unknown vendor-specific identifier was encountered.
UnknownVendorProperty,
/// The property declaration was for a disabled experimental property.
ExperimentalProperty,
/// The property declaration contained an invalid color value.
InvalidColor(CowRcStr<'i>, Token<'i>),
/// The property declaration contained an invalid filter value.
InvalidFilter(CowRcStr<'i>, Token<'i>),
/// The property declaration contained an invalid value.
OtherInvalidValue(CowRcStr<'i>),
/// The declaration contained an animation property, and we were parsing
/// this as a keyframe block (so that property should be ignored).
///
/// See: https://drafts.csswg.org/css-animations/#keyframes
AnimationPropertyInKeyframeBlock,
/// The property is not allowed within a page rule.
NotAllowedInPageRule,
}
impl<'i> From<ValueParseErrorKind<'i>> for StyleParseErrorKind<'i> {
fn | (this: ValueParseErrorKind<'i>) -> Self {
StyleParseErrorKind::ValueError(this)
}
}
impl<'i> From<SelectorParseErrorKind<'i>> for StyleParseErrorKind<'i> {
fn from(this: SelectorParseErrorKind<'i>) -> Self {
StyleParseErrorKind::SelectorError(this)
}
}
/// Specific errors that can be encountered while parsing property values.
#[derive(Clone, Debug, PartialEq)]
pub enum ValueParseErrorKind<'i> {
/// An invalid token was encountered while parsing a color value.
InvalidColor(Token<'i>),
/// An invalid filter value was encountered.
InvalidFilter(Token<'i>),
}
impl<'i> StyleParseErrorKind<'i> {
/// Create an InvalidValue parse error
pub fn new_invalid(name: CowRcStr<'i>, value_error: ParseError<'i>) -> ParseError<'i> {
let variant = match value_error.kind {
cssparser::ParseErrorKind::Custom(StyleParseErrorKind::ValueError(e)) => {
match e {
ValueParseErrorKind::InvalidColor(token) => {
StyleParseErrorKind::InvalidColor(name, token)
}
ValueParseErrorKind::InvalidFilter(token) => {
StyleParseErrorKind::InvalidFilter(name, token)
}
}
}
_ => StyleParseErrorKind::OtherInvalidValue(name),
};
cssparser::ParseError {
kind: cssparser::ParseErrorKind::Custom(variant),
location: value_error.location,
}
}
}
bitflags! {
/// The mode to use when parsing values.
pub struct ParsingMode: u8 {
/// In CSS; lengths must have units, except for zero values, where the unit can be omitted.
/// <https://www.w3.org/TR/css3-values/#lengths>
const DEFAULT = 0x00;
/// In SVG; a coordinate or length value without a unit identifier (e.g., "25") is assumed
/// to be in user units (px).
/// <https://www.w3.org/TR/SVG/coords.html#Units>
const ALLOW_UNITLESS_LENGTH = 0x01;
/// In SVG; out-of-range values are not treated as an error in parsing.
/// <https://www.w3.org/TR/SVG/implnote.html#RangeClamping>
const ALLOW_ALL_NUMERIC_VALUES = 0x02;
}
}
impl ParsingMode {
/// Whether the parsing mode allows unitless lengths for non-zero values to be intpreted as px.
#[inline]
pub fn allows_unitless_lengths(&self) -> bool {
self.intersects(ParsingMode::ALLOW_UNITLESS_LENGTH)
}
/// Whether the parsing mode allows all numeric values.
#[inline]
pub fn allows_all_numeric_values(&self) -> bool {
self.intersects(ParsingMode::ALLOW_ALL_NUMERIC_VALUES)
}
}
#[cfg(feature = "servo")]
/// Speculatively execute paint code in the worklet thread pool.
pub trait SpeculativePainter: Send + Sync {
/// <https://drafts.css-houdini.org/css-paint-api/#draw-a-paint-image>
fn speculatively_draw_a_paint_image(&self, properties: Vec<(Atom, String)>, arguments: Vec<String>);
}
| from | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "style_traits"]
#![crate_type = "rlib"]
#![deny(unsafe_code, missing_docs)]
extern crate app_units;
#[macro_use] extern crate bitflags;
#[macro_use] extern crate cssparser;
extern crate euclid;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
#[cfg(feature = "servo")] extern crate webrender_api;
extern crate servo_arc;
#[cfg(feature = "servo")] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_url;
#[cfg(feature = "servo")] pub use webrender_api::DevicePixel;
use cssparser::{CowRcStr, Token};
use selectors::parser::SelectorParseErrorKind;
#[cfg(feature = "servo")] use servo_atoms::Atom;
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
#[cfg(not(feature = "servo"))]
#[derive(Clone, Copy, Debug)]
pub enum DevicePixel {}
/// A mobile-style pinch zoom factor.
///
/// TODO(gw): Once WR supports pinch zoom, use a type directly from webrender_api.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize, MallocSizeOf))]
pub struct PinchZoomFactor(f32);

impl PinchZoomFactor {
    /// Wrap a raw scale value as a pinch zoom factor.
    pub fn new(scale: f32) -> Self {
        PinchZoomFactor(scale)
    }

    /// Unwrap the factor back to a plain `f32`.
    pub fn get(&self) -> f32 {
        let PinchZoomFactor(scale) = *self;
        scale
    }
}
/// One CSS "px" in the coordinate system of the "initial viewport":
/// <http://www.w3.org/TR/css-device-adapt/#initial-viewport>
///
/// `CSSPixel` is equal to `DeviceIndependentPixel` times a "page zoom" factor controlled by the user. This is
/// the desktop-style "full page" zoom that enlarges content but then reflows the layout viewport
/// so it still exactly fits the visible area.
///
/// At the default zoom level of 100%, one `CSSPixel` is equal to one `DeviceIndependentPixel`. However, if the
/// document is zoomed in or out then this scale may be larger or smaller.
// Zero-variant enum used only as a typed-unit tag; never instantiated.
#[derive(Clone, Copy, Debug)]
pub enum CSSPixel {}
// In summary, the hierarchy of pixel units and the factors to convert from one to the next:
//
// DevicePixel
// / hidpi_ratio => DeviceIndependentPixel
// / desktop_zoom => CSSPixel
pub mod cursor;
pub mod specified_value_info;
#[macro_use]
pub mod values;
#[macro_use]
pub mod viewport;
pub use specified_value_info::{CssType, KeywordsCollectFn, SpecifiedValueInfo};
pub use values::{Comma, CommaWithSpace, CssWriter, OneOrMoreSeparated, Separator, Space, ToCss};
/// The error type for all CSS parsing routines.
pub type ParseError<'i> = cssparser::ParseError<'i, StyleParseErrorKind<'i>>;

/// The error type for property value parsing routines.
pub type ValueParseError<'i> = cssparser::ParseError<'i, ValueParseErrorKind<'i>>;
#[derive(Clone, Debug, PartialEq)]
/// Errors that can be encountered while parsing CSS values.
///
/// "DVB" below abbreviates "declaration value block".
pub enum StyleParseErrorKind<'i> {
    /// A bad URL token in a DVB.
    BadUrlInDeclarationValueBlock(CowRcStr<'i>),
    /// A bad string token in a DVB.
    BadStringInDeclarationValueBlock(CowRcStr<'i>),
    /// Unexpected closing parenthesis in a DVB.
    UnbalancedCloseParenthesisInDeclarationValueBlock,
    /// Unexpected closing bracket in a DVB.
    UnbalancedCloseSquareBracketInDeclarationValueBlock,
    /// Unexpected closing curly bracket in a DVB.
    UnbalancedCloseCurlyBracketInDeclarationValueBlock,
    /// A property declaration value had input remaining after successfully parsing.
    PropertyDeclarationValueNotExhausted,
    /// An unexpected dimension token was encountered.
    UnexpectedDimension(CowRcStr<'i>),
    /// Expected identifier not found.
    ExpectedIdentifier(Token<'i>),
    /// Missing or invalid media feature name.
    MediaQueryExpectedFeatureName(CowRcStr<'i>),
    /// Missing or invalid media feature value.
    MediaQueryExpectedFeatureValue,
    /// min- or max- properties must have a value.
    RangedExpressionWithNoValue,
    /// A function was encountered that was not expected.
    UnexpectedFunction(CowRcStr<'i>),
    /// @namespace must be before any rule but @charset and @import
    UnexpectedNamespaceRule,
    /// @import must be before any rule but @charset
    UnexpectedImportRule,
    /// Unexpected @charset rule encountered.
    UnexpectedCharsetRule,
    /// Unsupported @ rule
    UnsupportedAtRule(CowRcStr<'i>),
    /// A placeholder for many sources of errors that require more specific variants.
    UnspecifiedError,
    /// An unexpected token was found within a namespace rule.
    UnexpectedTokenWithinNamespace(Token<'i>),
    /// An error was encountered while parsing a property value.
    ValueError(ValueParseErrorKind<'i>),
    /// An error was encountered while parsing a selector
    SelectorError(SelectorParseErrorKind<'i>),
    /// The property declaration was for an unknown property.
    UnknownProperty(CowRcStr<'i>),
    /// An unknown vendor-specific identifier was encountered.
    UnknownVendorProperty,
    /// The property declaration was for a disabled experimental property.
    ExperimentalProperty,
    /// The property declaration contained an invalid color value.
    InvalidColor(CowRcStr<'i>, Token<'i>),
    /// The property declaration contained an invalid filter value.
    InvalidFilter(CowRcStr<'i>, Token<'i>),
    /// The property declaration contained an invalid value.
    OtherInvalidValue(CowRcStr<'i>),
    /// The declaration contained an animation property, and we were parsing
    /// this as a keyframe block (so that property should be ignored).
    ///
    /// See: https://drafts.csswg.org/css-animations/#keyframes
    AnimationPropertyInKeyframeBlock,
    /// The property is not allowed within a page rule.
    NotAllowedInPageRule,
}
impl<'i> From<ValueParseErrorKind<'i>> for StyleParseErrorKind<'i> {
    /// Lift a value-parsing error into the general style error kind.
    fn from(kind: ValueParseErrorKind<'i>) -> StyleParseErrorKind<'i> {
        StyleParseErrorKind::ValueError(kind)
    }
}

impl<'i> From<SelectorParseErrorKind<'i>> for StyleParseErrorKind<'i> {
    /// Lift a selector-parsing error into the general style error kind.
    fn from(kind: SelectorParseErrorKind<'i>) -> StyleParseErrorKind<'i> {
        StyleParseErrorKind::SelectorError(kind)
    }
}
/// Specific errors that can be encountered while parsing property values.
// These carry only the offending token; they are re-associated with the
// property name by `StyleParseErrorKind::new_invalid`.
#[derive(Clone, Debug, PartialEq)]
pub enum ValueParseErrorKind<'i> {
    /// An invalid token was encountered while parsing a color value.
    InvalidColor(Token<'i>),
    /// An invalid filter value was encountered.
    InvalidFilter(Token<'i>),
}
impl<'i> StyleParseErrorKind<'i> {
    /// Wrap a property-value parse failure as an "invalid value" error for the
    /// property `name`, preserving the original error's source location.
    pub fn new_invalid(name: CowRcStr<'i>, value_error: ParseError<'i>) -> ParseError<'i> {
        use cssparser::ParseErrorKind::Custom;
        // The two specific value errors map onto their named, property-aware
        // variants; anything else collapses into the generic `OtherInvalidValue`.
        let kind = match value_error.kind {
            Custom(StyleParseErrorKind::ValueError(ValueParseErrorKind::InvalidColor(token))) => {
                StyleParseErrorKind::InvalidColor(name, token)
            }
            Custom(StyleParseErrorKind::ValueError(ValueParseErrorKind::InvalidFilter(token))) => {
                StyleParseErrorKind::InvalidFilter(name, token)
            }
            _ => StyleParseErrorKind::OtherInvalidValue(name),
        };
        cssparser::ParseError {
            kind: Custom(kind),
            location: value_error.location,
        }
    }
}
bitflags! {
    /// The mode to use when parsing values.
    ///
    /// Note: `DEFAULT` is the empty bit set, so `intersects(DEFAULT)` is
    /// always false; the accessor methods below only test the SVG flags.
    pub struct ParsingMode: u8 {
        /// In CSS; lengths must have units, except for zero values, where the unit can be omitted.
        /// <https://www.w3.org/TR/css3-values/#lengths>
        const DEFAULT = 0x00;
        /// In SVG; a coordinate or length value without a unit identifier (e.g., "25") is assumed
        /// to be in user units (px).
        /// <https://www.w3.org/TR/SVG/coords.html#Units>
        const ALLOW_UNITLESS_LENGTH = 0x01;
        /// In SVG; out-of-range values are not treated as an error in parsing.
        /// <https://www.w3.org/TR/SVG/implnote.html#RangeClamping>
        const ALLOW_ALL_NUMERIC_VALUES = 0x02;
    }
}
impl ParsingMode {
    /// Whether the parsing mode allows unitless lengths for non-zero values to be interpreted as px.
    // Single-bit flag, so `intersects` here is equivalent to `contains`.
    #[inline]
    pub fn allows_unitless_lengths(&self) -> bool {
        self.intersects(ParsingMode::ALLOW_UNITLESS_LENGTH)
    }

    /// Whether the parsing mode allows all numeric values (i.e. out-of-range
    /// values are not a parse error).
    #[inline]
    pub fn allows_all_numeric_values(&self) -> bool {
        self.intersects(ParsingMode::ALLOW_ALL_NUMERIC_VALUES)
    }
}
#[cfg(feature = "servo")]
/// Speculatively execute paint code in the worklet thread pool.
pub trait SpeculativePainter: Send + Sync {
    /// <https://drafts.css-houdini.org/css-paint-api/#draw-a-paint-image>
    /// `properties` carries (name, serialized value) pairs and `arguments`
    /// the paint function's input arguments — presumably serialized CSS
    /// strings; TODO confirm against the implementor.
    fn speculatively_draw_a_paint_image(&self, properties: Vec<(Atom, String)>, arguments: Vec<String>);
}
verify.js | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
var fs = require('fs');
var jsdom = require('jsdom');
// Strip inline event-handler ("on…") attributes from a single element:
// a thin wrapper over cleanAttributes with no attribute whitelist.
function cleanTag (tree) {
  cleanAttributes(tree);
}
// Remove dangerous/unwanted attributes from a node, in place.
// - Always removes inline event handlers: any attribute whose name starts
//   with "on" in any letter case (onclick, ONLOAD, ...), except the
//   attribute named exactly "on", which is deliberately preserved.
// - When allowedAttributes is given, additionally removes every attribute
//   not present in that whitelist.
// (Fixes the misspelled `acceptableAttribtues` parameter name.)
function cleanAttributes (tree, allowedAttributes) {
  var attributesToRemove = [];
  // Collect first, remove after: removing while iterating a live DOM
  // NamedNodeMap would shift the entries still to be visited.
  for (var i = 0, l = tree.attributes.length; i < l; ++i) {
    var name = tree.attributes[i].name;
    var notWhitelisted = Boolean(allowedAttributes) && allowedAttributes.indexOf(name) === -1;
    // /^on/i covers the four explicit case combinations ("on", "On", "oN",
    // "ON") the original spelled out by hand.
    var isEventHandler = name !== 'on' && /^on/i.test(name);
    if (notWhitelisted || isEventHandler) {
      attributesToRemove.push(name);
    }
  }
  while (attributesToRemove.length > 0) {
    tree.removeAttribute(attributesToRemove.shift());
  }
}
function cleanContentSubTree (tree) |
// Assert that `tree` is an element named `tagName` whose attributes match
// `attributes`: the value for "id" must match exactly; any other listed
// attribute value need only contain the expected value as a substring
// (e.g. one class among several).
// Throws a string on mismatch, matching this file's error convention.
// (Removes the unused `attr` local and the `[]`-as-object default.)
function checkTag (tree, tagName, attributes) {
  if (!tree || tree._nodeName !== tagName) {
    throw 'Invalid tag.';
  }
  attributes = attributes || {};
  Object.keys(attributes).forEach(function (name) {
    var expectedValue = attributes[name];
    var inputAttribute = tree.attributes[name];
    if (name === 'id') {
      if (expectedValue !== inputAttribute.value) {
        throw 'Attribute "id" does not match: ' + attributes[name];
      }
    }
    else if (inputAttribute.value.indexOf(expectedValue) === -1) {
      throw 'Attribute "' + name + '" does not match: ' + expectedValue + ', ' + inputAttribute.value;
    }
  });
}
// Walk `childNodes` and detach every element child for which `childFunction`
// returns falsy; #text nodes are always left in place. Rejected children are
// collected first and removed afterwards so the scan order stays stable.
function siftThroughChildren (childNodes, childFunction) {
  var rejected = [];
  for (var i = 0, l = childNodes.length; i < l; ++i) {
    var child = childNodes[i];
    if (child._nodeName === '#text') {
      // Preserved from the original: the result is discarded, so this is
      // effectively a no-op on the text node.
      child._nodeValue.replace(/[^\s]/g, '');
    }
    else if (!childFunction(child)) {
      rejected.push(child);
    }
  }
  while (rejected.length > 0) {
    var doomed = rejected.shift();
    doomed.parentNode.removeChild(doomed);
  }
}
// Validate and sanitize the root application container, pruning every child
// that is not a valid card. Returns the (mutated) tree.
function filterApp (tree) {
  checkTag(tree, 'div', { id: 'flathead-app' });
  cleanTag(tree);
  if (!tree._childNodes) {
    throw "No children!";
  }
  siftThroughChildren(tree._childNodes, filterCard);
  return tree;
}
// Validate one Ceci card and its three fixed sections. Each successive
// element child is matched, in document order, against the next expected
// section class. Returns true when the card is accepted.
function filterCard (tree) {
  checkTag(tree, 'div', { class: 'ceci-card' });
  cleanTag(tree);
  if (!tree._childNodes) {
    throw "No children!";
  }
  var expectedClasses = ['fixed-top', 'phone-canvas', 'fixed-bottom'];
  siftThroughChildren(tree._childNodes, function (child) {
    // shift() consumes one expected class per element child.
    return filterSection(child, expectedClasses.shift());
  });
  return true;
}
// Validate a card section (identified by its expected class) and prune any
// child that is not a recognized component. Sections may be childless.
// Always accepts the section (returns true).
function filterSection (tree, sectionClass) {
  checkTag(tree, 'div', { class: sectionClass });
  cleanTag(tree);
  var children = tree._childNodes;
  if (children) {
    siftThroughChildren(children, filterComponent);
  }
  return true;
}
// Sanitize a <broadcast>/<listen> subscription element: keep only its
// "on" and "for" attributes, and only the "color" attribute on children.
// Always accepts the element itself (returns true).
// NOTE(review): the sift callback below returns undefined (falsy), so every
// element child is detached right after being cleaned — if children were
// meant to survive with their "color" attribute, it should return true.
// Confirm intent before changing.
function filterSubscription (tree) {
  cleanAttributes(tree, ['on', 'for']);
  siftThroughChildren(tree._childNodes, function (child) {
    cleanAttributes(child, ['color']);
  });
  return true;
}
// Decide whether an element belongs in a section: custom components (tag
// names prefixed "app-") are kept and recursively filtered, subscription
// elements are delegated to filterSubscription, and everything else is
// rejected. An "app-" element without a child list is rejected too.
function filterComponent (tree) {
  var name = tree._nodeName;
  if (name.indexOf('app-') === 0) {
    if (!tree._childNodes) {
      return false;
    }
    siftThroughChildren(tree._childNodes, filterComponent);
    return true;
  }
  if (name === 'broadcast' || name === 'listen') {
    return filterSubscription(tree);
  }
  return false;
}
module.exports = {
filter: function (html, callback) {
html = html.replace(/<script>[\s.]*<\/script>/g, '');
jsdom.env(html, {
done: function (errors, window) {
var inputDocument = window.document;
var inputNode = inputDocument.firstChild;
var output = null;
var appDiv = inputNode.firstChild.firstChild;
try {
output = filterApp(appDiv).outerHTML;
}
catch (e) {
console.error(e);
throw e;
}
callback(output);
}
});
}
}; | {
var output = '';
var childOutput = '';
if (tree._nodeName.indexOf('app-') === 0) {
for (var i = 0, l = tree._childNodes.length; i < l; ++i) {
childOutput += cleanContentSubTree(tree._childNodes[i]);
}
output += '<' + tree._nodeName + createAttributeString(tree) + '>' + childOutput + '</' + tree._nodeName + '>';
}
else if (tree._nodeName === '#text') {
output += tree._nodeValue.replace(/[^\s]/g, '');
}
return output;
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.