file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
activitybarPart.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import 'vs/css!./media/activityBarPart';
import nls = require('vs/nls');
import {Promise} from 'vs/base/common/winjs.base';
import {Builder, $} from 'vs/base/browser/builder';
import {Action, IAction} from 'vs/base/common/actions';
import errors = require('vs/base/common/errors');
import events = require('vs/base/common/events');
import {ActionsOrientation, ActionBar, IActionItem} from 'vs/base/browser/ui/actionbar/actionbar';
import {Scope, IActionBarRegistry, Extensions as ActionBarExtensions, prepareActions} from 'vs/workbench/browser/actionBarRegistry';
import {CONTEXT, ToolBar} from 'vs/base/browser/ui/toolbar/toolbar';
import {Registry} from 'vs/platform/platform';
import {CompositeEvent, EventType} from 'vs/workbench/common/events';
import {ViewletDescriptor, ViewletRegistry, Extensions as ViewletExtensions} from 'vs/workbench/browser/viewlet';
import {Part} from 'vs/workbench/browser/part';
import {ActivityAction, ActivityActionItem} from 'vs/workbench/browser/parts/activitybar/activityAction';
import {IViewletService} from 'vs/workbench/services/viewlet/common/viewletService';
import {IActivityService, IBadge} from 'vs/workbench/services/activity/common/activityService';
import {IPartService} from 'vs/workbench/services/part/common/partService';
import {IContextMenuService} from 'vs/platform/contextview/browser/contextView';
import {IEventService} from 'vs/platform/event/common/event';
import {IInstantiationService} from 'vs/platform/instantiation/common/instantiation';
import {IMessageService, Severity} from 'vs/platform/message/common/message';
import {ITelemetryService} from 'vs/platform/telemetry/common/telemetry';
import {IKeybindingService} from 'vs/platform/keybinding/common/keybindingService';
export class ActivitybarPart extends Part implements IActivityService {
public serviceId = IActivityService;
private viewletSwitcherBar: ActionBar;
private globalToolBar: ToolBar;
private activityActionItems: { [actionId: string]: IActionItem; };
private viewletIdToActions: { [viewletId: string]: ActivityAction; };
private instantiationService: IInstantiationService;
constructor(
private viewletService: IViewletService,
private messageService: IMessageService,
private telemetryService: ITelemetryService,
private eventService: IEventService,
private contextMenuService: IContextMenuService,
private keybindingService: IKeybindingService,
id: string
) {
super(id);
this.activityActionItems = {};
this.viewletIdToActions = {};
this.registerListeners();
}
public setInstantiationService(service: IInstantiationService): void {
this.instantiationService = service;
}
private registerListeners(): void {
// Activate viewlet action on opening of a viewlet
this.toUnbind.push(this.eventService.addListener(EventType.COMPOSITE_OPENING, (e: CompositeEvent) => this.onCompositeOpening(e)));
// Deactivate viewlet action on close
this.toUnbind.push(this.eventService.addListener(EventType.COMPOSITE_CLOSED, (e: CompositeEvent) => this.onCompositeClosed(e)));
}
private onCompositeOpening(e: CompositeEvent): void {
if (this.viewletIdToActions[e.compositeId]) {
this.viewletIdToActions[e.compositeId].activate();
// There can only be one active viewlet action
for (let key in this.viewletIdToActions) {
if (this.viewletIdToActions.hasOwnProperty(key) && key !== e.compositeId) {
this.viewletIdToActions[key].deactivate();
}
}
}
}
private onCompositeClosed(e: CompositeEvent): void {
if (this.viewletIdToActions[e.compositeId]) {
this.viewletIdToActions[e.compositeId].deactivate();
}
}
public showActivity(viewletId: string, badge: IBadge, clazz?: string): void {
let action = this.viewletIdToActions[viewletId];
if (action) {
action.setBadge(badge);
if (clazz) {
action.class = clazz;
}
}
}
public clearActivity(viewletId: string): void {
this.showActivity(viewletId, null);
}
public createContentArea(parent: Builder): Builder {
let $el = $(parent);
let $result = $('.content').appendTo($el);
// Top Actionbar with action items for each viewlet action
this.createViewletSwitcher($result.clone());
// Bottom Toolbar with action items for global actions
// this.createGlobalToolBarArea($result.clone()); // not used currently
return $result;
}
private createViewletSwitcher(div: Builder): void {
// Viewlet switcher is on top
this.viewletSwitcherBar = new ActionBar(div, {
actionItemProvider: (action: Action) => this.activityActionItems[action.id],
orientation: ActionsOrientation.VERTICAL,
ariaLabel: nls.localize('activityBarAriaLabel', "Active View Switcher")
});
this.viewletSwitcherBar.getContainer().addClass('position-top');
// Build Viewlet Actions in correct order
let activeViewlet = this.viewletService.getActiveViewlet();
let registry = (<ViewletRegistry>Registry.as(ViewletExtensions.Viewlets));
let viewletActions: Action[] = registry.getViewlets()
.sort((v1: ViewletDescriptor, v2: ViewletDescriptor) => v1.order - v2.order)
.map((viewlet: ViewletDescriptor) => {
let action = this.instantiationService.createInstance(ViewletActivityAction, viewlet.id + '.activity-bar-action', viewlet);
let keybinding: string = null;
let keys = this.keybindingService.lookupKeybindings(viewlet.id).map(k => this.keybindingService.getLabelFor(k));
if (keys && keys.length) {
keybinding = keys[0];
}
this.activityActionItems[action.id] = new ActivityActionItem(action, viewlet.name, keybinding);
this.viewletIdToActions[viewlet.id] = action;
// Mark active viewlet action as active
if (activeViewlet && activeViewlet.getId() === viewlet.id) {
action.activate();
}
return action;
}
);
// Add to viewlet switcher
this.viewletSwitcherBar.push(viewletActions, { label: true, icon: true });
}
private createGlobalToolBarArea(div: Builder): void {
// Global action bar is on the bottom
this.globalToolBar = new ToolBar(div.getHTMLElement(), this.contextMenuService, {
actionItemProvider: (action: Action) => this.activityActionItems[action.id],
orientation: ActionsOrientation.VERTICAL
});
this.globalToolBar.getContainer().addClass('global');
this.globalToolBar.actionRunner.addListener(events.EventType.RUN, (e: any) => {
// Check for Error
if (e.error && !errors.isPromiseCanceledError(e.error)) {
this.messageService.show(Severity.Error, e.error);
}
// Log in telemetry
if (this.telemetryService) {
this.telemetryService.publicLog('workbenchActionExecuted', { id: e.action.id, from: 'activityBar' });
}
});
// Build Global Actions in correct order
let primaryActions = this.getGlobalActions(true);
let secondaryActions = this.getGlobalActions(false);
if (primaryActions.length + secondaryActions.length > 0) {
this.globalToolBar.getContainer().addClass('position-bottom');
}
// Add to global action bar
this.globalToolBar.setActions(prepareActions(primaryActions), prepareActions(secondaryActions))();
}
private getGlobalActions(primary: boolean): IAction[] {
let actionBarRegistry = <IActionBarRegistry>Registry.as(ActionBarExtensions.Actionbar);
// Collect actions from actionbar contributor
let actions: IAction[];
if (primary) {
actions = actionBarRegistry.getActionBarActionsForContext(Scope.GLOBAL, CONTEXT);
} else {
actions = actionBarRegistry.getSecondaryActionBarActionsForContext(Scope.GLOBAL, CONTEXT);
}
return actions.map((action: Action) => {
if (primary) {
let keybinding: string = null;
let keys = this.keybindingService.lookupKeybindings(action.id).map(k => this.keybindingService.getLabelFor(k));
if (keys && keys.length) {
keybinding = keys[0];
}
let actionItem = actionBarRegistry.getActionItemForContext(Scope.GLOBAL, CONTEXT, action);
if (!actionItem) {
actionItem = new ActivityActionItem(action, action.label, keybinding);
}
if (actionItem instanceof ActivityActionItem) {
(<ActivityActionItem> actionItem).keybinding = keybinding;
}
this.activityActionItems[action.id] = actionItem;
}
return action;
});
}
public dispose(): void {
if (this.viewletSwitcherBar) {
this.viewletSwitcherBar.dispose();
this.viewletSwitcherBar = null;
}
if (this.globalToolBar) {
this.globalToolBar.dispose();
this.globalToolBar = null;
}
super.dispose();
}
}
class ViewletActivityAction extends ActivityAction {
private static preventDoubleClickDelay = 300;
private static lastRun: number = 0;
private viewlet: ViewletDescriptor;
constructor(
id: string, viewlet: ViewletDescriptor,
@IViewletService private viewletService: IViewletService,
@IPartService private partService: IPartService
) {
super(id, viewlet.name, viewlet.cssClass);
| public run(): Promise {
// cheap trick to prevent accident trigger on a doubleclick (to help nervous people)
let now = new Date().getTime();
if (now - ViewletActivityAction.lastRun < ViewletActivityAction.preventDoubleClickDelay) {
return Promise.as(true);
}
ViewletActivityAction.lastRun = now;
let sideBarHidden = this.partService.isSideBarHidden();
let activeViewlet = this.viewletService.getActiveViewlet();
// Hide sidebar if selected viewlet already visible
if (!sideBarHidden && activeViewlet && activeViewlet.getId() === this.viewlet.id) {
this.partService.setSideBarHidden(true);
}
// Open viewlet and focus it
else {
this.viewletService.openViewlet(this.viewlet.id, true).done(null, errors.onUnexpectedError);
this.activate();
}
return Promise.as(true);
}
} | this.viewlet = viewlet;
}
| random_line_split |
hlp.py | """
Microsoft Windows Help (HLP) parser for Hachoir project.
Documents:
- Windows Help File Format / Annotation File Format / SHG and MRB File Format
written by M. Winterhoff (100326.2776@compuserve.com)
found on http://www.wotsit.org/
Author: Victor Stinner
Creation date: 2007-09-03
"""
| from hachoir_py3.field import (FieldSet,
Bits, Int32, UInt16, UInt32,
NullBytes, RawBytes, PaddingBytes, String)
from hachoir_py3.core.endian import LITTLE_ENDIAN
from hachoir_py3.core.text_handler import (textHandler, hexadecimal,
displayHandler, humanFilesize)
class FileEntry(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["res_space"].value * 8
def createFields(self):
yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize)
yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize)
yield Bits(self, "file_flags", 8, "(=4)")
yield textHandler(UInt16(self, "magic"), hexadecimal)
yield Bits(self, "flags", 16)
yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize)
yield String(self, "structure", 16, strip="\0", charset="ASCII")
yield NullBytes(self, "zero", 2)
yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered")
yield UInt16(self, "root_page", "Page number of B+ tree root page")
yield PaddingBytes(self, "one", 2, pattern="\xFF")
yield UInt16(self, "nb_page", "Number of B+ tree pages")
yield UInt16(self, "nb_level", "Number of levels of B+ tree")
yield UInt16(self, "nb_entry", "Number of entries in B+ tree")
size = (self.size - self.current_size) // 8
if size:
yield PaddingBytes(self, "reserved_space", size)
class HlpFile(Parser):
PARSER_TAGS = {
"id": "hlp",
"category": "misc",
"file_ext": ("hlp",),
"min_size": 32,
"description": "Microsoft Windows Help (HLP)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["magic"].value != 0x00035F3F:
return "Invalid magic"
if self["filesize"].value != self.stream.size // 8:
return "Invalid magic"
return True
def createFields(self):
yield textHandler(UInt32(self, "magic"), hexadecimal)
yield UInt32(self, "dir_start", "Directory start")
yield Int32(self, "first_free_block", "First free block")
yield UInt32(self, "filesize", "File size in bytes")
yield self.seekByte(self["dir_start"].value)
yield FileEntry(self, "file[]")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size) | from hachoir_py3.parser import Parser | random_line_split |
hlp.py | """
Microsoft Windows Help (HLP) parser for Hachoir project.
Documents:
- Windows Help File Format / Annotation File Format / SHG and MRB File Format
written by M. Winterhoff (100326.2776@compuserve.com)
found on http://www.wotsit.org/
Author: Victor Stinner
Creation date: 2007-09-03
"""
from hachoir_py3.parser import Parser
from hachoir_py3.field import (FieldSet,
Bits, Int32, UInt16, UInt32,
NullBytes, RawBytes, PaddingBytes, String)
from hachoir_py3.core.endian import LITTLE_ENDIAN
from hachoir_py3.core.text_handler import (textHandler, hexadecimal,
displayHandler, humanFilesize)
class FileEntry(FieldSet):
|
class HlpFile(Parser):
PARSER_TAGS = {
"id": "hlp",
"category": "misc",
"file_ext": ("hlp",),
"min_size": 32,
"description": "Microsoft Windows Help (HLP)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["magic"].value != 0x00035F3F:
return "Invalid magic"
if self["filesize"].value != self.stream.size // 8:
return "Invalid magic"
return True
def createFields(self):
yield textHandler(UInt32(self, "magic"), hexadecimal)
yield UInt32(self, "dir_start", "Directory start")
yield Int32(self, "first_free_block", "First free block")
yield UInt32(self, "filesize", "File size in bytes")
yield self.seekByte(self["dir_start"].value)
yield FileEntry(self, "file[]")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size)
| def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["res_space"].value * 8
def createFields(self):
yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize)
yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize)
yield Bits(self, "file_flags", 8, "(=4)")
yield textHandler(UInt16(self, "magic"), hexadecimal)
yield Bits(self, "flags", 16)
yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize)
yield String(self, "structure", 16, strip="\0", charset="ASCII")
yield NullBytes(self, "zero", 2)
yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered")
yield UInt16(self, "root_page", "Page number of B+ tree root page")
yield PaddingBytes(self, "one", 2, pattern="\xFF")
yield UInt16(self, "nb_page", "Number of B+ tree pages")
yield UInt16(self, "nb_level", "Number of levels of B+ tree")
yield UInt16(self, "nb_entry", "Number of entries in B+ tree")
size = (self.size - self.current_size) // 8
if size:
yield PaddingBytes(self, "reserved_space", size) | identifier_body |
hlp.py | """
Microsoft Windows Help (HLP) parser for Hachoir project.
Documents:
- Windows Help File Format / Annotation File Format / SHG and MRB File Format
written by M. Winterhoff (100326.2776@compuserve.com)
found on http://www.wotsit.org/
Author: Victor Stinner
Creation date: 2007-09-03
"""
from hachoir_py3.parser import Parser
from hachoir_py3.field import (FieldSet,
Bits, Int32, UInt16, UInt32,
NullBytes, RawBytes, PaddingBytes, String)
from hachoir_py3.core.endian import LITTLE_ENDIAN
from hachoir_py3.core.text_handler import (textHandler, hexadecimal,
displayHandler, humanFilesize)
class FileEntry(FieldSet):
def | (self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["res_space"].value * 8
def createFields(self):
yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize)
yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize)
yield Bits(self, "file_flags", 8, "(=4)")
yield textHandler(UInt16(self, "magic"), hexadecimal)
yield Bits(self, "flags", 16)
yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize)
yield String(self, "structure", 16, strip="\0", charset="ASCII")
yield NullBytes(self, "zero", 2)
yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered")
yield UInt16(self, "root_page", "Page number of B+ tree root page")
yield PaddingBytes(self, "one", 2, pattern="\xFF")
yield UInt16(self, "nb_page", "Number of B+ tree pages")
yield UInt16(self, "nb_level", "Number of levels of B+ tree")
yield UInt16(self, "nb_entry", "Number of entries in B+ tree")
size = (self.size - self.current_size) // 8
if size:
yield PaddingBytes(self, "reserved_space", size)
class HlpFile(Parser):
PARSER_TAGS = {
"id": "hlp",
"category": "misc",
"file_ext": ("hlp",),
"min_size": 32,
"description": "Microsoft Windows Help (HLP)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["magic"].value != 0x00035F3F:
return "Invalid magic"
if self["filesize"].value != self.stream.size // 8:
return "Invalid magic"
return True
def createFields(self):
yield textHandler(UInt32(self, "magic"), hexadecimal)
yield UInt32(self, "dir_start", "Directory start")
yield Int32(self, "first_free_block", "First free block")
yield UInt32(self, "filesize", "File size in bytes")
yield self.seekByte(self["dir_start"].value)
yield FileEntry(self, "file[]")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size)
| __init__ | identifier_name |
hlp.py | """
Microsoft Windows Help (HLP) parser for Hachoir project.
Documents:
- Windows Help File Format / Annotation File Format / SHG and MRB File Format
written by M. Winterhoff (100326.2776@compuserve.com)
found on http://www.wotsit.org/
Author: Victor Stinner
Creation date: 2007-09-03
"""
from hachoir_py3.parser import Parser
from hachoir_py3.field import (FieldSet,
Bits, Int32, UInt16, UInt32,
NullBytes, RawBytes, PaddingBytes, String)
from hachoir_py3.core.endian import LITTLE_ENDIAN
from hachoir_py3.core.text_handler import (textHandler, hexadecimal,
displayHandler, humanFilesize)
class FileEntry(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["res_space"].value * 8
def createFields(self):
yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize)
yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize)
yield Bits(self, "file_flags", 8, "(=4)")
yield textHandler(UInt16(self, "magic"), hexadecimal)
yield Bits(self, "flags", 16)
yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize)
yield String(self, "structure", 16, strip="\0", charset="ASCII")
yield NullBytes(self, "zero", 2)
yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered")
yield UInt16(self, "root_page", "Page number of B+ tree root page")
yield PaddingBytes(self, "one", 2, pattern="\xFF")
yield UInt16(self, "nb_page", "Number of B+ tree pages")
yield UInt16(self, "nb_level", "Number of levels of B+ tree")
yield UInt16(self, "nb_entry", "Number of entries in B+ tree")
size = (self.size - self.current_size) // 8
if size:
|
class HlpFile(Parser):
PARSER_TAGS = {
"id": "hlp",
"category": "misc",
"file_ext": ("hlp",),
"min_size": 32,
"description": "Microsoft Windows Help (HLP)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["magic"].value != 0x00035F3F:
return "Invalid magic"
if self["filesize"].value != self.stream.size // 8:
return "Invalid magic"
return True
def createFields(self):
yield textHandler(UInt32(self, "magic"), hexadecimal)
yield UInt32(self, "dir_start", "Directory start")
yield Int32(self, "first_free_block", "First free block")
yield UInt32(self, "filesize", "File size in bytes")
yield self.seekByte(self["dir_start"].value)
yield FileEntry(self, "file[]")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size)
| yield PaddingBytes(self, "reserved_space", size) | conditional_block |
data-store-loader.js | var jsonfile = require('jsonfile');
var logger = require('../logger.js');
var middleware = require('swagger-express-middleware');
var recursive = require('recursive-readdir');
var DataStoreLoader = {};
module.exports = DataStoreLoader;
DataStoreLoader.load = function(myDB, baseDir) {
// Filter function
var ignoreFunc = function(file, stats) {
return stats.isFile() && !file.endsWith('.json');
};
| // Scan through the /db directory for all *.json files and add them into the custom data store
recursive(baseDir, [ignoreFunc], function(err, files) {
for (var i = 0; i < files.length; i++) {
var file = files[i];
var path = file.slice(baseDir.length, file.length - 5).replace(/\\/g, '/');
var data = jsonfile.readFileSync(file, 'utf8');
logger.debug(`Loading file: ${file}, and configured path to: ${path}`);
myDB.save(new middleware.Resource(path, data));
}
});
} | random_line_split | |
data-store-loader.js | var jsonfile = require('jsonfile');
var logger = require('../logger.js');
var middleware = require('swagger-express-middleware');
var recursive = require('recursive-readdir');
var DataStoreLoader = {};
module.exports = DataStoreLoader;
DataStoreLoader.load = function(myDB, baseDir) {
// Filter function
var ignoreFunc = function(file, stats) {
return stats.isFile() && !file.endsWith('.json');
};
// Scan through the /db directory for all *.json files and add them into the custom data store
recursive(baseDir, [ignoreFunc], function(err, files) {
for (var i = 0; i < files.length; i++) |
});
}
| {
var file = files[i];
var path = file.slice(baseDir.length, file.length - 5).replace(/\\/g, '/');
var data = jsonfile.readFileSync(file, 'utf8');
logger.debug(`Loading file: ${file}, and configured path to: ${path}`);
myDB.save(new middleware.Resource(path, data));
} | conditional_block |
template.js | define({
"builder": {
"initPopup": {
"title": "Esiet sveicināts!",
"startBtn": "Sākt"
},
"addEditPopup": {
"test": "tests",
"add": "pievienot",
"edit": "labot",
"editTab": "REDIĢĒT CILNI",
"addTab": "PIEVIENOT CILNI",
"organizeTabs": "KĀRTOT CILNES",
"themeColor": "Tēmas krāsa",
"location": "Novietojums",
"confirmDeletion": "Apstiprināt dzēšanu šim: "
},
"landing": {
"lblAdd": "Kā vēlaties nosaukt savu atlases sarakstu?",
"phAdd": "Ievadīt virsrakstu...",
"lblOR": "Vai",
"lblHelp": "Iepazīties"
},
"organizePopup": {
"title": "Organizēt",
"tab": "cilne",
"dragNDrop": "Velciet un nometiet cilnes, lai sakārtotu savu stāstu",
"lblColTitle": "Virsraksts"
},
"settingsLayoutOptions": {
"title": "Izkārtojuma izvēlnes",
"lblDescription": "Apraksts"
},
"addFeatureBar": {
"add": "Pievienot",
"import": "Importēt",
"done": "Pabeigts",
"deleteFeature": "Izdzēst",
"move": "Pārvietot",
"locateFeaturesTooltip": "Atsevišķām vietām nepieciešams atrast novietojumu. Noklikšķiniet, lai tās redzētu"
},
"detailPanelBuilder": {
"changeLocation": "Mainīt novietojumu",
"setLocation": "Iestatīt novietojumu",
"cancel": "Atcelt",
"addImage": "Lai pievienotu attēlu, noklikšķiniet vai velciet un nometiet",
"chooseImage": "IZVĒLĒTIES ATTĒLU",
"importImages": "IMPORTĒT ATTĒLUS",
"import": "IMPORTĒT",
"enterPlaceName": "Ievadiet vietas nosaukumu",
"enterPlaceDescription": "Ievadiet vietas aprakstu",
"unnamedPlace": "Nenosaukta vieta",
"update": "Atjaunināt",
"useLocation": "Lietot šo izvietojumu",
"selectAll": "Izvēlēties visu",
"imageSizeHelper": "Labāku rezultātu iegūšanai attēliem ir jābūt mazākiem par 400 KB. Ieteicamais izmērs un forma ir 1000 x 750 pikseļi (platuma:augstuma proporcija 4:3) vai mazāks. Lielāki attēli var palēnināt veiktspēju. Lai veiktspēja būtu labāka, izmantojiet saspiestus JPG attēlus ar 80% attēlu kvalitāti.",
"imageSizeHelperUpdated": "Lai panāktu vislabākos rezultātus, attēliem jābūt mazākiem par 400 KB. Samaziniet attēla faila lielumu, lietojot formātu JPG, kas saglabāts ar 80% attēla kvalitāti. Ja jūsu attēls joprojām pārsniedz 400 KB, tā lielumu var papildus samazināt, mainot tā izmērus: ieteicams 1000 pikseļu platums x 750 pikseļu augstums (platuma–augstuma attiecība 4:3).",
"thumbnailLink": "Sīktēla saite",
"thumbnailHelp": "Ieteicamais sīktēla izmērs un forma ir 280 x 210 pikseļi (platuma:augstuma proporcija 4:3). Lielāki sīktēli var palēnināt veiktspēju. 4:3 proporcijas sīktēli ietilpst mozaīkās bez saspiešanas."
},
| "extentSensitive": "Cilnēs rādīt tikai vietas, kas redzamas kartē (tikai skatītājs)",
"extentSensitiveTooltip": "Šī izvēlne ir spēkā tikai tad, kad jūsu atlases saraksts tiek skatīts. Shortlist veidotāja cilnēs vienmēr tiek rādītas visas vietas, arī tās, kuras nav redzamas kartē. Lai Shortlist lietotnes skatīšanas laikā cilnēs vienmēr rādītu visas vietas, noņemiet atzīmi šai izvēlnei.",
"locateButton": "Poga Meklēt",
"locateButtonTooltip": "Sniedziet iespēju lasītājiem kartē redzēt viņu pašreizējo novietojumu. Šis līdzeklis tiek atbalstīts lielākajā daļā ierīču un pārlūku, bet poga ir redzama tikai tad, ja stāstu kopīgojat kā HTTPS saiti un stāsts netiek iedarināts.",
"geocoder": "Adrešu, vietu un elementu meklētājs",
"bookmarks": "Grāmatzīmes",
"bookmarksMenuName": "Izvēlnes nosaukums",
"defaultMapLocation": "Noklusējuma kartes izvietojums",
"auto": "Automātiski",
"autoTooltip": "Novietojums tiek pārvaldīts automātiski, lai visas jūsu vietas būtu redzamas",
"custom": "Pielāgots",
"customTooltip": "Iestatiet izvietojumu, izmantojot pogu, kas parādās kartes mērogmaiņas vadīklās",
"mapLocationTooltip": "Izvietojums, ko lietotāji redz, kad atver jūsu atlases sarakstu",
"bookmarksHelp": "Lai aktivizētu Shortlist grāmatzīmes, pievienojiet un pārvaldiet tīmekļa kartes grāmatzīmes tīmekļa kartes skatītājā",
"generalOptions": "Vispārīgās opcijas",
"mapOptions": "Kartes opcijas",
"changeBasemap": "Mainīt pamatkarti",
"saveHomeLocation": "Saglabāt sākuma novietojumu",
"choose": "Izvēlieties",
"headerColor": "Galvenes krāsa"
},
"help": {
"title": "PALĪDZĪBA",
"shortlistHelp1": "Esiet sveicināts lietotnē Story Map Shortlist! Šī lietotne sniedz iespēju apskates objektus izkārtot cilnēs, lai lietotājiem būtu interesanti iepazīt apkārtni. Šajā veidotājā varat interaktīvi veidot savas vietas.",
"shortlistHelp2": "Tāpat varat izveidot Shortlist sarakstu no esošas ArcGIS tīmekļa kartes, tostarp izmantot opciju lietot esošos punktu datus kartē kā vietas. Šos punktu datus varat importēt funkcijā Shortlist Builder turpmākai labošanai vai, ja jūsu vietas izmanto <a href='http://links.esri.com/storymaps/shortlist_layer_template' target='_blank'>Shortlist saraksta datu shēmu</a>, darbināt Shortlist sarakstu tieši no šiem datiem tādu, kāds tas ir. Lai uzzinātu vairāk, skatiet <a href='http://links.esri.com/storymaps/faq_shortlist_create_from_webmap' target='_blank'>šos bieži uzdotos jautājumus</a>.",
"shortlistHelp3": "Lai izveidotu atlases sarakstu no tīmekļa kartes, dodieties uz",
"shortlistHelp4": "atveriet tīmekļa karti, izveidojiet no tās tīmekļa lietotni un lietotņu galerijā izvēlēties Story Map Shortlist. Ja jūsu tīmekļa kartē ir punktu slāņi, Shortlist veidotājs aicinās izvēlēties slāņus, ko izmantosiet kā vietas. Ja atlases sarakstu izveidojāt, izmantojot lietotnes oriģinālo, neviesoto versiju, varat savu atlases sarakstu migrēt šajā lietotnes viesotajā versijā, veicot tās pašas darbības.",
"shortlistHelp5": "Lai iegūtu papildinformāciju",
"shortlistHelp6": "Apmeklējiet Esri Story Maps tīmekļa vietnes sadaļu Shortlist",
"shortlistFAQ": "Bieži uzdotie jautājumi par Shortlist",
"shortlistBetaFeedback": "Atsauksmes par Beta versiju",
"shortlistBetaFeedback2": "Mēs labprāt uzzinātu jūsu viedokli. Informējiet mūs par problēmām un elementiem, kas jums būtu nepieciešami — apmeklējiet",
"geonetForum": "GeoNet forumu par Story Maps"
},
"migration": {
"migrationPattern": {
"welcome": "Esiet sveicināts Shortlist veidotājā!",
"importQuestion": "Jūsu tīmekļa kartē ir punktu dati. Vai vēlaties šos punktus izmantot kā vietas atlases sarakstā?",
"importExplainYes": "Shortlist veidotājā varēsiet labot, pārvaldīt un papildināt savas vietas. Tiek automātiski saglabāta tīmekļa kartes kopija, tāpēc oriģinālie dati netiek modificēti.",
"importExplainNo": "Jūsu punkti tiek rādīti Shortlist kartē, bet tie netiek izmantoti kā vietas. Tā vietā jūs pievienojat vietas atlases sarakstā veidotājā.",
"no": "Nē",
"importOption": "Jā, importēt tās",
"asIsOption": "Jā, izmantot tās esošajā stāvoklī",
"asIsText": "Jūs turpināsiet labot un pārvaldīt vietas savā web kartē, nevis Shortlist veidotājā. Šajos datos veiktā atjaunošana tiek automātiski atspoguļota Shortlist atlases sarakstā. Šī izvēlne nosaka, ka jūsu dati izmanto šo sagatavi.",
"badData": "Punktu slānis, kurā ir jūsu vietas, neizmanto noteikto datu sagatavi. Pārskatiet sagataves prasības.",
"downloadTemplate": "Lejupielādēt sagatavi",
"selectPtData": "IZVĒLIETIES PUNKTA DATUS",
"multipleThemes": "VAI SLĀNIM IR VAIRĀKAS TĒMAS",
"chooseFields": "IZVĒLIETIES ATLASES SARAKSTĀ IZMANTOJAMOS LAUKUS"
},
"fieldPicker": {
"nameField": "Lauks ar katras vietas nosaukumu: ",
"descriptionField": "Lauks(i), kas parādīsies katras vietas aprakstā un vietu secībā: ",
"urlField": "Lauks ar katras vietas papildinformācijas vietrādi URL (nav obligāti): ",
"none": "nav",
"imageFields": "Lauki ar katras vietas attēlu vietrāžiem URL (nav obligāti): ",
"mainImageField": "Galvenais attēls: ",
"thumbImageField": "Sīktēls: ",
"noImageFields": "Atstājiet iestatījumu \"Nav\", ja veidotājā vēlaties pievienot attēlus savām vietām",
"tabField": "Ja ir lauka nosaukums, kas slānī esošās vietas sadala dažādās tēmās, zemāk izvēlieties atbilstošo lauka nosaukumu."
},
"layerPicker": {
"pointLayers": "Tīmekļa kartē izvēlieties punktu slāni(ņus), kas jāizmanto kā vietas: ",
"layerInfo": "Ja izvēlaties vairākus slāņus, tiem visiem ir jābūt vienādām lauku kopām. Katrs jūsu izvēlētais slānis kļūst par atlases saraksta cilni."
}
}
}
}); | "settings": {
"numberedPlaces": "Rādīt vietas ar skaitļiem",
| random_line_split |
__main__.py | #!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""
import argparse
import pathlib
import sys
from pw_build_mcuxpresso import components
def _parse_args() -> argparse.Namespace:
"""Setup argparse and parse command line args."""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command',
metavar='<command>',
required=True)
project_parser = subparsers.add_parser(
'project', help='output components of an MCUXpresso project')
project_parser.add_argument('manifest_filename', type=pathlib.Path)
project_parser.add_argument('--include', type=str, action='append')
project_parser.add_argument('--exclude', type=str, action='append')
project_parser.add_argument('--prefix', dest='path_prefix', type=str)
return parser.parse_args()
def main():
"""Main command line function."""
args = _parse_args()
if args.command == 'project':
|
sys.exit(0)
if __name__ == '__main__':
main()
| components.project(args.manifest_filename,
include=args.include,
exclude=args.exclude,
path_prefix=args.path_prefix) | conditional_block |
__main__.py | #!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""
import argparse
import pathlib
import sys
from pw_build_mcuxpresso import components
def _parse_args() -> argparse.Namespace:
"""Setup argparse and parse command line args."""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command',
metavar='<command>',
required=True)
project_parser = subparsers.add_parser(
'project', help='output components of an MCUXpresso project')
project_parser.add_argument('manifest_filename', type=pathlib.Path)
project_parser.add_argument('--include', type=str, action='append')
project_parser.add_argument('--exclude', type=str, action='append')
project_parser.add_argument('--prefix', dest='path_prefix', type=str)
return parser.parse_args()
def main():
|
if __name__ == '__main__':
main()
| """Main command line function."""
args = _parse_args()
if args.command == 'project':
components.project(args.manifest_filename,
include=args.include,
exclude=args.exclude,
path_prefix=args.path_prefix)
sys.exit(0) | identifier_body |
__main__.py | #!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""
import argparse
import pathlib
import sys
from pw_build_mcuxpresso import components
def | () -> argparse.Namespace:
"""Setup argparse and parse command line args."""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command',
metavar='<command>',
required=True)
project_parser = subparsers.add_parser(
'project', help='output components of an MCUXpresso project')
project_parser.add_argument('manifest_filename', type=pathlib.Path)
project_parser.add_argument('--include', type=str, action='append')
project_parser.add_argument('--exclude', type=str, action='append')
project_parser.add_argument('--prefix', dest='path_prefix', type=str)
return parser.parse_args()
def main():
"""Main command line function."""
args = _parse_args()
if args.command == 'project':
components.project(args.manifest_filename,
include=args.include,
exclude=args.exclude,
path_prefix=args.path_prefix)
sys.exit(0)
if __name__ == '__main__':
main()
| _parse_args | identifier_name |
__main__.py | #!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""
import argparse
import pathlib
import sys
from pw_build_mcuxpresso import components
def _parse_args() -> argparse.Namespace:
"""Setup argparse and parse command line args."""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command',
metavar='<command>',
required=True)
project_parser = subparsers.add_parser(
'project', help='output components of an MCUXpresso project')
project_parser.add_argument('manifest_filename', type=pathlib.Path)
project_parser.add_argument('--include', type=str, action='append')
project_parser.add_argument('--exclude', type=str, action='append')
project_parser.add_argument('--prefix', dest='path_prefix', type=str)
return parser.parse_args()
def main(): |
if args.command == 'project':
components.project(args.manifest_filename,
include=args.include,
exclude=args.exclude,
path_prefix=args.path_prefix)
sys.exit(0)
if __name__ == '__main__':
main() | """Main command line function."""
args = _parse_args() | random_line_split |
setup.py | from setuptools import setup, find_packages
import sys, os
version = '1.3'
long_description = """The raisin.restyler package is a part of Raisin, the web application
used for publishing the summary statistics of Grape, a pipeline used for processing and
analyzing RNA-Seq data."""
setup(name='raisin.restyler',
version=version,
description="A package used in the Raisin web application",
long_description=long_description, | classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: POSIX :: Linux'],
keywords='RNA-Seq pipeline ngs transcriptome bioinformatics ETL',
author='Maik Roder',
author_email='maikroeder@gmail.com',
url='http://big.crg.cat/services/grape',
license='GPL',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages = ['raisin'],
package_data = {'raisin.restyler':['templates/*.pt']},
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'configobj',
'zope.pagetemplate'
],
entry_points="""
# -*- Entry points: -*-
""",
) | random_line_split | |
endpoint-service-replace.py | #!/usr/bin/env python
from optparse import OptionParser
import getpass
import os
import sys
import yaml
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
|
from anvil import log as logging
from anvil import importer
from anvil import passwords
from anvil.components.helpers import keystone
from anvil import utils
def get_token():
pw_storage = passwords.KeyringProxy(path='/etc/anvil/passwords.cfg')
lookup_name = "service_token"
prompt = "Please enter the password for %s: " % ('/etc/anvil/passwords.cfg')
(exists, token) = pw_storage.read(lookup_name, prompt)
if not exists:
pw_storage.save(lookup_name, token)
return token
def replace_services_endpoints(token, options):
client = importer.construct_entry_point("keystoneclient.v2_0.client:Client",
token=token, endpoint=options.keystone_uri)
current_endpoints = client.endpoints.list()
current_services = client.services.list()
def filter_resource(r):
raw = dict(r.__dict__) # Can't access the raw attrs, arg...
raw_cleaned = {}
for k, v in raw.items():
if k == 'manager' or k.startswith('_'):
continue
raw_cleaned[k] = v
return raw_cleaned
for e in current_endpoints:
print("Deleting endpoint: ")
print(utils.prettify_yaml(filter_resource(e)))
client.endpoints.delete(e.id)
for s in current_services:
print("Deleting service: ")
print(utils.prettify_yaml(filter_resource(s)))
client.services.delete(s.id)
if options.file:
with(open(options.file, 'r')) as fh:
contents = yaml.load(fh)
set_contents = {
'services': contents.get('services', []),
'endpoints': contents.get('endpoints', []),
}
print("Regenerating with:")
print(utils.prettify_yaml(set_contents))
set_contents['users'] = []
set_contents['roles'] = []
set_contents['tenants'] = []
initer = keystone.Initializer(token, options.keystone_uri)
initer.initialize(**set_contents)
def main():
parser = OptionParser()
parser.add_option("-k", '--keystone', dest='keystone_uri',
help='keystone endpoint uri to authenticate with', metavar='KEYSTONE')
parser.add_option("-f", '--file', dest='file',
help='service and endpoint creation file', metavar='FILE')
(options, args) = parser.parse_args()
if not options.keystone_uri or not options.file:
parser.error("options are missing, please try -h")
logging.setupLogging(logging.DEBUG)
replace_services_endpoints(get_token(), options)
if __name__ == "__main__":
sys.exit(main())
| sys.path.insert(0, possible_topdir) | conditional_block |
endpoint-service-replace.py | #!/usr/bin/env python
| from optparse import OptionParser
import getpass
import os
import sys
import yaml
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import log as logging
from anvil import importer
from anvil import passwords
from anvil.components.helpers import keystone
from anvil import utils
def get_token():
pw_storage = passwords.KeyringProxy(path='/etc/anvil/passwords.cfg')
lookup_name = "service_token"
prompt = "Please enter the password for %s: " % ('/etc/anvil/passwords.cfg')
(exists, token) = pw_storage.read(lookup_name, prompt)
if not exists:
pw_storage.save(lookup_name, token)
return token
def replace_services_endpoints(token, options):
client = importer.construct_entry_point("keystoneclient.v2_0.client:Client",
token=token, endpoint=options.keystone_uri)
current_endpoints = client.endpoints.list()
current_services = client.services.list()
def filter_resource(r):
raw = dict(r.__dict__) # Can't access the raw attrs, arg...
raw_cleaned = {}
for k, v in raw.items():
if k == 'manager' or k.startswith('_'):
continue
raw_cleaned[k] = v
return raw_cleaned
for e in current_endpoints:
print("Deleting endpoint: ")
print(utils.prettify_yaml(filter_resource(e)))
client.endpoints.delete(e.id)
for s in current_services:
print("Deleting service: ")
print(utils.prettify_yaml(filter_resource(s)))
client.services.delete(s.id)
if options.file:
with(open(options.file, 'r')) as fh:
contents = yaml.load(fh)
set_contents = {
'services': contents.get('services', []),
'endpoints': contents.get('endpoints', []),
}
print("Regenerating with:")
print(utils.prettify_yaml(set_contents))
set_contents['users'] = []
set_contents['roles'] = []
set_contents['tenants'] = []
initer = keystone.Initializer(token, options.keystone_uri)
initer.initialize(**set_contents)
def main():
parser = OptionParser()
parser.add_option("-k", '--keystone', dest='keystone_uri',
help='keystone endpoint uri to authenticate with', metavar='KEYSTONE')
parser.add_option("-f", '--file', dest='file',
help='service and endpoint creation file', metavar='FILE')
(options, args) = parser.parse_args()
if not options.keystone_uri or not options.file:
parser.error("options are missing, please try -h")
logging.setupLogging(logging.DEBUG)
replace_services_endpoints(get_token(), options)
if __name__ == "__main__":
sys.exit(main()) | random_line_split | |
endpoint-service-replace.py | #!/usr/bin/env python
from optparse import OptionParser
import getpass
import os
import sys
import yaml
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import log as logging
from anvil import importer
from anvil import passwords
from anvil.components.helpers import keystone
from anvil import utils
def get_token():
pw_storage = passwords.KeyringProxy(path='/etc/anvil/passwords.cfg')
lookup_name = "service_token"
prompt = "Please enter the password for %s: " % ('/etc/anvil/passwords.cfg')
(exists, token) = pw_storage.read(lookup_name, prompt)
if not exists:
pw_storage.save(lookup_name, token)
return token
def replace_services_endpoints(token, options):
client = importer.construct_entry_point("keystoneclient.v2_0.client:Client",
token=token, endpoint=options.keystone_uri)
current_endpoints = client.endpoints.list()
current_services = client.services.list()
def filter_resource(r):
|
for e in current_endpoints:
print("Deleting endpoint: ")
print(utils.prettify_yaml(filter_resource(e)))
client.endpoints.delete(e.id)
for s in current_services:
print("Deleting service: ")
print(utils.prettify_yaml(filter_resource(s)))
client.services.delete(s.id)
if options.file:
with(open(options.file, 'r')) as fh:
contents = yaml.load(fh)
set_contents = {
'services': contents.get('services', []),
'endpoints': contents.get('endpoints', []),
}
print("Regenerating with:")
print(utils.prettify_yaml(set_contents))
set_contents['users'] = []
set_contents['roles'] = []
set_contents['tenants'] = []
initer = keystone.Initializer(token, options.keystone_uri)
initer.initialize(**set_contents)
def main():
parser = OptionParser()
parser.add_option("-k", '--keystone', dest='keystone_uri',
help='keystone endpoint uri to authenticate with', metavar='KEYSTONE')
parser.add_option("-f", '--file', dest='file',
help='service and endpoint creation file', metavar='FILE')
(options, args) = parser.parse_args()
if not options.keystone_uri or not options.file:
parser.error("options are missing, please try -h")
logging.setupLogging(logging.DEBUG)
replace_services_endpoints(get_token(), options)
if __name__ == "__main__":
sys.exit(main())
| raw = dict(r.__dict__) # Can't access the raw attrs, arg...
raw_cleaned = {}
for k, v in raw.items():
if k == 'manager' or k.startswith('_'):
continue
raw_cleaned[k] = v
return raw_cleaned | identifier_body |
endpoint-service-replace.py | #!/usr/bin/env python
from optparse import OptionParser
import getpass
import os
import sys
import yaml
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import log as logging
from anvil import importer
from anvil import passwords
from anvil.components.helpers import keystone
from anvil import utils
def | ():
pw_storage = passwords.KeyringProxy(path='/etc/anvil/passwords.cfg')
lookup_name = "service_token"
prompt = "Please enter the password for %s: " % ('/etc/anvil/passwords.cfg')
(exists, token) = pw_storage.read(lookup_name, prompt)
if not exists:
pw_storage.save(lookup_name, token)
return token
def replace_services_endpoints(token, options):
client = importer.construct_entry_point("keystoneclient.v2_0.client:Client",
token=token, endpoint=options.keystone_uri)
current_endpoints = client.endpoints.list()
current_services = client.services.list()
def filter_resource(r):
raw = dict(r.__dict__) # Can't access the raw attrs, arg...
raw_cleaned = {}
for k, v in raw.items():
if k == 'manager' or k.startswith('_'):
continue
raw_cleaned[k] = v
return raw_cleaned
for e in current_endpoints:
print("Deleting endpoint: ")
print(utils.prettify_yaml(filter_resource(e)))
client.endpoints.delete(e.id)
for s in current_services:
print("Deleting service: ")
print(utils.prettify_yaml(filter_resource(s)))
client.services.delete(s.id)
if options.file:
with(open(options.file, 'r')) as fh:
contents = yaml.load(fh)
set_contents = {
'services': contents.get('services', []),
'endpoints': contents.get('endpoints', []),
}
print("Regenerating with:")
print(utils.prettify_yaml(set_contents))
set_contents['users'] = []
set_contents['roles'] = []
set_contents['tenants'] = []
initer = keystone.Initializer(token, options.keystone_uri)
initer.initialize(**set_contents)
def main():
parser = OptionParser()
parser.add_option("-k", '--keystone', dest='keystone_uri',
help='keystone endpoint uri to authenticate with', metavar='KEYSTONE')
parser.add_option("-f", '--file', dest='file',
help='service and endpoint creation file', metavar='FILE')
(options, args) = parser.parse_args()
if not options.keystone_uri or not options.file:
parser.error("options are missing, please try -h")
logging.setupLogging(logging.DEBUG)
replace_services_endpoints(get_token(), options)
if __name__ == "__main__":
sys.exit(main())
| get_token | identifier_name |
server-started-notification.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { TranslateService } from '@ngx-translate/core'
import { ChallengeService } from '../Services/challenge.service'
import { ChangeDetectorRef, Component, NgZone, OnInit } from '@angular/core'
import { CookieService } from 'ngx-cookie'
import { SocketIoService } from '../Services/socket-io.service' |
@Component({
selector: 'app-server-started-notification',
templateUrl: './server-started-notification.component.html',
styleUrls: ['./server-started-notification.component.scss']
})
export class ServerStartedNotificationComponent implements OnInit {
public hackingProgress: HackingProgress = {} as HackingProgress
constructor (private readonly ngZone: NgZone, private readonly challengeService: ChallengeService, private readonly translate: TranslateService, private readonly cookieService: CookieService, private readonly ref: ChangeDetectorRef, private readonly io: SocketIoService) {
}
ngOnInit () {
this.ngZone.runOutsideAngular(() => {
this.io.socket().on('server started', () => {
const continueCode = this.cookieService.get('continueCode')
const continueCodeFindIt = this.cookieService.get('continueCodeFindIt')
const continueCodeFixIt = this.cookieService.get('continueCodeFixIt')
if (continueCode) {
this.challengeService.restoreProgress(encodeURIComponent(continueCode)).subscribe(() => {
this.translate.get('AUTO_RESTORED_PROGRESS').subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
}, (error) => {
console.log(error)
this.translate.get('AUTO_RESTORE_PROGRESS_FAILED', { error: error }).subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
})
}
if (continueCodeFindIt) {
this.challengeService.restoreProgressFindIt(encodeURIComponent(continueCodeFindIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
if (continueCodeFixIt) {
this.challengeService.restoreProgressFixIt(encodeURIComponent(continueCodeFixIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
this.ref.detectChanges()
})
})
}
closeNotification () {
this.hackingProgress.autoRestoreMessage = null
}
clearProgress () {
this.cookieService.remove('continueCode')
this.cookieService.remove('continueCodeFixIt')
this.cookieService.remove('continueCodeFindIt')
this.cookieService.remove('token')
sessionStorage.removeItem('bid')
sessionStorage.removeItem('itemTotal')
localStorage.removeItem('token')
localStorage.removeItem('displayedDifficulties')
localStorage.removeItem('showSolvedChallenges')
localStorage.removeItem('showDisabledChallenges')
localStorage.removeItem('showOnlyTutorialChallenges')
localStorage.removeItem('displayedChallengeCategories')
this.hackingProgress.cleared = true
}
} |
interface HackingProgress {
autoRestoreMessage: string | null
cleared: boolean
} | random_line_split |
server-started-notification.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { TranslateService } from '@ngx-translate/core'
import { ChallengeService } from '../Services/challenge.service'
import { ChangeDetectorRef, Component, NgZone, OnInit } from '@angular/core'
import { CookieService } from 'ngx-cookie'
import { SocketIoService } from '../Services/socket-io.service'
interface HackingProgress {
autoRestoreMessage: string | null
cleared: boolean
}
@Component({
selector: 'app-server-started-notification',
templateUrl: './server-started-notification.component.html',
styleUrls: ['./server-started-notification.component.scss']
})
export class ServerStartedNotificationComponent implements OnInit {
public hackingProgress: HackingProgress = {} as HackingProgress
constructor (private readonly ngZone: NgZone, private readonly challengeService: ChallengeService, private readonly translate: TranslateService, private readonly cookieService: CookieService, private readonly ref: ChangeDetectorRef, private readonly io: SocketIoService) {
}
ngOnInit () {
this.ngZone.runOutsideAngular(() => {
this.io.socket().on('server started', () => {
const continueCode = this.cookieService.get('continueCode')
const continueCodeFindIt = this.cookieService.get('continueCodeFindIt')
const continueCodeFixIt = this.cookieService.get('continueCodeFixIt')
if (continueCode) {
this.challengeService.restoreProgress(encodeURIComponent(continueCode)).subscribe(() => {
this.translate.get('AUTO_RESTORED_PROGRESS').subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
}, (error) => {
console.log(error)
this.translate.get('AUTO_RESTORE_PROGRESS_FAILED', { error: error }).subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
})
}
if (continueCodeFindIt) |
if (continueCodeFixIt) {
this.challengeService.restoreProgressFixIt(encodeURIComponent(continueCodeFixIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
this.ref.detectChanges()
})
})
}
closeNotification () {
this.hackingProgress.autoRestoreMessage = null
}
clearProgress () {
this.cookieService.remove('continueCode')
this.cookieService.remove('continueCodeFixIt')
this.cookieService.remove('continueCodeFindIt')
this.cookieService.remove('token')
sessionStorage.removeItem('bid')
sessionStorage.removeItem('itemTotal')
localStorage.removeItem('token')
localStorage.removeItem('displayedDifficulties')
localStorage.removeItem('showSolvedChallenges')
localStorage.removeItem('showDisabledChallenges')
localStorage.removeItem('showOnlyTutorialChallenges')
localStorage.removeItem('displayedChallengeCategories')
this.hackingProgress.cleared = true
}
}
| {
this.challengeService.restoreProgressFindIt(encodeURIComponent(continueCodeFindIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
} | conditional_block |
server-started-notification.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { TranslateService } from '@ngx-translate/core'
import { ChallengeService } from '../Services/challenge.service'
import { ChangeDetectorRef, Component, NgZone, OnInit } from '@angular/core'
import { CookieService } from 'ngx-cookie'
import { SocketIoService } from '../Services/socket-io.service'
interface HackingProgress {
autoRestoreMessage: string | null
cleared: boolean
}
@Component({
selector: 'app-server-started-notification',
templateUrl: './server-started-notification.component.html',
styleUrls: ['./server-started-notification.component.scss']
})
export class ServerStartedNotificationComponent implements OnInit {
public hackingProgress: HackingProgress = {} as HackingProgress
constructor (private readonly ngZone: NgZone, private readonly challengeService: ChallengeService, private readonly translate: TranslateService, private readonly cookieService: CookieService, private readonly ref: ChangeDetectorRef, private readonly io: SocketIoService) {
}
ngOnInit () {
this.ngZone.runOutsideAngular(() => {
this.io.socket().on('server started', () => {
const continueCode = this.cookieService.get('continueCode')
const continueCodeFindIt = this.cookieService.get('continueCodeFindIt')
const continueCodeFixIt = this.cookieService.get('continueCodeFixIt')
if (continueCode) {
this.challengeService.restoreProgress(encodeURIComponent(continueCode)).subscribe(() => {
this.translate.get('AUTO_RESTORED_PROGRESS').subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
}, (error) => {
console.log(error)
this.translate.get('AUTO_RESTORE_PROGRESS_FAILED', { error: error }).subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
})
}
if (continueCodeFindIt) {
this.challengeService.restoreProgressFindIt(encodeURIComponent(continueCodeFindIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
if (continueCodeFixIt) {
this.challengeService.restoreProgressFixIt(encodeURIComponent(continueCodeFixIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
this.ref.detectChanges()
})
})
}
| () {
this.hackingProgress.autoRestoreMessage = null
}
clearProgress () {
this.cookieService.remove('continueCode')
this.cookieService.remove('continueCodeFixIt')
this.cookieService.remove('continueCodeFindIt')
this.cookieService.remove('token')
sessionStorage.removeItem('bid')
sessionStorage.removeItem('itemTotal')
localStorage.removeItem('token')
localStorage.removeItem('displayedDifficulties')
localStorage.removeItem('showSolvedChallenges')
localStorage.removeItem('showDisabledChallenges')
localStorage.removeItem('showOnlyTutorialChallenges')
localStorage.removeItem('displayedChallengeCategories')
this.hackingProgress.cleared = true
}
}
| closeNotification | identifier_name |
server-started-notification.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { TranslateService } from '@ngx-translate/core'
import { ChallengeService } from '../Services/challenge.service'
import { ChangeDetectorRef, Component, NgZone, OnInit } from '@angular/core'
import { CookieService } from 'ngx-cookie'
import { SocketIoService } from '../Services/socket-io.service'
interface HackingProgress {
autoRestoreMessage: string | null
cleared: boolean
}
@Component({
selector: 'app-server-started-notification',
templateUrl: './server-started-notification.component.html',
styleUrls: ['./server-started-notification.component.scss']
})
export class ServerStartedNotificationComponent implements OnInit {
public hackingProgress: HackingProgress = {} as HackingProgress
constructor (private readonly ngZone: NgZone, private readonly challengeService: ChallengeService, private readonly translate: TranslateService, private readonly cookieService: CookieService, private readonly ref: ChangeDetectorRef, private readonly io: SocketIoService) {
}
ngOnInit () {
this.ngZone.runOutsideAngular(() => {
this.io.socket().on('server started', () => {
const continueCode = this.cookieService.get('continueCode')
const continueCodeFindIt = this.cookieService.get('continueCodeFindIt')
const continueCodeFixIt = this.cookieService.get('continueCodeFixIt')
if (continueCode) {
this.challengeService.restoreProgress(encodeURIComponent(continueCode)).subscribe(() => {
this.translate.get('AUTO_RESTORED_PROGRESS').subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
}, (error) => {
console.log(error)
this.translate.get('AUTO_RESTORE_PROGRESS_FAILED', { error: error }).subscribe((notificationServerStarted) => {
this.hackingProgress.autoRestoreMessage = notificationServerStarted
}, (translationId) => {
this.hackingProgress.autoRestoreMessage = translationId
})
})
}
if (continueCodeFindIt) {
this.challengeService.restoreProgressFindIt(encodeURIComponent(continueCodeFindIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
if (continueCodeFixIt) {
this.challengeService.restoreProgressFixIt(encodeURIComponent(continueCodeFixIt)).subscribe(() => {
}, (error) => {
console.log(error)
})
}
this.ref.detectChanges()
})
})
}
closeNotification () {
this.hackingProgress.autoRestoreMessage = null
}
clearProgress () |
}
| {
this.cookieService.remove('continueCode')
this.cookieService.remove('continueCodeFixIt')
this.cookieService.remove('continueCodeFindIt')
this.cookieService.remove('token')
sessionStorage.removeItem('bid')
sessionStorage.removeItem('itemTotal')
localStorage.removeItem('token')
localStorage.removeItem('displayedDifficulties')
localStorage.removeItem('showSolvedChallenges')
localStorage.removeItem('showDisabledChallenges')
localStorage.removeItem('showOnlyTutorialChallenges')
localStorage.removeItem('displayedChallengeCategories')
this.hackingProgress.cleared = true
} | identifier_body |
bootstrap-popover.js | //Wrapped in an outer function to preserve global this
(function (root) { var amdExports; define(['bootstrap/bootstrap-transition','bootstrap/bootstrap-tooltip'], function () { (function () {
/* ===========================================================
* bootstrap-popover.js v2.3.1
* http://twitter.github.com/bootstrap/javascript.html#popovers
* ===========================================================
* Copyright 2012 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =========================================================== */
!function ($) {
"use strict"; // jshint ;_;
/* POPOVER PUBLIC CLASS DEFINITION
* =============================== */
var Popover = function (element, options) {
this.init('popover', element, options)
}
/* NOTE: POPOVER EXTENDS BOOTSTRAP-TOOLTIP.js
========================================== */
Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype, {
constructor: Popover
, setContent: function () {
var $tip = this.tip()
, title = this.getTitle()
, content = this.getContent()
$tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)
$tip.find('.popover-content')[this.options.html ? 'html' : 'text'](content)
$tip.removeClass('fade top bottom left right in')
}
, hasContent: function () {
return this.getTitle() || this.getContent()
}
, getContent: function () {
var content
, $e = this.$element
, o = this.options
content = (typeof o.content == 'function' ? o.content.call($e[0]) : o.content)
|| $e.attr('data-content')
return content
}
, tip: function () {
if (!this.$tip) {
this.$tip = $(this.options.template)
}
return this.$tip
}
, destroy: function () {
this.hide().$element.off('.' + this.type).removeData(this.type)
}
})
/* POPOVER PLUGIN DEFINITION
* ======================= */
var old = $.fn.popover
$.fn.popover = function (option) {
return this.each(function () {
var $this = $(this)
, data = $this.data('popover')
, options = typeof option == 'object' && option
if (!data) $this.data('popover', (data = new Popover(this, options)))
if (typeof option == 'string') data[option]()
})
} | placement: 'right'
, trigger: 'click'
, content: ''
, template: '<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
})
/* POPOVER NO CONFLICT
* =================== */
$.fn.popover.noConflict = function () {
$.fn.popover = old
return this
}
}(window.jQuery);
}.call(root));
return amdExports;
}); }(this)); |
$.fn.popover.Constructor = Popover
$.fn.popover.defaults = $.extend({} , $.fn.tooltip.defaults, { | random_line_split |
binomial_prob.py | #!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
"""
import matplotlib.pyplot as plt
import numpy as np
## simulate the number of flips before heads
def | ():
tails, num_flips = True, 0
while tails:
num_flips += 1
if np.random.binomial(1,0.5):
tails = False
return num_flips
if __name__ == '__main__':
## simulate
flips = [coin() for k in xrange(10000)]
## get the distribution of counts condition on the number of flips
range_flips = range(1, max(flips) + 1)
counts = np.array([flips.count(k)*1. for k in range_flips])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(range_flips,counts,alpha=0.4)
ax.set_ylabel("counts")
ax.set_xlabel("num flips to win")
#print [int(i) for i in counts]
winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
#print range_flips
print winnings
plt.show()
| coin | identifier_name |
binomial_prob.py | #!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
"""
import matplotlib.pyplot as plt
import numpy as np
## simulate the number of flips before heads
def coin():
|
if __name__ == '__main__':
## simulate
flips = [coin() for k in xrange(10000)]
## get the distribution of counts condition on the number of flips
range_flips = range(1, max(flips) + 1)
counts = np.array([flips.count(k)*1. for k in range_flips])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(range_flips,counts,alpha=0.4)
ax.set_ylabel("counts")
ax.set_xlabel("num flips to win")
#print [int(i) for i in counts]
winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
#print range_flips
print winnings
plt.show()
| tails, num_flips = True, 0
while tails:
num_flips += 1
if np.random.binomial(1,0.5):
tails = False
return num_flips | identifier_body |
binomial_prob.py | #!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
"""
import matplotlib.pyplot as plt
import numpy as np
## simulate the number of flips before heads
def coin():
tails, num_flips = True, 0
while tails:
num_flips += 1
if np.random.binomial(1,0.5):
|
return num_flips
if __name__ == '__main__':
## simulate
flips = [coin() for k in xrange(10000)]
## get the distribution of counts condition on the number of flips
range_flips = range(1, max(flips) + 1)
counts = np.array([flips.count(k)*1. for k in range_flips])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(range_flips,counts,alpha=0.4)
ax.set_ylabel("counts")
ax.set_xlabel("num flips to win")
#print [int(i) for i in counts]
winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
#print range_flips
print winnings
plt.show()
| tails = False | conditional_block |
binomial_prob.py | #!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
"""
import matplotlib.pyplot as plt
import numpy as np |
## simulate the number of flips before heads
def coin():
tails, num_flips = True, 0
while tails:
num_flips += 1
if np.random.binomial(1,0.5):
tails = False
return num_flips
if __name__ == '__main__':
## simulate
flips = [coin() for k in xrange(10000)]
## get the distribution of counts condition on the number of flips
range_flips = range(1, max(flips) + 1)
counts = np.array([flips.count(k)*1. for k in range_flips])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(range_flips,counts,alpha=0.4)
ax.set_ylabel("counts")
ax.set_xlabel("num flips to win")
#print [int(i) for i in counts]
winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
#print range_flips
print winnings
plt.show() | random_line_split | |
ordering.py | # -*- coding: utf-8 -*-
'''auto ordering call chain test mixins'''
from inspect import ismodule
from twoq.support import port
class ARandomQMixin(object):
def test_choice(self):
self.assertEqual(len(list(self.qclass(1, 2, 3, 4, 5, 6).choice())), 1)
def test_sample(self):
self.assertEqual(len(self.qclass(1, 2, 3, 4, 5, 6).sample(3).end()), 3)
def test_shuffle(self):
self.assertEqual(
len(self.qclass(1, 2, 3, 4, 5, 6).shuffle()),
len([5, 4, 6, 3, 1, 2]),
)
class ACombineQMixin(object):
# def test_combinations(self):
# foo = self.qclass('ABCD').combinations(2).value(),
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'),
# ('C', 'D')],
# foo,
# )
#
# def test_permutations(self):
# foo = self.qclass('ABCD').permutations(2).value()
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'),
# ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'),
# ('D', 'B'), ('D', 'C')],
# foo,
# )
def test_product(self):
foo = self.qclass('ABCD', 'xy').product().value()
self.assertEqual(
foo,
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'), ('C', 'x'),
('C', 'y'), ('D', 'x'), ('D', 'y')],
foo,
)
class AOrderQMixin(ARandomQMixin, ACombineQMixin):
'''combination mixin'''
def test_group(self,):
from math import floor
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).tap(lambda x: floor(x)).group().end(),
[[1.0, [1.3]], [2.0, [2.1, 2.4]]]
)
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).group().end(),
[[1.3, [1.3]], [2.1, [2.1]], [2.4, [2.4]]],
)
def test_grouper(self):
|
def test_reversed(self):
self.assertEqual(
self.qclass(5, 4, 3, 2, 1).reverse().end(), [1, 2, 3, 4, 5],
)
def test_sort(self):
from math import sin
self.assertEqual(
self.qclass(1, 2, 3, 4, 5, 6).tap(
lambda x: sin(x)
).sort().end(),
[5, 4, 6, 3, 1, 2],
)
self.assertEqual(
self.qclass(4, 6, 65, 3, 63, 2, 4).sort().end(),
[2, 3, 4, 4, 6, 63, 65],
)
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule
| self.assertEqual(
self.qclass(
'moe', 'larry', 'curly', 30, 40, 50, True
).grouper(2, 'x').end(),
[('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')]
) | identifier_body |
ordering.py | # -*- coding: utf-8 -*-
'''auto ordering call chain test mixins'''
from inspect import ismodule
from twoq.support import port
class ARandomQMixin(object):
def test_choice(self):
self.assertEqual(len(list(self.qclass(1, 2, 3, 4, 5, 6).choice())), 1)
def test_sample(self):
self.assertEqual(len(self.qclass(1, 2, 3, 4, 5, 6).sample(3).end()), 3)
def test_shuffle(self):
self.assertEqual(
len(self.qclass(1, 2, 3, 4, 5, 6).shuffle()),
len([5, 4, 6, 3, 1, 2]),
)
class ACombineQMixin(object):
# def test_combinations(self):
# foo = self.qclass('ABCD').combinations(2).value(),
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'),
# ('C', 'D')],
# foo,
# )
#
# def test_permutations(self):
# foo = self.qclass('ABCD').permutations(2).value()
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'),
# ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'),
# ('D', 'B'), ('D', 'C')],
# foo,
# )
def test_product(self):
foo = self.qclass('ABCD', 'xy').product().value()
self.assertEqual(
foo,
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'), ('C', 'x'),
('C', 'y'), ('D', 'x'), ('D', 'y')],
foo,
)
class AOrderQMixin(ARandomQMixin, ACombineQMixin):
'''combination mixin'''
def test_group(self,):
from math import floor
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).tap(lambda x: floor(x)).group().end(),
[[1.0, [1.3]], [2.0, [2.1, 2.4]]]
)
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).group().end(),
[[1.3, [1.3]], [2.1, [2.1]], [2.4, [2.4]]],
)
def test_grouper(self):
self.assertEqual(
self.qclass(
'moe', 'larry', 'curly', 30, 40, 50, True
).grouper(2, 'x').end(),
[('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')]
)
def | (self):
self.assertEqual(
self.qclass(5, 4, 3, 2, 1).reverse().end(), [1, 2, 3, 4, 5],
)
def test_sort(self):
from math import sin
self.assertEqual(
self.qclass(1, 2, 3, 4, 5, 6).tap(
lambda x: sin(x)
).sort().end(),
[5, 4, 6, 3, 1, 2],
)
self.assertEqual(
self.qclass(4, 6, 65, 3, 63, 2, 4).sort().end(),
[2, 3, 4, 4, 6, 63, 65],
)
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule
| test_reversed | identifier_name |
ordering.py | # -*- coding: utf-8 -*-
'''auto ordering call chain test mixins'''
from inspect import ismodule
from twoq.support import port
class ARandomQMixin(object):
def test_choice(self):
self.assertEqual(len(list(self.qclass(1, 2, 3, 4, 5, 6).choice())), 1)
def test_sample(self):
self.assertEqual(len(self.qclass(1, 2, 3, 4, 5, 6).sample(3).end()), 3)
def test_shuffle(self):
self.assertEqual(
len(self.qclass(1, 2, 3, 4, 5, 6).shuffle()),
len([5, 4, 6, 3, 1, 2]),
)
class ACombineQMixin(object):
# def test_combinations(self):
# foo = self.qclass('ABCD').combinations(2).value(),
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'),
# ('C', 'D')],
# foo,
# ) | #
# def test_permutations(self):
# foo = self.qclass('ABCD').permutations(2).value()
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'),
# ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'),
# ('D', 'B'), ('D', 'C')],
# foo,
# )
def test_product(self):
foo = self.qclass('ABCD', 'xy').product().value()
self.assertEqual(
foo,
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'), ('C', 'x'),
('C', 'y'), ('D', 'x'), ('D', 'y')],
foo,
)
class AOrderQMixin(ARandomQMixin, ACombineQMixin):
'''combination mixin'''
def test_group(self,):
from math import floor
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).tap(lambda x: floor(x)).group().end(),
[[1.0, [1.3]], [2.0, [2.1, 2.4]]]
)
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).group().end(),
[[1.3, [1.3]], [2.1, [2.1]], [2.4, [2.4]]],
)
def test_grouper(self):
self.assertEqual(
self.qclass(
'moe', 'larry', 'curly', 30, 40, 50, True
).grouper(2, 'x').end(),
[('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')]
)
def test_reversed(self):
self.assertEqual(
self.qclass(5, 4, 3, 2, 1).reverse().end(), [1, 2, 3, 4, 5],
)
def test_sort(self):
from math import sin
self.assertEqual(
self.qclass(1, 2, 3, 4, 5, 6).tap(
lambda x: sin(x)
).sort().end(),
[5, 4, 6, 3, 1, 2],
)
self.assertEqual(
self.qclass(4, 6, 65, 3, 63, 2, 4).sort().end(),
[2, 3, 4, 4, 6, 63, 65],
)
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule | random_line_split | |
cursor.py | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Cursor classes
"""
import sys
import weakref
import re
import itertools
from mysql.connector import constants
from mysql.connector import errors
from mysql.connector import utils
RE_SQL_COMMENT = re.compile("\/\*.*\*\/")
RE_SQL_INSERT_VALUES = re.compile(
r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
re.I | re.M)
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
RE_SQL_SPLIT_STMTS = re.compile(
r''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class CursorBase(object):
"""
Base for defining MySQLCursor. This class is a skeleton and defines
methods and members as required for the Python Database API
Specification v2.0.
It's better to inherite from MySQLCursor.
"""
def __init__(self):
self._description = None
self._rowcount = -1
self._last_insert_id = None
self.arraysize = 1
def callproc(self, procname, args=()):
pass
def close(self):
pass
def execute(self, operation, params=()):
pass
def executemany(self, operation, seqparams):
pass
def fetchone(self):
pass
def fetchmany(self, size=1):
pass
def fetchall(self):
pass
def nextset(self):
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def reset(self):
pass
@property
def description(self):
"""Returns description of columns in a result
This property returns a list of tuples describing the columns in
in a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@property
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
class MySQLCursor(CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
MySQLCursor should be inherited whenever other functionallity is
required. An example would to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
def __init__(self, connection=None):
CursorBase.__init__(self)
self._connection = None
self._stored_results = []
self._nextrow = (None, None)
self._warnings = None
self._warning_count = 0
self._executed = None
self._executed_list = []
if connection is not None:
self._set_connection(connection)
def __iter__(self):
"""
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def _set_connection(self, connection):
try:
self._connection = weakref.proxy(connection)
self._connection._protocol
except (AttributeError, TypeError):
raise errors.InterfaceError(errno=2048)
def _reset_result(self):
self._rowcount = -1
self._lastrowid = None
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
def next(self):
"""
Used for iterating over the result set. Calles self.fetchone()
to get the next row.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
def close(self):
"""Close the cursor
Returns True when successful, otherwise False.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def _process_params_dict(self, params):
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k,v in params.items():
c = v
c = to_mysql(c)
c = escape(c)
c = quote(c)
res[k] = c
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % e)
else:
return res
return None
def _process_params(self, params):
"""
Process the parameters which were given when self.execute() was
called. It does following using the MySQLConnection converter:
* Convert Python types to MySQL types
* Escapes characters required for MySQL.
* Quote values when needed.
Returns a list.
"""
if isinstance(params,dict):
return self._process_params_dict(params)
try:
res = params
res = map(self._connection.converter.to_mysql,res)
res = map(self._connection.converter.escape,res)
res = map(self._connection.converter.quote,res)
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % e)
else:
return tuple(res)
return None
def _row_to_python(self, rowdata, desc=None):
res = ()
try:
if not desc:
desc = self.description
for idx,v in enumerate(rowdata):
flddsc = desc[idx]
res += (self._connection.converter.to_python(flddsc, v),)
except StandardError, e:
raise errors.InterfaceError(
"Failed converting row to Python types; %s" % e)
else:
return res
return None
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, TypeError), err:
raise errors.ProgrammingError(
"Failed handling non-resultset; %s" % err)
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_resultset(self):
pass
def _handle_result(self, result):
"""
Handle the result after a command was send. The result can be either
an OK-packet or a dictionary containing column/eof information.
Raises InterfaceError when result is not a dict() or result is
invalid.
"""
if not isinstance(result, dict):
raise errors.InterfaceError('Result was not a dict()')
if 'columns' in result:
# Weak test, must be column/eof information
self._description = result['columns']
self._connection.unread_result = True
self._handle_resultset()
elif 'affected_rows' in result:
# Weak test, must be an OK-packet
self._connection.unread_result = False
self._handle_noresultset(result)
else:
raise errors.InterfaceError('Invalid result')
def _execute_iter(self, query_iter):
"""Generator returns MySQLCursor objects for multiple statements
This method is only used when multiple statements are executed
by the execute() method. It uses itertools.izip to iterate over the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
Yields a MySQLCursor instance.
"""
if not self._executed_list:
self._executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
for result, stmt in itertools.izip(query_iter,
iter(self._executed_list)):
self._reset_result()
self._handle_result(result)
self._executed = stmt
yield self
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
If warnings where generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
self._reset_result()
stmt = ''
try:
if isinstance(operation, unicode):
operation = operation.encode(self._connection.charset)
except (UnicodeDecodeError, UnicodeEncodeError), e:
raise errors.ProgrammingError(str(e))
if params is not None:
try: | stmt = operation % self._process_params(params)
except TypeError:
raise errors.ProgrammingError(
"Wrong number of arguments during string formatting")
else:
stmt = operation
if multi:
self._executed = stmt
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
self._executed = stmt
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError, err:
if self._connection._have_next_result:
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
elif len(RE_SQL_SPLIT_STMTS.split(operation)) > 1:
raise errors.InternalError(
"executemany() does not support multiple statements")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
opnocom = re.sub(RE_SQL_COMMENT, '', operation)
m = re.search(RE_SQL_INSERT_VALUES, opnocom)
fmt = m.group(1)
values = []
for params in seq_params:
values.append(fmt % self._process_params(params))
operation = operation.replace(m.group(1), ','.join(values), 1)
return self.execute(operation)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError), err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
Returns a iterator.
"""
return iter(self._stored_results)
def callproc(self, procname, args=()):
"""Calls a stored procedue with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Definining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print cursor.fetchone()
The last print should output ('5', '5', 25L)
Does not return a value, but a result set will be
available when the CALL-statement execute successfully.
Raises exceptions when something is wrong.
"""
argfmt = "@_%s_arg%d"
self._stored_results = []
results = []
try:
procargs = self._process_params(args)
argnames = []
for idx,arg in enumerate(procargs):
argname = argfmt % (procname, idx+1)
argnames.append(argname)
setquery = "SET %s=%%s" % argname
self.execute(setquery, (arg,))
call = "CALL %s(%s)" % (procname,','.join(argnames))
for result in self._connection.cmd_query_iter(call):
if 'columns' in result:
tmp = MySQLCursorBuffered(self._connection._get_self())
tmp._handle_result(result)
results.append(tmp)
if argnames:
select = "SELECT %s" % ','.join(argnames)
self.execute(select)
self._stored_results = results
return self.fetchone()
else:
self._stored_results = results
return ()
except errors.Error:
raise
except StandardError, e:
raise errors.InterfaceError(
"Failed calling stored routine; %s" % e)
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
This method is kept for backward compatibility. Please use the
property lastrowid instead.
Returns a long value or None.
"""
return self.lastrowid
def _fetch_warnings(self):
"""
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
c = self._connection.cursor()
cnt = c.execute("SHOW WARNINGS")
res = c.fetchall()
c.close()
except StandardError, e:
raise errors.InterfaceError, errors.InterfaceError(
"Failed getting warnings; %s" % e), sys.exc_info()[2]
if self._connection.raise_on_warnings is True:
msg = '; '.join([ "(%s) %s" % (r[1],r[2]) for r in res])
raise errors.get_mysql_exception(res[0][1],res[0][2])
else:
if len(res):
return res
return None
def _handle_eof(self, eof):
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
if self._connection.get_warnings is True and eof['warning_count']:
self._warnings = self._fetch_warnings()
def _fetch_row(self):
if not self._have_unread_result():
return None
row = None
try:
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row()
else:
(row, eof) = self._nextrow
if row:
(foo, eof) = self._nextrow = self._connection.get_row()
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
except:
raise
else:
return row
return None
def fetchwarnings(self):
return self._warnings
def fetchone(self):
row = self._fetch_row()
if row:
return self._row_to_python(row)
return None
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
res = []
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
for i in xrange(0,self.rowcount):
res.append(self._row_to_python(rows[i]))
self._handle_eof(eof)
return res
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple( [d[0].decode('utf8') for d in self.description] )
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __unicode__(self):
fmt = "MySQLCursor: %s"
if self._executed:
if len(self._executed) > 30:
res = fmt % (self._executed[:30] + '..')
else:
res = fmt % (self._executed)
else:
res = fmt % '(Nothing executed yet)'
return res
def __str__(self):
return repr(self.__unicode__())
class MySQLCursorBuffered(MySQLCursor):
"""Cursor which fetches rows within execute()"""
def __init__(self, connection=None):
MySQLCursor.__init__(self, connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
(self._rows, eof) = self._connection.get_rows()
self._rowcount = len(self._rows)
self._handle_eof(eof)
self._next_row = 0
try:
self._connection.unread_result = False
except:
pass
def reset(self):
self._rows = None
def _fetch_row(self):
row = None
try:
row = self._rows[self._next_row]
except:
return None
else:
self._next_row += 1
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
res = []
for row in self._rows:
res.append(self._row_to_python(row))
self._next_row = len(self._rows)
return res
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0:
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
self._handle_eof(eof)
return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
return [ r for r in self._rows ] | random_line_split | |
cursor.py | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Cursor classes
"""
import sys
import weakref
import re
import itertools
from mysql.connector import constants
from mysql.connector import errors
from mysql.connector import utils
RE_SQL_COMMENT = re.compile("\/\*.*\*\/")
RE_SQL_INSERT_VALUES = re.compile(
r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
re.I | re.M)
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
RE_SQL_SPLIT_STMTS = re.compile(
r''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class CursorBase(object):
"""
Base for defining MySQLCursor. This class is a skeleton and defines
methods and members as required for the Python Database API
Specification v2.0.
It's better to inherite from MySQLCursor.
"""
def __init__(self):
self._description = None
self._rowcount = -1
self._last_insert_id = None
self.arraysize = 1
def callproc(self, procname, args=()):
pass
def close(self):
pass
def execute(self, operation, params=()):
pass
def executemany(self, operation, seqparams):
pass
def fetchone(self):
|
def fetchmany(self, size=1):
pass
def fetchall(self):
pass
def nextset(self):
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def reset(self):
pass
@property
def description(self):
"""Returns description of columns in a result
This property returns a list of tuples describing the columns in
in a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@property
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
class MySQLCursor(CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
MySQLCursor should be inherited whenever other functionallity is
required. An example would to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
def __init__(self, connection=None):
CursorBase.__init__(self)
self._connection = None
self._stored_results = []
self._nextrow = (None, None)
self._warnings = None
self._warning_count = 0
self._executed = None
self._executed_list = []
if connection is not None:
self._set_connection(connection)
def __iter__(self):
"""
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def _set_connection(self, connection):
try:
self._connection = weakref.proxy(connection)
self._connection._protocol
except (AttributeError, TypeError):
raise errors.InterfaceError(errno=2048)
def _reset_result(self):
self._rowcount = -1
self._lastrowid = None
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
def next(self):
"""
Used for iterating over the result set. Calles self.fetchone()
to get the next row.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
def close(self):
"""Close the cursor
Returns True when successful, otherwise False.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def _process_params_dict(self, params):
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k,v in params.items():
c = v
c = to_mysql(c)
c = escape(c)
c = quote(c)
res[k] = c
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % e)
else:
return res
return None
def _process_params(self, params):
"""
Process the parameters which were given when self.execute() was
called. It does following using the MySQLConnection converter:
* Convert Python types to MySQL types
* Escapes characters required for MySQL.
* Quote values when needed.
Returns a list.
"""
if isinstance(params,dict):
return self._process_params_dict(params)
try:
res = params
res = map(self._connection.converter.to_mysql,res)
res = map(self._connection.converter.escape,res)
res = map(self._connection.converter.quote,res)
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % e)
else:
return tuple(res)
return None
def _row_to_python(self, rowdata, desc=None):
res = ()
try:
if not desc:
desc = self.description
for idx,v in enumerate(rowdata):
flddsc = desc[idx]
res += (self._connection.converter.to_python(flddsc, v),)
except StandardError, e:
raise errors.InterfaceError(
"Failed converting row to Python types; %s" % e)
else:
return res
return None
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, TypeError), err:
raise errors.ProgrammingError(
"Failed handling non-resultset; %s" % err)
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_resultset(self):
pass
def _handle_result(self, result):
"""
Handle the result after a command was send. The result can be either
an OK-packet or a dictionary containing column/eof information.
Raises InterfaceError when result is not a dict() or result is
invalid.
"""
if not isinstance(result, dict):
raise errors.InterfaceError('Result was not a dict()')
if 'columns' in result:
# Weak test, must be column/eof information
self._description = result['columns']
self._connection.unread_result = True
self._handle_resultset()
elif 'affected_rows' in result:
# Weak test, must be an OK-packet
self._connection.unread_result = False
self._handle_noresultset(result)
else:
raise errors.InterfaceError('Invalid result')
def _execute_iter(self, query_iter):
"""Generator returns MySQLCursor objects for multiple statements
This method is only used when multiple statements are executed
by the execute() method. It uses itertools.izip to iterate over the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
Yields a MySQLCursor instance.
"""
if not self._executed_list:
self._executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
for result, stmt in itertools.izip(query_iter,
iter(self._executed_list)):
self._reset_result()
self._handle_result(result)
self._executed = stmt
yield self
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
If warnings where generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
self._reset_result()
stmt = ''
try:
if isinstance(operation, unicode):
operation = operation.encode(self._connection.charset)
except (UnicodeDecodeError, UnicodeEncodeError), e:
raise errors.ProgrammingError(str(e))
if params is not None:
try:
stmt = operation % self._process_params(params)
except TypeError:
raise errors.ProgrammingError(
"Wrong number of arguments during string formatting")
else:
stmt = operation
if multi:
self._executed = stmt
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
self._executed = stmt
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError, err:
if self._connection._have_next_result:
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
elif len(RE_SQL_SPLIT_STMTS.split(operation)) > 1:
raise errors.InternalError(
"executemany() does not support multiple statements")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
opnocom = re.sub(RE_SQL_COMMENT, '', operation)
m = re.search(RE_SQL_INSERT_VALUES, opnocom)
fmt = m.group(1)
values = []
for params in seq_params:
values.append(fmt % self._process_params(params))
operation = operation.replace(m.group(1), ','.join(values), 1)
return self.execute(operation)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError), err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
Returns a iterator.
"""
return iter(self._stored_results)
def callproc(self, procname, args=()):
"""Calls a stored procedue with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Definining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print cursor.fetchone()
The last print should output ('5', '5', 25L)
Does not return a value, but a result set will be
available when the CALL-statement execute successfully.
Raises exceptions when something is wrong.
"""
argfmt = "@_%s_arg%d"
self._stored_results = []
results = []
try:
procargs = self._process_params(args)
argnames = []
for idx,arg in enumerate(procargs):
argname = argfmt % (procname, idx+1)
argnames.append(argname)
setquery = "SET %s=%%s" % argname
self.execute(setquery, (arg,))
call = "CALL %s(%s)" % (procname,','.join(argnames))
for result in self._connection.cmd_query_iter(call):
if 'columns' in result:
tmp = MySQLCursorBuffered(self._connection._get_self())
tmp._handle_result(result)
results.append(tmp)
if argnames:
select = "SELECT %s" % ','.join(argnames)
self.execute(select)
self._stored_results = results
return self.fetchone()
else:
self._stored_results = results
return ()
except errors.Error:
raise
except StandardError, e:
raise errors.InterfaceError(
"Failed calling stored routine; %s" % e)
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
This method is kept for backward compatibility. Please use the
property lastrowid instead.
Returns a long value or None.
"""
return self.lastrowid
def _fetch_warnings(self):
"""
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
c = self._connection.cursor()
cnt = c.execute("SHOW WARNINGS")
res = c.fetchall()
c.close()
except StandardError, e:
raise errors.InterfaceError, errors.InterfaceError(
"Failed getting warnings; %s" % e), sys.exc_info()[2]
if self._connection.raise_on_warnings is True:
msg = '; '.join([ "(%s) %s" % (r[1],r[2]) for r in res])
raise errors.get_mysql_exception(res[0][1],res[0][2])
else:
if len(res):
return res
return None
def _handle_eof(self, eof):
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
if self._connection.get_warnings is True and eof['warning_count']:
self._warnings = self._fetch_warnings()
def _fetch_row(self):
if not self._have_unread_result():
return None
row = None
try:
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row()
else:
(row, eof) = self._nextrow
if row:
(foo, eof) = self._nextrow = self._connection.get_row()
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
except:
raise
else:
return row
return None
def fetchwarnings(self):
return self._warnings
def fetchone(self):
row = self._fetch_row()
if row:
return self._row_to_python(row)
return None
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
res = []
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
for i in xrange(0,self.rowcount):
res.append(self._row_to_python(rows[i]))
self._handle_eof(eof)
return res
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple( [d[0].decode('utf8') for d in self.description] )
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __unicode__(self):
fmt = "MySQLCursor: %s"
if self._executed:
if len(self._executed) > 30:
res = fmt % (self._executed[:30] + '..')
else:
res = fmt % (self._executed)
else:
res = fmt % '(Nothing executed yet)'
return res
def __str__(self):
return repr(self.__unicode__())
class MySQLCursorBuffered(MySQLCursor):
"""Cursor which fetches rows within execute()"""
def __init__(self, connection=None):
MySQLCursor.__init__(self, connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
(self._rows, eof) = self._connection.get_rows()
self._rowcount = len(self._rows)
self._handle_eof(eof)
self._next_row = 0
try:
self._connection.unread_result = False
except:
pass
def reset(self):
self._rows = None
def _fetch_row(self):
row = None
try:
row = self._rows[self._next_row]
except:
return None
else:
self._next_row += 1
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
res = []
for row in self._rows:
res.append(self._row_to_python(row))
self._next_row = len(self._rows)
return res
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0:
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
self._handle_eof(eof)
return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
return [ r for r in self._rows ]
| pass | identifier_body |
cursor.py | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Cursor classes
"""
import sys
import weakref
import re
import itertools
from mysql.connector import constants
from mysql.connector import errors
from mysql.connector import utils
RE_SQL_COMMENT = re.compile("\/\*.*\*\/")
RE_SQL_INSERT_VALUES = re.compile(
r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
re.I | re.M)
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
RE_SQL_SPLIT_STMTS = re.compile(
r''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class CursorBase(object):
"""
Base for defining MySQLCursor. This class is a skeleton and defines
methods and members as required for the Python Database API
Specification v2.0.
It's better to inherite from MySQLCursor.
"""
def __init__(self):
self._description = None
self._rowcount = -1
self._last_insert_id = None
self.arraysize = 1
def callproc(self, procname, args=()):
pass
def close(self):
pass
def execute(self, operation, params=()):
pass
def executemany(self, operation, seqparams):
pass
def fetchone(self):
pass
def fetchmany(self, size=1):
pass
def fetchall(self):
pass
def nextset(self):
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def reset(self):
pass
@property
def description(self):
"""Returns description of columns in a result
This property returns a list of tuples describing the columns in
in a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@property
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
class MySQLCursor(CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
MySQLCursor should be inherited whenever other functionallity is
required. An example would to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
def __init__(self, connection=None):
CursorBase.__init__(self)
self._connection = None
self._stored_results = []
self._nextrow = (None, None)
self._warnings = None
self._warning_count = 0
self._executed = None
self._executed_list = []
if connection is not None:
self._set_connection(connection)
def __iter__(self):
"""
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def _set_connection(self, connection):
try:
self._connection = weakref.proxy(connection)
self._connection._protocol
except (AttributeError, TypeError):
raise errors.InterfaceError(errno=2048)
def _reset_result(self):
self._rowcount = -1
self._lastrowid = None
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
def next(self):
"""
Used for iterating over the result set. Calles self.fetchone()
to get the next row.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
def close(self):
"""Close the cursor
Returns True when successful, otherwise False.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def _process_params_dict(self, params):
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k,v in params.items():
c = v
c = to_mysql(c)
c = escape(c)
c = quote(c)
res[k] = c
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % e)
else:
return res
return None
def _process_params(self, params):
"""
Process the parameters which were given when self.execute() was
called. It does following using the MySQLConnection converter:
* Convert Python types to MySQL types
* Escapes characters required for MySQL.
* Quote values when needed.
Returns a list.
"""
if isinstance(params,dict):
return self._process_params_dict(params)
try:
res = params
res = map(self._connection.converter.to_mysql,res)
res = map(self._connection.converter.escape,res)
res = map(self._connection.converter.quote,res)
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % e)
else:
return tuple(res)
return None
def _row_to_python(self, rowdata, desc=None):
res = ()
try:
if not desc:
desc = self.description
for idx,v in enumerate(rowdata):
flddsc = desc[idx]
res += (self._connection.converter.to_python(flddsc, v),)
except StandardError, e:
raise errors.InterfaceError(
"Failed converting row to Python types; %s" % e)
else:
return res
return None
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, TypeError), err:
raise errors.ProgrammingError(
"Failed handling non-resultset; %s" % err)
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_resultset(self):
pass
def _handle_result(self, result):
"""
Handle the result after a command was send. The result can be either
an OK-packet or a dictionary containing column/eof information.
Raises InterfaceError when result is not a dict() or result is
invalid.
"""
if not isinstance(result, dict):
raise errors.InterfaceError('Result was not a dict()')
if 'columns' in result:
# Weak test, must be column/eof information
self._description = result['columns']
self._connection.unread_result = True
self._handle_resultset()
elif 'affected_rows' in result:
# Weak test, must be an OK-packet
self._connection.unread_result = False
self._handle_noresultset(result)
else:
raise errors.InterfaceError('Invalid result')
def _execute_iter(self, query_iter):
"""Generator returns MySQLCursor objects for multiple statements
This method is only used when multiple statements are executed
by the execute() method. It uses itertools.izip to iterate over the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
Yields a MySQLCursor instance.
"""
if not self._executed_list:
self._executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
for result, stmt in itertools.izip(query_iter,
iter(self._executed_list)):
self._reset_result()
self._handle_result(result)
self._executed = stmt
yield self
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
If warnings where generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
self._reset_result()
stmt = ''
try:
if isinstance(operation, unicode):
operation = operation.encode(self._connection.charset)
except (UnicodeDecodeError, UnicodeEncodeError), e:
raise errors.ProgrammingError(str(e))
if params is not None:
try:
stmt = operation % self._process_params(params)
except TypeError:
raise errors.ProgrammingError(
"Wrong number of arguments during string formatting")
else:
stmt = operation
if multi:
self._executed = stmt
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
self._executed = stmt
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError, err:
if self._connection._have_next_result:
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
elif len(RE_SQL_SPLIT_STMTS.split(operation)) > 1:
raise errors.InternalError(
"executemany() does not support multiple statements")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
opnocom = re.sub(RE_SQL_COMMENT, '', operation)
m = re.search(RE_SQL_INSERT_VALUES, opnocom)
fmt = m.group(1)
values = []
for params in seq_params:
values.append(fmt % self._process_params(params))
operation = operation.replace(m.group(1), ','.join(values), 1)
return self.execute(operation)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError), err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
Returns a iterator.
"""
return iter(self._stored_results)
def callproc(self, procname, args=()):
"""Calls a stored procedue with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Definining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print cursor.fetchone()
The last print should output ('5', '5', 25L)
Does not return a value, but a result set will be
available when the CALL-statement execute successfully.
Raises exceptions when something is wrong.
"""
argfmt = "@_%s_arg%d"
self._stored_results = []
results = []
try:
procargs = self._process_params(args)
argnames = []
for idx,arg in enumerate(procargs):
argname = argfmt % (procname, idx+1)
argnames.append(argname)
setquery = "SET %s=%%s" % argname
self.execute(setquery, (arg,))
call = "CALL %s(%s)" % (procname,','.join(argnames))
for result in self._connection.cmd_query_iter(call):
if 'columns' in result:
tmp = MySQLCursorBuffered(self._connection._get_self())
tmp._handle_result(result)
results.append(tmp)
if argnames:
select = "SELECT %s" % ','.join(argnames)
self.execute(select)
self._stored_results = results
return self.fetchone()
else:
self._stored_results = results
return ()
except errors.Error:
raise
except StandardError, e:
raise errors.InterfaceError(
"Failed calling stored routine; %s" % e)
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
This method is kept for backward compatibility. Please use the
property lastrowid instead.
Returns a long value or None.
"""
return self.lastrowid
def _fetch_warnings(self):
"""
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
c = self._connection.cursor()
cnt = c.execute("SHOW WARNINGS")
res = c.fetchall()
c.close()
except StandardError, e:
raise errors.InterfaceError, errors.InterfaceError(
"Failed getting warnings; %s" % e), sys.exc_info()[2]
if self._connection.raise_on_warnings is True:
msg = '; '.join([ "(%s) %s" % (r[1],r[2]) for r in res])
raise errors.get_mysql_exception(res[0][1],res[0][2])
else:
if len(res):
return res
return None
def _handle_eof(self, eof):
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
if self._connection.get_warnings is True and eof['warning_count']:
self._warnings = self._fetch_warnings()
def _fetch_row(self):
if not self._have_unread_result():
return None
row = None
try:
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row()
else:
(row, eof) = self._nextrow
if row:
(foo, eof) = self._nextrow = self._connection.get_row()
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
except:
raise
else:
return row
return None
def fetchwarnings(self):
return self._warnings
def fetchone(self):
row = self._fetch_row()
if row:
return self._row_to_python(row)
return None
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
|
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
res = []
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
for i in xrange(0,self.rowcount):
res.append(self._row_to_python(rows[i]))
self._handle_eof(eof)
return res
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple( [d[0].decode('utf8') for d in self.description] )
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __unicode__(self):
fmt = "MySQLCursor: %s"
if self._executed:
if len(self._executed) > 30:
res = fmt % (self._executed[:30] + '..')
else:
res = fmt % (self._executed)
else:
res = fmt % '(Nothing executed yet)'
return res
def __str__(self):
return repr(self.__unicode__())
class MySQLCursorBuffered(MySQLCursor):
"""Cursor which fetches rows within execute()"""
def __init__(self, connection=None):
MySQLCursor.__init__(self, connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
(self._rows, eof) = self._connection.get_rows()
self._rowcount = len(self._rows)
self._handle_eof(eof)
self._next_row = 0
try:
self._connection.unread_result = False
except:
pass
def reset(self):
self._rows = None
def _fetch_row(self):
row = None
try:
row = self._rows[self._next_row]
except:
return None
else:
self._next_row += 1
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
res = []
for row in self._rows:
res.append(self._row_to_python(row))
self._next_row = len(self._rows)
return res
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0:
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
self._handle_eof(eof)
return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
return [ r for r in self._rows ]
| res.append(row) | conditional_block |
cursor.py | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Cursor classes
"""
import sys
import weakref
import re
import itertools
from mysql.connector import constants
from mysql.connector import errors
from mysql.connector import utils
RE_SQL_COMMENT = re.compile("\/\*.*\*\/")
RE_SQL_INSERT_VALUES = re.compile(
r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
re.I | re.M)
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
RE_SQL_SPLIT_STMTS = re.compile(
r''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class CursorBase(object):
"""
Base for defining MySQLCursor. This class is a skeleton and defines
methods and members as required for the Python Database API
Specification v2.0.
It's better to inherite from MySQLCursor.
"""
def __init__(self):
self._description = None
self._rowcount = -1
self._last_insert_id = None
self.arraysize = 1
def callproc(self, procname, args=()):
pass
def close(self):
pass
def execute(self, operation, params=()):
pass
def executemany(self, operation, seqparams):
pass
def fetchone(self):
pass
def fetchmany(self, size=1):
pass
def fetchall(self):
pass
def nextset(self):
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def reset(self):
pass
@property
def description(self):
"""Returns description of columns in a result
This property returns a list of tuples describing the columns in
in a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@property
def | (self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
class MySQLCursor(CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
MySQLCursor should be inherited whenever other functionallity is
required. An example would to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
def __init__(self, connection=None):
CursorBase.__init__(self)
self._connection = None
self._stored_results = []
self._nextrow = (None, None)
self._warnings = None
self._warning_count = 0
self._executed = None
self._executed_list = []
if connection is not None:
self._set_connection(connection)
def __iter__(self):
"""
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def _set_connection(self, connection):
try:
self._connection = weakref.proxy(connection)
self._connection._protocol
except (AttributeError, TypeError):
raise errors.InterfaceError(errno=2048)
def _reset_result(self):
self._rowcount = -1
self._lastrowid = None
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
def next(self):
"""
Used for iterating over the result set. Calles self.fetchone()
to get the next row.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
def close(self):
"""Close the cursor
Returns True when successful, otherwise False.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def _process_params_dict(self, params):
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k,v in params.items():
c = v
c = to_mysql(c)
c = escape(c)
c = quote(c)
res[k] = c
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % e)
else:
return res
return None
def _process_params(self, params):
"""
Process the parameters which were given when self.execute() was
called. It does following using the MySQLConnection converter:
* Convert Python types to MySQL types
* Escapes characters required for MySQL.
* Quote values when needed.
Returns a list.
"""
if isinstance(params,dict):
return self._process_params_dict(params)
try:
res = params
res = map(self._connection.converter.to_mysql,res)
res = map(self._connection.converter.escape,res)
res = map(self._connection.converter.quote,res)
except StandardError, e:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % e)
else:
return tuple(res)
return None
def _row_to_python(self, rowdata, desc=None):
res = ()
try:
if not desc:
desc = self.description
for idx,v in enumerate(rowdata):
flddsc = desc[idx]
res += (self._connection.converter.to_python(flddsc, v),)
except StandardError, e:
raise errors.InterfaceError(
"Failed converting row to Python types; %s" % e)
else:
return res
return None
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, TypeError), err:
raise errors.ProgrammingError(
"Failed handling non-resultset; %s" % err)
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_resultset(self):
pass
def _handle_result(self, result):
"""
Handle the result after a command was send. The result can be either
an OK-packet or a dictionary containing column/eof information.
Raises InterfaceError when result is not a dict() or result is
invalid.
"""
if not isinstance(result, dict):
raise errors.InterfaceError('Result was not a dict()')
if 'columns' in result:
# Weak test, must be column/eof information
self._description = result['columns']
self._connection.unread_result = True
self._handle_resultset()
elif 'affected_rows' in result:
# Weak test, must be an OK-packet
self._connection.unread_result = False
self._handle_noresultset(result)
else:
raise errors.InterfaceError('Invalid result')
def _execute_iter(self, query_iter):
"""Generator returns MySQLCursor objects for multiple statements
This method is only used when multiple statements are executed
by the execute() method. It uses itertools.izip to iterate over the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
Yields a MySQLCursor instance.
"""
if not self._executed_list:
self._executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
for result, stmt in itertools.izip(query_iter,
iter(self._executed_list)):
self._reset_result()
self._handle_result(result)
self._executed = stmt
yield self
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
If warnings where generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
self._reset_result()
stmt = ''
try:
if isinstance(operation, unicode):
operation = operation.encode(self._connection.charset)
except (UnicodeDecodeError, UnicodeEncodeError), e:
raise errors.ProgrammingError(str(e))
if params is not None:
try:
stmt = operation % self._process_params(params)
except TypeError:
raise errors.ProgrammingError(
"Wrong number of arguments during string formatting")
else:
stmt = operation
if multi:
self._executed = stmt
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
self._executed = stmt
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError, err:
if self._connection._have_next_result:
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
elif len(RE_SQL_SPLIT_STMTS.split(operation)) > 1:
raise errors.InternalError(
"executemany() does not support multiple statements")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
opnocom = re.sub(RE_SQL_COMMENT, '', operation)
m = re.search(RE_SQL_INSERT_VALUES, opnocom)
fmt = m.group(1)
values = []
for params in seq_params:
values.append(fmt % self._process_params(params))
operation = operation.replace(m.group(1), ','.join(values), 1)
return self.execute(operation)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError), err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
Returns a iterator.
"""
return iter(self._stored_results)
def callproc(self, procname, args=()):
"""Calls a stored procedue with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Definining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print cursor.fetchone()
The last print should output ('5', '5', 25L)
Does not return a value, but a result set will be
available when the CALL-statement execute successfully.
Raises exceptions when something is wrong.
"""
argfmt = "@_%s_arg%d"
self._stored_results = []
results = []
try:
procargs = self._process_params(args)
argnames = []
for idx,arg in enumerate(procargs):
argname = argfmt % (procname, idx+1)
argnames.append(argname)
setquery = "SET %s=%%s" % argname
self.execute(setquery, (arg,))
call = "CALL %s(%s)" % (procname,','.join(argnames))
for result in self._connection.cmd_query_iter(call):
if 'columns' in result:
tmp = MySQLCursorBuffered(self._connection._get_self())
tmp._handle_result(result)
results.append(tmp)
if argnames:
select = "SELECT %s" % ','.join(argnames)
self.execute(select)
self._stored_results = results
return self.fetchone()
else:
self._stored_results = results
return ()
except errors.Error:
raise
except StandardError, e:
raise errors.InterfaceError(
"Failed calling stored routine; %s" % e)
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
This method is kept for backward compatibility. Please use the
property lastrowid instead.
Returns a long value or None.
"""
return self.lastrowid
def _fetch_warnings(self):
"""
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
c = self._connection.cursor()
cnt = c.execute("SHOW WARNINGS")
res = c.fetchall()
c.close()
except StandardError, e:
raise errors.InterfaceError, errors.InterfaceError(
"Failed getting warnings; %s" % e), sys.exc_info()[2]
if self._connection.raise_on_warnings is True:
msg = '; '.join([ "(%s) %s" % (r[1],r[2]) for r in res])
raise errors.get_mysql_exception(res[0][1],res[0][2])
else:
if len(res):
return res
return None
def _handle_eof(self, eof):
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
if self._connection.get_warnings is True and eof['warning_count']:
self._warnings = self._fetch_warnings()
def _fetch_row(self):
if not self._have_unread_result():
return None
row = None
try:
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row()
else:
(row, eof) = self._nextrow
if row:
(foo, eof) = self._nextrow = self._connection.get_row()
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
except:
raise
else:
return row
return None
def fetchwarnings(self):
return self._warnings
def fetchone(self):
row = self._fetch_row()
if row:
return self._row_to_python(row)
return None
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
res = []
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
for i in xrange(0,self.rowcount):
res.append(self._row_to_python(rows[i]))
self._handle_eof(eof)
return res
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple( [d[0].decode('utf8') for d in self.description] )
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __unicode__(self):
fmt = "MySQLCursor: %s"
if self._executed:
if len(self._executed) > 30:
res = fmt % (self._executed[:30] + '..')
else:
res = fmt % (self._executed)
else:
res = fmt % '(Nothing executed yet)'
return res
def __str__(self):
return repr(self.__unicode__())
class MySQLCursorBuffered(MySQLCursor):
"""Cursor which fetches rows within execute()"""
def __init__(self, connection=None):
MySQLCursor.__init__(self, connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
(self._rows, eof) = self._connection.get_rows()
self._rowcount = len(self._rows)
self._handle_eof(eof)
self._next_row = 0
try:
self._connection.unread_result = False
except:
pass
def reset(self):
self._rows = None
def _fetch_row(self):
row = None
try:
row = self._rows[self._next_row]
except:
return None
else:
self._next_row += 1
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
res = []
for row in self._rows:
res.append(self._row_to_python(row))
self._next_row = len(self._rows)
return res
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0:
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
self._handle_eof(eof)
return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
return [ r for r in self._rows ]
| lastrowid | identifier_name |
LanguageDistribution-test.tsx | /*
* SonarQube
* Copyright (C) 2009-2022 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
import { shallow } from 'enzyme';
import * as React from 'react';
import { LanguageDistribution } from '../LanguageDistribution';
it('renders', () => { | shallow(
<LanguageDistribution
distribution="java=1734;js=845;cpp=73;<null>=15"
languages={{ java: { key: 'java', name: 'Java' }, js: { key: 'js', name: 'JavaScript' } }}
/>
)
).toMatchSnapshot();
}); | expect( | random_line_split |
preview.test.ts | import { createPreview } from '@dicebear/core';
import * as style from '../dist';
import * as fs from 'fs';
import * as path from 'path';
const data = [
[style, { seed: 'test' }, 'eyes'],
[style, { seed: 'test' }, 'eyebrows'],
[style, { seed: 'test' }, 'mouth'],
[style, { seed: 'test' }, 'glasses'],
[style, { seed: 'test' }, 'hairColor'],
[style, { seed: 'test' }, 'mouthColor'],
[style, { seed: 'test' }, 'glassesColor'],
[style, { seed: 'test', backgroundColor: ['#ff0000'] }, 'backgroundColor'],
] as Array<Parameters<typeof createPreview>>;
data.forEach((params, key) => {
test(`Create avatar #${key}`, async () => {
const svgComponent = path.resolve(__dirname, 'svg/preview', `${key}.svg`);
if (false === fs.existsSync(svgComponent)) {
if (false === fs.existsSync(path.dirname(svgComponent))) |
fs.writeFileSync(svgComponent, createPreview(...params), {
encoding: 'utf-8',
});
}
const svg = fs.readFileSync(svgComponent, { encoding: 'utf-8' });
expect(createPreview(...params)).toEqual(svg);
});
});
| {
fs.mkdirSync(path.dirname(svgComponent), { recursive: true });
} | conditional_block |
preview.test.ts | import { createPreview } from '@dicebear/core';
import * as style from '../dist';
import * as fs from 'fs';
import * as path from 'path';
const data = [
[style, { seed: 'test' }, 'eyes'],
[style, { seed: 'test' }, 'eyebrows'],
[style, { seed: 'test' }, 'mouth'],
[style, { seed: 'test' }, 'glasses'],
[style, { seed: 'test' }, 'hairColor'],
[style, { seed: 'test' }, 'mouthColor'],
[style, { seed: 'test' }, 'glassesColor'],
[style, { seed: 'test', backgroundColor: ['#ff0000'] }, 'backgroundColor'],
] as Array<Parameters<typeof createPreview>>;
data.forEach((params, key) => { |
if (false === fs.existsSync(svgComponent)) {
if (false === fs.existsSync(path.dirname(svgComponent))) {
fs.mkdirSync(path.dirname(svgComponent), { recursive: true });
}
fs.writeFileSync(svgComponent, createPreview(...params), {
encoding: 'utf-8',
});
}
const svg = fs.readFileSync(svgComponent, { encoding: 'utf-8' });
expect(createPreview(...params)).toEqual(svg);
});
}); | test(`Create avatar #${key}`, async () => {
const svgComponent = path.resolve(__dirname, 'svg/preview', `${key}.svg`); | random_line_split |
values.js | var express = require( 'express' );
var router = express.Router( );
var http = require( 'http' );
var options = {
host: 'jettestarmpaas.cloudapp.net',
port: 80,
path: '/api/simple',
method: 'GET',
headers: {
connection: 'keep-alive'
}
};
function ca | options, callback ) {
return http.request( options, function ( res ) {
console.log( 'STATUS: ' + res.statusCode );
console.log( 'HEADERS: ' + JSON.stringify( res.headers ) );
res.setEncoding( 'utf8' );
var body = '';
res.on( 'data', function ( chunk ) {
console.log( 'BODY: ' + chunk );
body += chunk;
} );
res.on( 'end', function ( foo ) {
// Data reception is done, do whatever with it!
var parsed = JSON.parse( body );
var st = res.status;
if ( res.statusCode == 200 )
callback( null, parsed );
else {
callback( res.statusCode, parsed );
}
} );
} ).on( 'error', function ( e ) {
callback( true, "error" );
} ).on( 'timeout', function ( e ) {
res.abort( );
callback( true, "timeout" );
} );
}
/* GET values page. */
router.get( '/', function ( req, res ) {
var stuff, rawData;
callApi( options, function ( err, data ) {
if ( err ) {
res.status( err );
var currentStack = new Error( ).stack;
stuff = JSON.stringify( data );
res.render( 'error', { message: stuff, error: { status: err, stack: currentStack }} );
}
else {
stuff = JSON.stringify( data );
rawData = data;
res.render( 'values', { title: 'Values', body: stuff, data: rawData } );
}
} ).end( );
} );
module.exports = router;
| llApi( | identifier_name |
values.js | var express = require( 'express' );
var router = express.Router( );
var http = require( 'http' );
var options = {
host: 'jettestarmpaas.cloudapp.net',
port: 80,
path: '/api/simple',
method: 'GET',
headers: {
connection: 'keep-alive'
}
};
function callApi( options, callback ) {
|
/* GET values page. */
router.get( '/', function ( req, res ) {
var stuff, rawData;
callApi( options, function ( err, data ) {
if ( err ) {
res.status( err );
var currentStack = new Error( ).stack;
stuff = JSON.stringify( data );
res.render( 'error', { message: stuff, error: { status: err, stack: currentStack }} );
}
else {
stuff = JSON.stringify( data );
rawData = data;
res.render( 'values', { title: 'Values', body: stuff, data: rawData } );
}
} ).end( );
} );
module.exports = router;
| return http.request( options, function ( res ) {
console.log( 'STATUS: ' + res.statusCode );
console.log( 'HEADERS: ' + JSON.stringify( res.headers ) );
res.setEncoding( 'utf8' );
var body = '';
res.on( 'data', function ( chunk ) {
console.log( 'BODY: ' + chunk );
body += chunk;
} );
res.on( 'end', function ( foo ) {
// Data reception is done, do whatever with it!
var parsed = JSON.parse( body );
var st = res.status;
if ( res.statusCode == 200 )
callback( null, parsed );
else {
callback( res.statusCode, parsed );
}
} );
} ).on( 'error', function ( e ) {
callback( true, "error" );
} ).on( 'timeout', function ( e ) {
res.abort( );
callback( true, "timeout" );
} );
}
| identifier_body |
values.js | var express = require( 'express' );
var router = express.Router( );
var http = require( 'http' );
var options = {
host: 'jettestarmpaas.cloudapp.net',
port: 80,
path: '/api/simple',
method: 'GET',
headers: {
connection: 'keep-alive'
}
};
function callApi( options, callback ) {
return http.request( options, function ( res ) {
console.log( 'STATUS: ' + res.statusCode );
console.log( 'HEADERS: ' + JSON.stringify( res.headers ) );
res.setEncoding( 'utf8' );
var body = '';
res.on( 'data', function ( chunk ) {
console.log( 'BODY: ' + chunk );
body += chunk;
} );
res.on( 'end', function ( foo ) { |
var st = res.status;
if ( res.statusCode == 200 )
callback( null, parsed );
else {
callback( res.statusCode, parsed );
}
} );
} ).on( 'error', function ( e ) {
callback( true, "error" );
} ).on( 'timeout', function ( e ) {
res.abort( );
callback( true, "timeout" );
} );
}
/* GET values page. */
router.get( '/', function ( req, res ) {
var stuff, rawData;
callApi( options, function ( err, data ) {
if ( err ) {
res.status( err );
var currentStack = new Error( ).stack;
stuff = JSON.stringify( data );
res.render( 'error', { message: stuff, error: { status: err, stack: currentStack }} );
}
else {
stuff = JSON.stringify( data );
rawData = data;
res.render( 'values', { title: 'Values', body: stuff, data: rawData } );
}
} ).end( );
} );
module.exports = router; | // Data reception is done, do whatever with it!
var parsed = JSON.parse( body ); | random_line_split |
values.js | var express = require( 'express' );
var router = express.Router( );
var http = require( 'http' );
var options = {
host: 'jettestarmpaas.cloudapp.net',
port: 80,
path: '/api/simple',
method: 'GET',
headers: {
connection: 'keep-alive'
}
};
function callApi( options, callback ) {
return http.request( options, function ( res ) {
console.log( 'STATUS: ' + res.statusCode );
console.log( 'HEADERS: ' + JSON.stringify( res.headers ) );
res.setEncoding( 'utf8' );
var body = '';
res.on( 'data', function ( chunk ) {
console.log( 'BODY: ' + chunk );
body += chunk;
} );
res.on( 'end', function ( foo ) {
// Data reception is done, do whatever with it!
var parsed = JSON.parse( body );
var st = res.status;
if ( res.statusCode == 200 )
callback( null, parsed );
else {
| } );
} ).on( 'error', function ( e ) {
callback( true, "error" );
} ).on( 'timeout', function ( e ) {
res.abort( );
callback( true, "timeout" );
} );
}
/* GET values page. */
router.get( '/', function ( req, res ) {
var stuff, rawData;
callApi( options, function ( err, data ) {
if ( err ) {
res.status( err );
var currentStack = new Error( ).stack;
stuff = JSON.stringify( data );
res.render( 'error', { message: stuff, error: { status: err, stack: currentStack }} );
}
else {
stuff = JSON.stringify( data );
rawData = data;
res.render( 'values', { title: 'Values', body: stuff, data: rawData } );
}
} ).end( );
} );
module.exports = router;
| callback( res.statusCode, parsed );
}
| conditional_block |
data_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use mime_classifier::MIMEClassifier;
use net_traits::ProgressMsg::{Done, Payload};
use net_traits::{LoadConsumer, LoadData, Metadata};
use resource_task::{send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::SchemeData;
use url::percent_encoding::percent_decode;
pub fn factory(load_data: LoadData, senders: LoadConsumer, classifier: Arc<MIMEClassifier>) {
// NB: we don't spawn a new task.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a task, if desired.
load(load_data, senders, classifier)
}
pub fn load(load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>) | {
let url = load_data.url;
assert!(&*url.scheme == "data");
// Split out content type and data.
let mut scheme_data = match url.scheme_data {
SchemeData::NonRelative(ref scheme_data) => scheme_data.clone(),
_ => panic!("Expected a non-relative scheme URL.")
};
match url.query {
Some(ref query) => {
scheme_data.push_str("?");
scheme_data.push_str(query);
},
None => ()
}
let parts: Vec<&str> = scheme_data.splitn(2, ',').collect();
if parts.len() != 2 {
send_error(url, "invalid data uri".to_owned(), start_chan);
return;
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut is_base64 = false;
let mut ct_str = parts[0].to_owned();
if ct_str.ends_with(";base64") {
is_base64 = true;
let end_index = ct_str.len() - 7;
ct_str.truncate(end_index);
}
if ct_str.starts_with(";charset=") {
ct_str = format!("text/plain{}", ct_str);
}
// Parse the content type using rust-http.
// FIXME: this can go into an infinite loop! (rust-http #25)
let mut content_type: Option<Mime> = ct_str.parse().ok();
if content_type == None {
content_type = Some(Mime(TopLevel::Text, SubLevel::Plain,
vec!((Attr::Charset, Value::Ext("US-ASCII".to_owned())))));
}
let bytes = percent_decode(parts[1].as_bytes());
let bytes = if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
let bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return send_error(url, "non-base64 data uri".to_owned(), start_chan),
Ok(data) => data,
}
} else {
bytes
};
let mut metadata = Metadata::default(url);
metadata.set_content_type(content_type.as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan, metadata, classifier, &bytes) {
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
}
| identifier_body | |
data_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use mime_classifier::MIMEClassifier;
use net_traits::ProgressMsg::{Done, Payload};
use net_traits::{LoadConsumer, LoadData, Metadata};
use resource_task::{send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::SchemeData;
use url::percent_encoding::percent_decode;
pub fn factory(load_data: LoadData, senders: LoadConsumer, classifier: Arc<MIMEClassifier>) {
// NB: we don't spawn a new task.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a task, if desired.
load(load_data, senders, classifier)
}
pub fn load(load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>) {
let url = load_data.url;
assert!(&*url.scheme == "data");
// Split out content type and data.
let mut scheme_data = match url.scheme_data {
SchemeData::NonRelative(ref scheme_data) => scheme_data.clone(),
_ => panic!("Expected a non-relative scheme URL.")
};
match url.query {
Some(ref query) => {
scheme_data.push_str("?");
scheme_data.push_str(query);
},
None => ()
}
let parts: Vec<&str> = scheme_data.splitn(2, ',').collect();
if parts.len() != 2 {
send_error(url, "invalid data uri".to_owned(), start_chan);
return;
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut is_base64 = false;
let mut ct_str = parts[0].to_owned();
if ct_str.ends_with(";base64") {
is_base64 = true;
let end_index = ct_str.len() - 7;
ct_str.truncate(end_index);
}
if ct_str.starts_with(";charset=") { | }
// Parse the content type using rust-http.
// FIXME: this can go into an infinite loop! (rust-http #25)
let mut content_type: Option<Mime> = ct_str.parse().ok();
if content_type == None {
content_type = Some(Mime(TopLevel::Text, SubLevel::Plain,
vec!((Attr::Charset, Value::Ext("US-ASCII".to_owned())))));
}
let bytes = percent_decode(parts[1].as_bytes());
let bytes = if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
let bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return send_error(url, "non-base64 data uri".to_owned(), start_chan),
Ok(data) => data,
}
} else {
bytes
};
let mut metadata = Metadata::default(url);
metadata.set_content_type(content_type.as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan, metadata, classifier, &bytes) {
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
} | ct_str = format!("text/plain{}", ct_str); | random_line_split |
data_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use mime_classifier::MIMEClassifier;
use net_traits::ProgressMsg::{Done, Payload};
use net_traits::{LoadConsumer, LoadData, Metadata};
use resource_task::{send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::SchemeData;
use url::percent_encoding::percent_decode;
pub fn | (load_data: LoadData, senders: LoadConsumer, classifier: Arc<MIMEClassifier>) {
// NB: we don't spawn a new task.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a task, if desired.
load(load_data, senders, classifier)
}
pub fn load(load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>) {
let url = load_data.url;
assert!(&*url.scheme == "data");
// Split out content type and data.
let mut scheme_data = match url.scheme_data {
SchemeData::NonRelative(ref scheme_data) => scheme_data.clone(),
_ => panic!("Expected a non-relative scheme URL.")
};
match url.query {
Some(ref query) => {
scheme_data.push_str("?");
scheme_data.push_str(query);
},
None => ()
}
let parts: Vec<&str> = scheme_data.splitn(2, ',').collect();
if parts.len() != 2 {
send_error(url, "invalid data uri".to_owned(), start_chan);
return;
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut is_base64 = false;
let mut ct_str = parts[0].to_owned();
if ct_str.ends_with(";base64") {
is_base64 = true;
let end_index = ct_str.len() - 7;
ct_str.truncate(end_index);
}
if ct_str.starts_with(";charset=") {
ct_str = format!("text/plain{}", ct_str);
}
// Parse the content type using rust-http.
// FIXME: this can go into an infinite loop! (rust-http #25)
let mut content_type: Option<Mime> = ct_str.parse().ok();
if content_type == None {
content_type = Some(Mime(TopLevel::Text, SubLevel::Plain,
vec!((Attr::Charset, Value::Ext("US-ASCII".to_owned())))));
}
let bytes = percent_decode(parts[1].as_bytes());
let bytes = if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
let bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return send_error(url, "non-base64 data uri".to_owned(), start_chan),
Ok(data) => data,
}
} else {
bytes
};
let mut metadata = Metadata::default(url);
metadata.set_content_type(content_type.as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan, metadata, classifier, &bytes) {
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
}
| factory | identifier_name |
gettags.py | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList | ## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class gettags(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(gettags, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_91099948 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
write(u'''\t\t<e2tag>''')
_v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
write(u'''</e2tag>
''')
write(u'''</e2tags>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_91099948
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_gettags= 'respond'
## END CLASS DEFINITION
if not hasattr(gettags, '_initCheetahAttributes'):
templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(gettags)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=gettags()).run() | from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
################################################## | random_line_split |
gettags.py | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class gettags(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(gettags, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
|
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_91099948 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
write(u'''\t\t<e2tag>''')
_v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
write(u'''</e2tag>
''')
write(u'''</e2tags>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_91099948
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_gettags= 'respond'
## END CLASS DEFINITION
if not hasattr(gettags, '_initCheetahAttributes'):
templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(gettags)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=gettags()).run()
| trans = DummyTransaction()
_dummyTrans = True | conditional_block |
gettags.py | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class gettags(Template):
##################################################
## CHEETAH GENERATED METHODS
def | (self, *args, **KWs):
super(gettags, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_91099948 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
write(u'''\t\t<e2tag>''')
_v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
write(u'''</e2tag>
''')
write(u'''</e2tags>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_91099948
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_gettags= 'respond'
## END CLASS DEFINITION
if not hasattr(gettags, '_initCheetahAttributes'):
templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(gettags)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=gettags()).run()
| __init__ | identifier_name |
gettags.py | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class gettags(Template):
##################################################
## CHEETAH GENERATED METHODS
|
## END CLASS DEFINITION
if not hasattr(gettags, '_initCheetahAttributes'):
templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(gettags)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=gettags()).run()
| def __init__(self, *args, **KWs):
super(gettags, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_91099948 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
write(u'''\t\t<e2tag>''')
_v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
write(u'''</e2tag>
''')
write(u'''</e2tags>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_91099948
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_gettags= 'respond' | identifier_body |
rpc_blockchain.py | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
)
class BlockchainTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.mine_chain()
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1']) # Set extra args with pruning after rescan is complete
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info('Create some old blocks')
address = self.nodes[0].get_deterministic_priv_key().address
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
# ten-minute steps from genesis block time
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, address)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
assert_equal(res['softforks'], {
'bip34': {'type': 'buried', 'active': False, 'height': 500},
'bip66': {'type': 'buried', 'active': False, 'height': 1251},
'bip65': {'type': 'buried', 'active': False, 'height': 1351},
'csv': {'type': 'buried', 'active': False, 'height': 432},
'segwit': {'type': 'buried', 'active': True, 'height': 0},
'testdummy': {
'type': 'bip9',
'bip9': {
'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
'elapsed': 57,
'count': 57,
'possible': True,
},
},
'active': False}
})
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_final_block_height'], 200)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_final_block_height'], 1)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 15000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generatetoaddress(6, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
node.p2p.send_message(msg_block(b))
node.p2p.sync_with_ping()
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
if __name__ == '__main__':
| BlockchainTest().main() | conditional_block | |
rpc_blockchain.py | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
)
class BlockchainTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.mine_chain()
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1']) # Set extra args with pruning after rescan is complete
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info('Create some old blocks')
address = self.nodes[0].get_deterministic_priv_key().address
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
# ten-minute steps from genesis block time
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, address)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
assert_equal(res['softforks'], {
'bip34': {'type': 'buried', 'active': False, 'height': 500},
'bip66': {'type': 'buried', 'active': False, 'height': 1251},
'bip65': {'type': 'buried', 'active': False, 'height': 1351},
'csv': {'type': 'buried', 'active': False, 'height': 432},
'segwit': {'type': 'buried', 'active': True, 'height': 0},
'testdummy': {
'type': 'bip9',
'bip9': {
'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
'elapsed': 57,
'count': 57,
'possible': True,
},
},
'active': False}
})
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_final_block_height'], 200)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_final_block_height'], 1)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 15000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generatetoaddress(6, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
|
if __name__ == '__main__':
BlockchainTest().main()
| self.log.info("Test waitforblockheight")
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
node.p2p.send_message(msg_block(b))
node.p2p.sync_with_ping()
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1) | identifier_body |
rpc_blockchain.py | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
)
class BlockchainTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def | (self):
self.mine_chain()
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1']) # Set extra args with pruning after rescan is complete
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info('Create some old blocks')
address = self.nodes[0].get_deterministic_priv_key().address
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
# ten-minute steps from genesis block time
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, address)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
assert_equal(res['softforks'], {
'bip34': {'type': 'buried', 'active': False, 'height': 500},
'bip66': {'type': 'buried', 'active': False, 'height': 1251},
'bip65': {'type': 'buried', 'active': False, 'height': 1351},
'csv': {'type': 'buried', 'active': False, 'height': 432},
'segwit': {'type': 'buried', 'active': True, 'height': 0},
'testdummy': {
'type': 'bip9',
'bip9': {
'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
'elapsed': 57,
'count': 57,
'possible': True,
},
},
'active': False}
})
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_final_block_height'], 200)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_final_block_height'], 1)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 15000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generatetoaddress(6, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
node.p2p.send_message(msg_block(b))
node.p2p.sync_with_ping()
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
if __name__ == '__main__':
BlockchainTest().main()
| run_test | identifier_name |
rpc_blockchain.py | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
)
class BlockchainTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.mine_chain()
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1']) # Set extra args with pruning after rescan is complete
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info('Create some old blocks')
address = self.nodes[0].get_deterministic_priv_key().address
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
# ten-minute steps from genesis block time
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, address)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
assert_equal(res['softforks'], {
'bip34': {'type': 'buried', 'active': False, 'height': 500},
'bip66': {'type': 'buried', 'active': False, 'height': 1251},
'bip65': {'type': 'buried', 'active': False, 'height': 1351},
'csv': {'type': 'buried', 'active': False, 'height': 432},
'segwit': {'type': 'buried', 'active': True, 'height': 0},
'testdummy': {
'type': 'bip9',
'bip9': {
'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
'elapsed': 57,
'count': 57,
'possible': True,
},
},
'active': False}
})
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_final_block_height'], 200)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_final_block_height'], 1)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 15000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generatetoaddress(6, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
node.p2p.send_message(msg_block(b))
node.p2p.sync_with_ping()
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
|
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
if __name__ == '__main__':
BlockchainTest().main() | node.invalidateblock(b22f.hash) | random_line_split |
profile.service.ts | import { Injectable } from "@angular/core";
import { Http, Response, URLSearchParams } from "@angular/http";
@Injectable()
export class ProfileService {
constructor(public http: Http) {}
insert(doc) : any {
return this.http.post("/profiles", JSON.stringify(doc))
.map((res: Response) => res.json());
}
get(id: string) : any {
return this.http.get("/profiles/" + id)
.map((res: Response) => res.json());
}
all(options?: Object) : any {
return this.http.get("/profiles")
.map((res: Response) => res.json());
}
delete(id: string, rev: string) : any {
var url = this.query("/profiles/" + id, {rev: rev});
return this.http.delete(url);
}
activate(id: string) : any {
return this.http.post("/profiles/activate", id)
.map((res: Response) => res.json())
}
query(base: string, options?: Object) : string {
if (!options) return base;
var params = new URLSearchParams();
for (var key in options) { | }
} | params.set(key, String(options[key]));
}
return base + "?" + params.toString(); | random_line_split |
profile.service.ts | import { Injectable } from "@angular/core";
import { Http, Response, URLSearchParams } from "@angular/http";
@Injectable()
export class ProfileService {
| (public http: Http) {}
insert(doc) : any {
return this.http.post("/profiles", JSON.stringify(doc))
.map((res: Response) => res.json());
}
get(id: string) : any {
return this.http.get("/profiles/" + id)
.map((res: Response) => res.json());
}
all(options?: Object) : any {
return this.http.get("/profiles")
.map((res: Response) => res.json());
}
delete(id: string, rev: string) : any {
var url = this.query("/profiles/" + id, {rev: rev});
return this.http.delete(url);
}
activate(id: string) : any {
return this.http.post("/profiles/activate", id)
.map((res: Response) => res.json())
}
query(base: string, options?: Object) : string {
if (!options) return base;
var params = new URLSearchParams();
for (var key in options) {
params.set(key, String(options[key]));
}
return base + "?" + params.toString();
}
}
| constructor | identifier_name |
profile.service.ts | import { Injectable } from "@angular/core";
import { Http, Response, URLSearchParams } from "@angular/http";
@Injectable()
export class ProfileService {
constructor(public http: Http) {}
insert(doc) : any {
return this.http.post("/profiles", JSON.stringify(doc))
.map((res: Response) => res.json());
}
get(id: string) : any {
return this.http.get("/profiles/" + id)
.map((res: Response) => res.json());
}
all(options?: Object) : any {
return this.http.get("/profiles")
.map((res: Response) => res.json());
}
delete(id: string, rev: string) : any |
activate(id: string) : any {
return this.http.post("/profiles/activate", id)
.map((res: Response) => res.json())
}
query(base: string, options?: Object) : string {
if (!options) return base;
var params = new URLSearchParams();
for (var key in options) {
params.set(key, String(options[key]));
}
return base + "?" + params.toString();
}
}
| {
var url = this.query("/profiles/" + id, {rev: rev});
return this.http.delete(url);
} | identifier_body |
spsc_queue.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A single-producer single-consumer concurrent queue
//!
//! This module contains the implementation of an SPSC queue which can be used
//! concurrently between two threads. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
use alloc::boxed::Box;
use core::ptr;
use core::cell::UnsafeCell;
use sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use super::cache_aligned::CacheAligned;
// Node within the linked list queue of messages to send
struct Node<T> {
// FIXME: this could be an uninitialized T if we're careful enough, and
// that would reduce memory usage (and be a bit faster).
// is it worth it?
value: Option<T>, // nullable for re-use of nodes
cached: bool, // This node goes into the node cache
next: AtomicPtr<Node<T>>, // next node in the queue
}
/// The single-producer single-consumer queue. This structure is not cloneable,
/// but it can be safely shared in an Arc if it is guaranteed that there
/// is only one popper and one pusher touching the queue at any one point in
/// time.
pub struct Queue<T, ProducerAddition=(), ConsumerAddition=()> {
// consumer fields
consumer: CacheAligned<Consumer<T, ConsumerAddition>>,
// producer fields
producer: CacheAligned<Producer<T, ProducerAddition>>,
}
struct Consumer<T, Addition> {
tail: UnsafeCell<*mut Node<T>>, // where to pop from
tail_prev: AtomicPtr<Node<T>>, // where to pop from
cache_bound: usize, // maximum cache size
cached_nodes: AtomicUsize, // number of nodes marked as cachable
addition: Addition,
}
struct Producer<T, Addition> {
head: UnsafeCell<*mut Node<T>>, // where to push to
first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail
addition: Addition,
}
unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> { }
unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> { }
impl<T> Node<T> {
fn new() -> *mut Node<T> {
Box::into_raw(box Node {
value: None,
cached: false,
next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
})
}
}
impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> {
/// Creates a new queue. With given additional elements in the producer and
/// consumer portions of the queue.
///
/// Due to the performance implications of cache-contention,
/// we wish to keep fields used mainly by the producer on a separate cache
/// line than those used by the consumer.
/// Since cache lines are usually 64 bytes, it is unreasonably expensive to
/// allocate one for small fields, so we allow users to insert additional
/// fields into the cache lines already allocated by this for the producer
/// and consumer.
///
/// This is unsafe as the type system doesn't enforce a single
/// consumer-producer relationship. It also allows the consumer to `pop`
/// items while there is a `peek` active due to all methods having a
/// non-mutable receiver.
///
/// # Arguments
///
/// * `bound` - This queue implementation is implemented with a linked
/// list, and this means that a push is always a malloc. In
/// order to amortize this cost, an internal cache of nodes is
/// maintained to prevent a malloc from always being
/// necessary. This bound is the limit on the size of the
/// cache (if desired). If the value is 0, then the cache has
/// no bound. Otherwise, the cache will never grow larger than
/// `bound` (although the queue itself could be much larger.
pub unsafe fn with_additions(
bound: usize,
producer_addition: ProducerAddition,
consumer_addition: ConsumerAddition,
) -> Self {
let n1 = Node::new();
let n2 = Node::new();
(*n1).next.store(n2, Ordering::Relaxed);
Queue {
consumer: CacheAligned::new(Consumer {
tail: UnsafeCell::new(n2),
tail_prev: AtomicPtr::new(n1),
cache_bound: bound,
cached_nodes: AtomicUsize::new(0),
addition: consumer_addition
}),
producer: CacheAligned::new(Producer {
head: UnsafeCell::new(n2),
first: UnsafeCell::new(n1),
tail_copy: UnsafeCell::new(n1),
addition: producer_addition
}),
}
}
/// Pushes a new value onto this queue. Note that to use this function
/// safely, it must be externally guaranteed that there is only one pusher.
pub fn push(&self, t: T) {
unsafe {
// Acquire a node (which either uses a cached one or allocates a new
// one), and then append this to the 'head' node.
let n = self.alloc();
assert!((*n).value.is_none());
(*n).value = Some(t);
(*n).next.store(ptr::null_mut(), Ordering::Relaxed);
(**self.producer.head.get()).next.store(n, Ordering::Release);
*(&self.producer.head).get() = n;
}
}
unsafe fn alloc(&self) -> *mut Node<T> {
// First try to see if we can consume the 'first' node for our uses.
if *self.producer.first.get() != *self.producer.tail_copy.get() {
let ret = *self.producer.first.get();
*self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
return ret;
}
// If the above fails, then update our copy of the tail and try
// again.
*self.producer.0.tail_copy.get() =
self.consumer.tail_prev.load(Ordering::Acquire);
if *self.producer.first.get() != *self.producer.tail_copy.get() {
let ret = *self.producer.first.get();
*self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
return ret;
}
// If all of that fails, then we have to allocate a new node
// (there's nothing in the node cache).
Node::new()
}
/// Attempts to pop a value from this queue. Remember that to use this type
/// safely you must ensure that there is only one popper at a time.
pub fn pop(&self) -> Option<T> {
unsafe {
// The `tail` node is not actually a used node, but rather a
// sentinel from where we should start popping from. Hence, look at
// tail's next field and see if we can use it. If we do a pop, then
// the current tail node is a candidate for going into the cache.
let tail = *self.consumer.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if next.is_null() { return None }
assert!((*next).value.is_some());
let ret = (*next).value.take();
*self.consumer.0.tail.get() = next;
if self.consumer.cache_bound == 0 {
self.consumer.tail_prev.store(tail, Ordering::Release);
} else {
let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed);
if cached_nodes < self.consumer.cache_bound && !(*tail).cached {
self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed);
(*tail).cached = true;
}
if (*tail).cached {
self.consumer.tail_prev.store(tail, Ordering::Release);
} else {
(*self.consumer.tail_prev.load(Ordering::Relaxed))
.next.store(next, Ordering::Relaxed);
// We have successfully erased all references to 'tail', so
// now we can safely drop it.
let _: Box<Node<T>> = Box::from_raw(tail);
}
}
ret
}
}
/// Attempts to peek at the head of the queue, returning `None` if the queue
/// has no data currently
///
/// # Warning
/// The reference returned is invalid if it is not used before the consumer
/// pops the value off the queue. If the producer then pushes another value
/// onto the queue, it will overwrite the value pointed to by the reference.
pub fn peek(&self) -> Option<&mut T> {
// This is essentially the same as above with all the popping bits
// stripped out.
unsafe {
let tail = *self.consumer.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if next.is_null() { None } else { (*next).value.as_mut() }
}
}
pub fn producer_addition(&self) -> &ProducerAddition {
&self.producer.addition
}
pub fn consumer_addition(&self) -> &ConsumerAddition {
&self.consumer.addition
}
}
impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.producer.first.get();
while !cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _n: Box<Node<T>> = Box::from_raw(cur);
cur = next;
}
}
}
}
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::Arc;
use super::Queue;
use thread;
use sync::mpsc::channel;
#[test]
fn smoke() {
unsafe {
let queue = Queue::with_additions(0, (), ());
queue.push(1);
queue.push(2);
assert_eq!(queue.pop(), Some(1));
assert_eq!(queue.pop(), Some(2));
assert_eq!(queue.pop(), None);
queue.push(3);
queue.push(4);
assert_eq!(queue.pop(), Some(3));
assert_eq!(queue.pop(), Some(4));
assert_eq!(queue.pop(), None);
}
}
#[test]
fn peek() {
unsafe {
let queue = Queue::with_additions(0, (), ());
queue.push(vec![1]);
// Ensure the borrowchecker works
match queue.peek() {
Some(vec) => {
assert_eq!(&*vec, &[1]);
},
None => unreachable!()
}
match queue.pop() {
Some(vec) => {
assert_eq!(&*vec, &[1]);
},
None => unreachable!()
}
}
}
#[test]
fn drop_full() |
#[test]
fn smoke_bound() {
unsafe {
let q = Queue::with_additions(0, (), ());
q.push(1);
q.push(2);
assert_eq!(q.pop(), Some(1));
assert_eq!(q.pop(), Some(2));
assert_eq!(q.pop(), None);
q.push(3);
q.push(4);
assert_eq!(q.pop(), Some(3));
assert_eq!(q.pop(), Some(4));
assert_eq!(q.pop(), None);
}
}
#[test]
fn stress() {
unsafe {
stress_bound(0);
stress_bound(1);
}
unsafe fn stress_bound(bound: usize) {
let q = Arc::new(Queue::with_additions(bound, (), ()));
let (tx, rx) = channel();
let q2 = q.clone();
let _t = thread::spawn(move|| {
for _ in 0..100000 {
loop {
match q2.pop() {
Some(1) => break,
Some(_) => panic!(),
None => {}
}
}
}
tx.send(()).unwrap();
});
for _ in 0..100000 {
q.push(1);
}
rx.recv().unwrap();
}
}
}
| {
unsafe {
let q: Queue<Box<_>> = Queue::with_additions(0, (), ());
q.push(box 1);
q.push(box 2);
}
} | identifier_body |
spsc_queue.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A single-producer single-consumer concurrent queue
//!
//! This module contains the implementation of an SPSC queue which can be used
//! concurrently between two threads. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
use alloc::boxed::Box;
use core::ptr;
use core::cell::UnsafeCell;
use sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use super::cache_aligned::CacheAligned;
// Node within the linked list queue of messages to send
struct Node<T> {
// FIXME: this could be an uninitialized T if we're careful enough, and
// that would reduce memory usage (and be a bit faster).
// is it worth it?
value: Option<T>, // nullable for re-use of nodes
cached: bool, // This node goes into the node cache
next: AtomicPtr<Node<T>>, // next node in the queue
}
/// The single-producer single-consumer queue. This structure is not cloneable,
/// but it can be safely shared in an Arc if it is guaranteed that there
/// is only one popper and one pusher touching the queue at any one point in
/// time.
pub struct Queue<T, ProducerAddition=(), ConsumerAddition=()> {
// consumer fields
consumer: CacheAligned<Consumer<T, ConsumerAddition>>,
// producer fields
producer: CacheAligned<Producer<T, ProducerAddition>>,
}
struct Consumer<T, Addition> {
tail: UnsafeCell<*mut Node<T>>, // where to pop from
tail_prev: AtomicPtr<Node<T>>, // where to pop from
cache_bound: usize, // maximum cache size
cached_nodes: AtomicUsize, // number of nodes marked as cachable
addition: Addition,
}
struct Producer<T, Addition> {
head: UnsafeCell<*mut Node<T>>, // where to push to
first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail
addition: Addition,
}
unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> { }
unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> { }
impl<T> Node<T> {
fn new() -> *mut Node<T> {
Box::into_raw(box Node {
value: None,
cached: false,
next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
})
}
}
impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> {
/// Creates a new queue. With given additional elements in the producer and
/// consumer portions of the queue.
///
/// Due to the performance implications of cache-contention,
/// we wish to keep fields used mainly by the producer on a separate cache
/// line than those used by the consumer.
/// Since cache lines are usually 64 bytes, it is unreasonably expensive to
/// allocate one for small fields, so we allow users to insert additional
/// fields into the cache lines already allocated by this for the producer
/// and consumer.
///
/// This is unsafe as the type system doesn't enforce a single
/// consumer-producer relationship. It also allows the consumer to `pop`
/// items while there is a `peek` active due to all methods having a
/// non-mutable receiver.
///
/// # Arguments
///
/// * `bound` - This queue implementation is implemented with a linked
/// list, and this means that a push is always a malloc. In
/// order to amortize this cost, an internal cache of nodes is
/// maintained to prevent a malloc from always being
/// necessary. This bound is the limit on the size of the
/// cache (if desired). If the value is 0, then the cache has
/// no bound. Otherwise, the cache will never grow larger than
/// `bound` (although the queue itself could be much larger.
pub unsafe fn with_additions(
bound: usize,
producer_addition: ProducerAddition,
consumer_addition: ConsumerAddition,
) -> Self {
let n1 = Node::new();
let n2 = Node::new();
(*n1).next.store(n2, Ordering::Relaxed);
Queue {
consumer: CacheAligned::new(Consumer {
tail: UnsafeCell::new(n2),
tail_prev: AtomicPtr::new(n1),
cache_bound: bound,
cached_nodes: AtomicUsize::new(0),
addition: consumer_addition
}),
producer: CacheAligned::new(Producer {
head: UnsafeCell::new(n2),
first: UnsafeCell::new(n1),
tail_copy: UnsafeCell::new(n1),
addition: producer_addition
}),
}
}
/// Pushes a new value onto this queue. Note that to use this function
/// safely, it must be externally guaranteed that there is only one pusher.
pub fn push(&self, t: T) {
unsafe {
// Acquire a node (which either uses a cached one or allocates a new
// one), and then append this to the 'head' node.
let n = self.alloc();
assert!((*n).value.is_none());
(*n).value = Some(t);
(*n).next.store(ptr::null_mut(), Ordering::Relaxed);
(**self.producer.head.get()).next.store(n, Ordering::Release);
*(&self.producer.head).get() = n;
}
}
unsafe fn alloc(&self) -> *mut Node<T> {
// First try to see if we can consume the 'first' node for our uses.
if *self.producer.first.get() != *self.producer.tail_copy.get() {
let ret = *self.producer.first.get();
*self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
return ret;
}
// If the above fails, then update our copy of the tail and try
// again.
*self.producer.0.tail_copy.get() =
self.consumer.tail_prev.load(Ordering::Acquire);
if *self.producer.first.get() != *self.producer.tail_copy.get() {
let ret = *self.producer.first.get();
*self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
return ret;
}
// If all of that fails, then we have to allocate a new node
// (there's nothing in the node cache).
Node::new() | unsafe {
// The `tail` node is not actually a used node, but rather a
// sentinel from where we should start popping from. Hence, look at
// tail's next field and see if we can use it. If we do a pop, then
// the current tail node is a candidate for going into the cache.
let tail = *self.consumer.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if next.is_null() { return None }
assert!((*next).value.is_some());
let ret = (*next).value.take();
*self.consumer.0.tail.get() = next;
if self.consumer.cache_bound == 0 {
self.consumer.tail_prev.store(tail, Ordering::Release);
} else {
let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed);
if cached_nodes < self.consumer.cache_bound && !(*tail).cached {
self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed);
(*tail).cached = true;
}
if (*tail).cached {
self.consumer.tail_prev.store(tail, Ordering::Release);
} else {
(*self.consumer.tail_prev.load(Ordering::Relaxed))
.next.store(next, Ordering::Relaxed);
// We have successfully erased all references to 'tail', so
// now we can safely drop it.
let _: Box<Node<T>> = Box::from_raw(tail);
}
}
ret
}
}
/// Attempts to peek at the head of the queue, returning `None` if the queue
/// has no data currently
///
/// # Warning
/// The reference returned is invalid if it is not used before the consumer
/// pops the value off the queue. If the producer then pushes another value
/// onto the queue, it will overwrite the value pointed to by the reference.
pub fn peek(&self) -> Option<&mut T> {
// This is essentially the same as above with all the popping bits
// stripped out.
unsafe {
let tail = *self.consumer.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if next.is_null() { None } else { (*next).value.as_mut() }
}
}
pub fn producer_addition(&self) -> &ProducerAddition {
&self.producer.addition
}
pub fn consumer_addition(&self) -> &ConsumerAddition {
&self.consumer.addition
}
}
impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.producer.first.get();
while !cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _n: Box<Node<T>> = Box::from_raw(cur);
cur = next;
}
}
}
}
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::Arc;
use super::Queue;
use thread;
use sync::mpsc::channel;
#[test]
fn smoke() {
unsafe {
let queue = Queue::with_additions(0, (), ());
queue.push(1);
queue.push(2);
assert_eq!(queue.pop(), Some(1));
assert_eq!(queue.pop(), Some(2));
assert_eq!(queue.pop(), None);
queue.push(3);
queue.push(4);
assert_eq!(queue.pop(), Some(3));
assert_eq!(queue.pop(), Some(4));
assert_eq!(queue.pop(), None);
}
}
#[test]
fn peek() {
unsafe {
let queue = Queue::with_additions(0, (), ());
queue.push(vec![1]);
// Ensure the borrowchecker works
match queue.peek() {
Some(vec) => {
assert_eq!(&*vec, &[1]);
},
None => unreachable!()
}
match queue.pop() {
Some(vec) => {
assert_eq!(&*vec, &[1]);
},
None => unreachable!()
}
}
}
#[test]
fn drop_full() {
unsafe {
let q: Queue<Box<_>> = Queue::with_additions(0, (), ());
q.push(box 1);
q.push(box 2);
}
}
#[test]
fn smoke_bound() {
unsafe {
let q = Queue::with_additions(0, (), ());
q.push(1);
q.push(2);
assert_eq!(q.pop(), Some(1));
assert_eq!(q.pop(), Some(2));
assert_eq!(q.pop(), None);
q.push(3);
q.push(4);
assert_eq!(q.pop(), Some(3));
assert_eq!(q.pop(), Some(4));
assert_eq!(q.pop(), None);
}
}
#[test]
fn stress() {
unsafe {
stress_bound(0);
stress_bound(1);
}
unsafe fn stress_bound(bound: usize) {
let q = Arc::new(Queue::with_additions(bound, (), ()));
let (tx, rx) = channel();
let q2 = q.clone();
let _t = thread::spawn(move|| {
for _ in 0..100000 {
loop {
match q2.pop() {
Some(1) => break,
Some(_) => panic!(),
None => {}
}
}
}
tx.send(()).unwrap();
});
for _ in 0..100000 {
q.push(1);
}
rx.recv().unwrap();
}
}
} | }
/// Attempts to pop a value from this queue. Remember that to use this type
/// safely you must ensure that there is only one popper at a time.
pub fn pop(&self) -> Option<T> { | random_line_split |
spsc_queue.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A single-producer single-consumer concurrent queue
//!
//! This module contains the implementation of an SPSC queue which can be used
//! concurrently between two threads. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
use alloc::boxed::Box;
use core::ptr;
use core::cell::UnsafeCell;
use sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use super::cache_aligned::CacheAligned;
// Node within the linked list queue of messages to send
struct Node<T> {
// FIXME: this could be an uninitialized T if we're careful enough, and
// that would reduce memory usage (and be a bit faster).
// is it worth it?
value: Option<T>, // nullable for re-use of nodes
cached: bool, // This node goes into the node cache
next: AtomicPtr<Node<T>>, // next node in the queue
}
/// The single-producer single-consumer queue. This structure is not cloneable,
/// but it can be safely shared in an Arc if it is guaranteed that there
/// is only one popper and one pusher touching the queue at any one point in
/// time.
pub struct Queue<T, ProducerAddition=(), ConsumerAddition=()> {
// consumer fields
consumer: CacheAligned<Consumer<T, ConsumerAddition>>,
// producer fields
producer: CacheAligned<Producer<T, ProducerAddition>>,
}
struct Consumer<T, Addition> {
tail: UnsafeCell<*mut Node<T>>, // where to pop from
tail_prev: AtomicPtr<Node<T>>, // where to pop from
cache_bound: usize, // maximum cache size
cached_nodes: AtomicUsize, // number of nodes marked as cachable
addition: Addition,
}
struct Producer<T, Addition> {
head: UnsafeCell<*mut Node<T>>, // where to push to
first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail
addition: Addition,
}
unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> { }
unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> { }
impl<T> Node<T> {
fn new() -> *mut Node<T> {
Box::into_raw(box Node {
value: None,
cached: false,
next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
})
}
}
impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> {
/// Creates a new queue. With given additional elements in the producer and
/// consumer portions of the queue.
///
/// Due to the performance implications of cache-contention,
/// we wish to keep fields used mainly by the producer on a separate cache
/// line than those used by the consumer.
/// Since cache lines are usually 64 bytes, it is unreasonably expensive to
/// allocate one for small fields, so we allow users to insert additional
/// fields into the cache lines already allocated by this for the producer
/// and consumer.
///
/// This is unsafe as the type system doesn't enforce a single
/// consumer-producer relationship. It also allows the consumer to `pop`
/// items while there is a `peek` active due to all methods having a
/// non-mutable receiver.
///
/// # Arguments
///
/// * `bound` - This queue implementation is implemented with a linked
/// list, and this means that a push is always a malloc. In
/// order to amortize this cost, an internal cache of nodes is
/// maintained to prevent a malloc from always being
/// necessary. This bound is the limit on the size of the
/// cache (if desired). If the value is 0, then the cache has
/// no bound. Otherwise, the cache will never grow larger than
/// `bound` (although the queue itself could be much larger.
pub unsafe fn with_additions(
bound: usize,
producer_addition: ProducerAddition,
consumer_addition: ConsumerAddition,
) -> Self {
let n1 = Node::new();
let n2 = Node::new();
(*n1).next.store(n2, Ordering::Relaxed);
Queue {
consumer: CacheAligned::new(Consumer {
tail: UnsafeCell::new(n2),
tail_prev: AtomicPtr::new(n1),
cache_bound: bound,
cached_nodes: AtomicUsize::new(0),
addition: consumer_addition
}),
producer: CacheAligned::new(Producer {
head: UnsafeCell::new(n2),
first: UnsafeCell::new(n1),
tail_copy: UnsafeCell::new(n1),
addition: producer_addition
}),
}
}
/// Pushes a new value onto this queue. Note that to use this function
/// safely, it must be externally guaranteed that there is only one pusher.
pub fn push(&self, t: T) {
unsafe {
// Acquire a node (which either uses a cached one or allocates a new
// one), and then append this to the 'head' node.
let n = self.alloc();
assert!((*n).value.is_none());
(*n).value = Some(t);
(*n).next.store(ptr::null_mut(), Ordering::Relaxed);
(**self.producer.head.get()).next.store(n, Ordering::Release);
*(&self.producer.head).get() = n;
}
}
unsafe fn alloc(&self) -> *mut Node<T> {
// First try to see if we can consume the 'first' node for our uses.
if *self.producer.first.get() != *self.producer.tail_copy.get() {
let ret = *self.producer.first.get();
*self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
return ret;
}
// If the above fails, then update our copy of the tail and try
// again.
*self.producer.0.tail_copy.get() =
self.consumer.tail_prev.load(Ordering::Acquire);
if *self.producer.first.get() != *self.producer.tail_copy.get() {
let ret = *self.producer.first.get();
*self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
return ret;
}
// If all of that fails, then we have to allocate a new node
// (there's nothing in the node cache).
Node::new()
}
/// Attempts to pop a value from this queue. Remember that to use this type
/// safely you must ensure that there is only one popper at a time.
pub fn pop(&self) -> Option<T> {
unsafe {
// The `tail` node is not actually a used node, but rather a
// sentinel from where we should start popping from. Hence, look at
// tail's next field and see if we can use it. If we do a pop, then
// the current tail node is a candidate for going into the cache.
let tail = *self.consumer.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if next.is_null() { return None }
assert!((*next).value.is_some());
let ret = (*next).value.take();
*self.consumer.0.tail.get() = next;
if self.consumer.cache_bound == 0 {
self.consumer.tail_prev.store(tail, Ordering::Release);
} else {
let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed);
if cached_nodes < self.consumer.cache_bound && !(*tail).cached {
self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed);
(*tail).cached = true;
}
if (*tail).cached {
self.consumer.tail_prev.store(tail, Ordering::Release);
} else {
(*self.consumer.tail_prev.load(Ordering::Relaxed))
.next.store(next, Ordering::Relaxed);
// We have successfully erased all references to 'tail', so
// now we can safely drop it.
let _: Box<Node<T>> = Box::from_raw(tail);
}
}
ret
}
}
/// Attempts to peek at the head of the queue, returning `None` if the queue
/// has no data currently
///
/// # Warning
/// The reference returned is invalid if it is not used before the consumer
/// pops the value off the queue. If the producer then pushes another value
/// onto the queue, it will overwrite the value pointed to by the reference.
pub fn peek(&self) -> Option<&mut T> {
// This is essentially the same as above with all the popping bits
// stripped out.
unsafe {
let tail = *self.consumer.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if next.is_null() { None } else { (*next).value.as_mut() }
}
}
pub fn | (&self) -> &ProducerAddition {
&self.producer.addition
}
pub fn consumer_addition(&self) -> &ConsumerAddition {
&self.consumer.addition
}
}
impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.producer.first.get();
while !cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _n: Box<Node<T>> = Box::from_raw(cur);
cur = next;
}
}
}
}
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::Arc;
use super::Queue;
use thread;
use sync::mpsc::channel;
#[test]
fn smoke() {
unsafe {
let queue = Queue::with_additions(0, (), ());
queue.push(1);
queue.push(2);
assert_eq!(queue.pop(), Some(1));
assert_eq!(queue.pop(), Some(2));
assert_eq!(queue.pop(), None);
queue.push(3);
queue.push(4);
assert_eq!(queue.pop(), Some(3));
assert_eq!(queue.pop(), Some(4));
assert_eq!(queue.pop(), None);
}
}
#[test]
fn peek() {
unsafe {
let queue = Queue::with_additions(0, (), ());
queue.push(vec![1]);
// Ensure the borrowchecker works
match queue.peek() {
Some(vec) => {
assert_eq!(&*vec, &[1]);
},
None => unreachable!()
}
match queue.pop() {
Some(vec) => {
assert_eq!(&*vec, &[1]);
},
None => unreachable!()
}
}
}
#[test]
fn drop_full() {
unsafe {
let q: Queue<Box<_>> = Queue::with_additions(0, (), ());
q.push(box 1);
q.push(box 2);
}
}
#[test]
fn smoke_bound() {
unsafe {
let q = Queue::with_additions(0, (), ());
q.push(1);
q.push(2);
assert_eq!(q.pop(), Some(1));
assert_eq!(q.pop(), Some(2));
assert_eq!(q.pop(), None);
q.push(3);
q.push(4);
assert_eq!(q.pop(), Some(3));
assert_eq!(q.pop(), Some(4));
assert_eq!(q.pop(), None);
}
}
#[test]
fn stress() {
unsafe {
stress_bound(0);
stress_bound(1);
}
unsafe fn stress_bound(bound: usize) {
let q = Arc::new(Queue::with_additions(bound, (), ()));
let (tx, rx) = channel();
let q2 = q.clone();
let _t = thread::spawn(move|| {
for _ in 0..100000 {
loop {
match q2.pop() {
Some(1) => break,
Some(_) => panic!(),
None => {}
}
}
}
tx.send(()).unwrap();
});
for _ in 0..100000 {
q.push(1);
}
rx.recv().unwrap();
}
}
}
| producer_addition | identifier_name |
abstracts.py | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import argparse
import viper.common.out as out
from viper.core.config import console_output
class ArgumentErrorCallback(Exception):
def __init__(self, message, level=''):
self.message = message.strip() + '\n'
self.level = level.strip()
def __str__(self):
return '{}: {}'.format(self.level, self.message)
def get(self):
return self.level, self.message
class | (argparse.ArgumentParser):
def print_usage(self):
raise ArgumentErrorCallback(self.format_usage())
def print_help(self):
raise ArgumentErrorCallback(self.format_help())
def error(self, message):
raise ArgumentErrorCallback(message, 'error')
def exit(self, status, message=None):
if message is not None:
raise ArgumentErrorCallback(message)
class Module(object):
cmd = ''
description = ''
command_line = []
args = None
authors = []
output = []
def __init__(self):
self.parser = ArgumentParser(prog=self.cmd, description=self.description)
def set_commandline(self, command):
self.command_line = command
def log(self, event_type, event_data):
self.output.append(dict(
type=event_type,
data=event_data
))
out.print_output([{'type': event_type, 'data': event_data}], console_output['filename'])
def usage(self):
self.log('', self.parser.format_usage())
def help(self):
self.log('', self.parser.format_help())
def run(self):
try:
self.args = self.parser.parse_args(self.command_line)
except ArgumentErrorCallback as e:
self.log(*e.get())
| ArgumentParser | identifier_name |
abstracts.py | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import argparse
import viper.common.out as out
from viper.core.config import console_output
class ArgumentErrorCallback(Exception):
def __init__(self, message, level=''):
self.message = message.strip() + '\n'
self.level = level.strip()
def __str__(self):
return '{}: {}'.format(self.level, self.message)
def get(self):
return self.level, self.message
class ArgumentParser(argparse.ArgumentParser):
def print_usage(self):
raise ArgumentErrorCallback(self.format_usage())
def print_help(self):
raise ArgumentErrorCallback(self.format_help())
def error(self, message):
raise ArgumentErrorCallback(message, 'error')
def exit(self, status, message=None):
if message is not None:
raise ArgumentErrorCallback(message)
class Module(object):
cmd = ''
description = ''
command_line = []
args = None
authors = []
output = []
def __init__(self):
self.parser = ArgumentParser(prog=self.cmd, description=self.description)
def set_commandline(self, command):
self.command_line = command
def log(self, event_type, event_data):
self.output.append(dict(
type=event_type,
data=event_data
))
out.print_output([{'type': event_type, 'data': event_data}], console_output['filename'])
def usage(self):
self.log('', self.parser.format_usage())
def help(self):
self.log('', self.parser.format_help())
def run(self):
try:
self.args = self.parser.parse_args(self.command_line)
except ArgumentErrorCallback as e: | self.log(*e.get()) | random_line_split | |
abstracts.py | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import argparse
import viper.common.out as out
from viper.core.config import console_output
class ArgumentErrorCallback(Exception):
def __init__(self, message, level=''):
self.message = message.strip() + '\n'
self.level = level.strip()
def __str__(self):
return '{}: {}'.format(self.level, self.message)
def get(self):
return self.level, self.message
class ArgumentParser(argparse.ArgumentParser):
def print_usage(self):
raise ArgumentErrorCallback(self.format_usage())
def print_help(self):
raise ArgumentErrorCallback(self.format_help())
def error(self, message):
raise ArgumentErrorCallback(message, 'error')
def exit(self, status, message=None):
if message is not None:
raise ArgumentErrorCallback(message)
class Module(object):
cmd = ''
description = ''
command_line = []
args = None
authors = []
output = []
def __init__(self):
self.parser = ArgumentParser(prog=self.cmd, description=self.description)
def set_commandline(self, command):
self.command_line = command
def log(self, event_type, event_data):
self.output.append(dict(
type=event_type,
data=event_data
))
out.print_output([{'type': event_type, 'data': event_data}], console_output['filename'])
def usage(self):
|
def help(self):
self.log('', self.parser.format_help())
def run(self):
try:
self.args = self.parser.parse_args(self.command_line)
except ArgumentErrorCallback as e:
self.log(*e.get())
| self.log('', self.parser.format_usage()) | identifier_body |
abstracts.py | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import argparse
import viper.common.out as out
from viper.core.config import console_output
class ArgumentErrorCallback(Exception):
def __init__(self, message, level=''):
self.message = message.strip() + '\n'
self.level = level.strip()
def __str__(self):
return '{}: {}'.format(self.level, self.message)
def get(self):
return self.level, self.message
class ArgumentParser(argparse.ArgumentParser):
def print_usage(self):
raise ArgumentErrorCallback(self.format_usage())
def print_help(self):
raise ArgumentErrorCallback(self.format_help())
def error(self, message):
raise ArgumentErrorCallback(message, 'error')
def exit(self, status, message=None):
if message is not None:
|
class Module(object):
cmd = ''
description = ''
command_line = []
args = None
authors = []
output = []
def __init__(self):
self.parser = ArgumentParser(prog=self.cmd, description=self.description)
def set_commandline(self, command):
self.command_line = command
def log(self, event_type, event_data):
self.output.append(dict(
type=event_type,
data=event_data
))
out.print_output([{'type': event_type, 'data': event_data}], console_output['filename'])
def usage(self):
self.log('', self.parser.format_usage())
def help(self):
self.log('', self.parser.format_help())
def run(self):
try:
self.args = self.parser.parse_args(self.command_line)
except ArgumentErrorCallback as e:
self.log(*e.get())
| raise ArgumentErrorCallback(message) | conditional_block |
dashboard.js | /*!
* Piwik - Web Analytics
*
* @link http://piwik.org
* @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
*/
function initDashboard(dashboardId, dashboardLayout) |
function createDashboard() {
$('#createDashboardName').attr('value', '');
piwikHelper.modalConfirm('#createDashboardConfirm', {yes: function(){
var dashboardName = $('#createDashboardName').attr('value');
var type = ($('#dashboard_type_empty:checked').length > 0) ? 'empty' : 'default';
piwikHelper.showAjaxLoading();
var ajaxRequest =
{
type: 'GET',
url: 'index.php?module=Dashboard&action=createNewDashboard',
dataType: 'json',
async: true,
error: piwikHelper.ajaxHandleError,
success: function(id) {
$('#dashboardWidgetsArea').dashboard('loadDashboard', id);
},
data: {
token_auth: piwik.token_auth,
idSite: piwik.idSite,
name: encodeURIComponent(dashboardName),
type: type
}
};
$.ajax(ajaxRequest);
}});
}
function resetDashboard() {
piwikHelper.modalConfirm('#resetDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('resetLayout'); }});
}
function renameDashboard() {
$('#newDashboardName').attr('value', $('#dashboardWidgetsArea').dashboard('getDashboardName'));
piwikHelper.modalConfirm('#renameDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('setDashboardName', $('#newDashboardName').attr('value')); }});
}
function removeDashboard() {
$('#removeDashboardConfirm h2 span').html($('#dashboardWidgetsArea').dashboard('getDashboardName'));
piwikHelper.modalConfirm('#removeDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('removeDashboard'); }});
}
function showChangeDashboardLayoutDialog() {
$('#columnPreview>div').removeClass('choosen');
$('#columnPreview>div[layout='+$('#dashboardWidgetsArea').dashboard('getColumnLayout')+']').addClass('choosen');
piwikHelper.modalConfirm('#changeDashboardLayout', {yes: function(){
$('#dashboardWidgetsArea').dashboard('setColumnLayout', $('#changeDashboardLayout .choosen').attr('layout'));
}});
}
function showEmptyDashboardNotification() {
piwikHelper.modalConfirm('#dashboardEmptyNotification', {
resetDashboard: function() { $('#dashboardWidgetsArea').dashboard('resetLayout'); },
addWidget: function(){ $('#dashboardSettings').trigger('click'); }
});
}
function setAsDefaultWidgets() {
piwikHelper.modalConfirm('#setAsDefaultWidgetsConfirm', {
yes: function(){ $('#dashboardWidgetsArea').dashboard('saveLayoutAsDefaultWidgetLayout'); }
});
}
| {
// Standard dashboard
if($('#periodString').length)
{
$('#periodString').after($('#dashboardSettings'));
$('#dashboardSettings').css({left:$('#periodString')[0].offsetWidth});
}
// Embed dashboard
if(!$('#topBars').length)
{
$('#dashboardSettings').css({left:0});
$('#dashboardSettings').after($('#Dashboard'));
$('#Dashboard > ul li a').each(function(){$(this).css({width:this.offestWidth+30, paddingLeft:0, paddingRight:0});});
$('#Dashboard_embeddedIndex_'+dashboardId).addClass('sfHover');
}
$('#dashboardSettings').on('click', function(){
$('#dashboardSettings').toggleClass('visible');
if ($('#dashboardWidgetsArea').dashboard('isDefaultDashboard')) {
$('#removeDashboardLink').hide();
} else {
$('#removeDashboardLink').show();
}
// fix position
$('#dashboardSettings .widgetpreview-widgetlist').css('paddingTop', $('#dashboardSettings .widgetpreview-categorylist').parent('li').position().top);
});
$('body').on('mouseup', function(e) {
if(!$(e.target).parents('#dashboardSettings').length && !$(e.target).is('#dashboardSettings')) {
$('#dashboardSettings').widgetPreview('reset');
$('#dashboardSettings').removeClass('visible');
}
});
widgetsHelper.getAvailableWidgets();
$('#dashboardWidgetsArea').on('dashboardempty', showEmptyDashboardNotification);
$('#dashboardWidgetsArea').dashboard({
idDashboard: dashboardId,
layout: dashboardLayout
});
$('#dashboardSettings').widgetPreview({
isWidgetAvailable: function(widgetUniqueId) {
return !$('#dashboardWidgetsArea [widgetId=' + widgetUniqueId + ']').length;
},
onSelect: function(widgetUniqueId) {
var widget = widgetsHelper.getWidgetObjectFromUniqueId(widgetUniqueId);
$('#dashboardWidgetsArea').dashboard('addWidget', widget.uniqueId, 1, widget.parameters, true, false);
$('#dashboardSettings').removeClass('visible');
},
resetOnSelect: true
});
$('#columnPreview>div').each(function(){
var width = new Array();
$('div', this).each(function(){
width.push(this.className.replace(/width-/, ''));
});
$(this).attr('layout', width.join('-'));
});
$('#columnPreview>div').on('click', function(){
$('#columnPreview>div').removeClass('choosen');
$(this).addClass('choosen');
});
$('.submenu>li').on('mouseenter', function(event){
if (!$('.widgetpreview-categorylist', event.target).length) {
$('#dashboardSettings').widgetPreview('reset');
}
});
} | identifier_body |
dashboard.js | /*!
* Piwik - Web Analytics
*
* @link http://piwik.org
* @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
*/
function initDashboard(dashboardId, dashboardLayout) {
// Standard dashboard
if($('#periodString').length)
{
$('#periodString').after($('#dashboardSettings'));
$('#dashboardSettings').css({left:$('#periodString')[0].offsetWidth});
}
// Embed dashboard
if(!$('#topBars').length)
{
$('#dashboardSettings').css({left:0});
$('#dashboardSettings').after($('#Dashboard'));
$('#Dashboard > ul li a').each(function(){$(this).css({width:this.offestWidth+30, paddingLeft:0, paddingRight:0});});
$('#Dashboard_embeddedIndex_'+dashboardId).addClass('sfHover');
}
$('#dashboardSettings').on('click', function(){
$('#dashboardSettings').toggleClass('visible');
if ($('#dashboardWidgetsArea').dashboard('isDefaultDashboard')) {
$('#removeDashboardLink').hide();
} else {
$('#removeDashboardLink').show();
}
// fix position
$('#dashboardSettings .widgetpreview-widgetlist').css('paddingTop', $('#dashboardSettings .widgetpreview-categorylist').parent('li').position().top);
});
$('body').on('mouseup', function(e) {
if(!$(e.target).parents('#dashboardSettings').length && !$(e.target).is('#dashboardSettings')) {
$('#dashboardSettings').widgetPreview('reset');
$('#dashboardSettings').removeClass('visible');
}
});
widgetsHelper.getAvailableWidgets();
$('#dashboardWidgetsArea').on('dashboardempty', showEmptyDashboardNotification);
$('#dashboardWidgetsArea').dashboard({
idDashboard: dashboardId,
layout: dashboardLayout
});
$('#dashboardSettings').widgetPreview({
isWidgetAvailable: function(widgetUniqueId) {
return !$('#dashboardWidgetsArea [widgetId=' + widgetUniqueId + ']').length;
},
onSelect: function(widgetUniqueId) {
var widget = widgetsHelper.getWidgetObjectFromUniqueId(widgetUniqueId);
$('#dashboardWidgetsArea').dashboard('addWidget', widget.uniqueId, 1, widget.parameters, true, false);
$('#dashboardSettings').removeClass('visible');
},
resetOnSelect: true
});
$('#columnPreview>div').each(function(){
var width = new Array();
$('div', this).each(function(){
width.push(this.className.replace(/width-/, ''));
});
$(this).attr('layout', width.join('-'));
});
$('#columnPreview>div').on('click', function(){
$('#columnPreview>div').removeClass('choosen');
$(this).addClass('choosen');
});
$('.submenu>li').on('mouseenter', function(event){
if (!$('.widgetpreview-categorylist', event.target).length) {
$('#dashboardSettings').widgetPreview('reset');
}
});
}
| var type = ($('#dashboard_type_empty:checked').length > 0) ? 'empty' : 'default';
piwikHelper.showAjaxLoading();
var ajaxRequest =
{
type: 'GET',
url: 'index.php?module=Dashboard&action=createNewDashboard',
dataType: 'json',
async: true,
error: piwikHelper.ajaxHandleError,
success: function(id) {
$('#dashboardWidgetsArea').dashboard('loadDashboard', id);
},
data: {
token_auth: piwik.token_auth,
idSite: piwik.idSite,
name: encodeURIComponent(dashboardName),
type: type
}
};
$.ajax(ajaxRequest);
}});
}
function resetDashboard() {
piwikHelper.modalConfirm('#resetDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('resetLayout'); }});
}
function renameDashboard() {
$('#newDashboardName').attr('value', $('#dashboardWidgetsArea').dashboard('getDashboardName'));
piwikHelper.modalConfirm('#renameDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('setDashboardName', $('#newDashboardName').attr('value')); }});
}
function removeDashboard() {
$('#removeDashboardConfirm h2 span').html($('#dashboardWidgetsArea').dashboard('getDashboardName'));
piwikHelper.modalConfirm('#removeDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('removeDashboard'); }});
}
function showChangeDashboardLayoutDialog() {
$('#columnPreview>div').removeClass('choosen');
$('#columnPreview>div[layout='+$('#dashboardWidgetsArea').dashboard('getColumnLayout')+']').addClass('choosen');
piwikHelper.modalConfirm('#changeDashboardLayout', {yes: function(){
$('#dashboardWidgetsArea').dashboard('setColumnLayout', $('#changeDashboardLayout .choosen').attr('layout'));
}});
}
function showEmptyDashboardNotification() {
piwikHelper.modalConfirm('#dashboardEmptyNotification', {
resetDashboard: function() { $('#dashboardWidgetsArea').dashboard('resetLayout'); },
addWidget: function(){ $('#dashboardSettings').trigger('click'); }
});
}
function setAsDefaultWidgets() {
piwikHelper.modalConfirm('#setAsDefaultWidgetsConfirm', {
yes: function(){ $('#dashboardWidgetsArea').dashboard('saveLayoutAsDefaultWidgetLayout'); }
});
} | function createDashboard() {
$('#createDashboardName').attr('value', '');
piwikHelper.modalConfirm('#createDashboardConfirm', {yes: function(){
var dashboardName = $('#createDashboardName').attr('value'); | random_line_split |
dashboard.js | /*!
* Piwik - Web Analytics
*
* @link http://piwik.org
* @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
*/
function initDashboard(dashboardId, dashboardLayout) {
// Standard dashboard
if($('#periodString').length)
{
$('#periodString').after($('#dashboardSettings'));
$('#dashboardSettings').css({left:$('#periodString')[0].offsetWidth});
}
// Embed dashboard
if(!$('#topBars').length)
{
$('#dashboardSettings').css({left:0});
$('#dashboardSettings').after($('#Dashboard'));
$('#Dashboard > ul li a').each(function(){$(this).css({width:this.offestWidth+30, paddingLeft:0, paddingRight:0});});
$('#Dashboard_embeddedIndex_'+dashboardId).addClass('sfHover');
}
$('#dashboardSettings').on('click', function(){
$('#dashboardSettings').toggleClass('visible');
if ($('#dashboardWidgetsArea').dashboard('isDefaultDashboard')) {
$('#removeDashboardLink').hide();
} else |
// fix position
$('#dashboardSettings .widgetpreview-widgetlist').css('paddingTop', $('#dashboardSettings .widgetpreview-categorylist').parent('li').position().top);
});
$('body').on('mouseup', function(e) {
if(!$(e.target).parents('#dashboardSettings').length && !$(e.target).is('#dashboardSettings')) {
$('#dashboardSettings').widgetPreview('reset');
$('#dashboardSettings').removeClass('visible');
}
});
widgetsHelper.getAvailableWidgets();
$('#dashboardWidgetsArea').on('dashboardempty', showEmptyDashboardNotification);
$('#dashboardWidgetsArea').dashboard({
idDashboard: dashboardId,
layout: dashboardLayout
});
$('#dashboardSettings').widgetPreview({
isWidgetAvailable: function(widgetUniqueId) {
return !$('#dashboardWidgetsArea [widgetId=' + widgetUniqueId + ']').length;
},
onSelect: function(widgetUniqueId) {
var widget = widgetsHelper.getWidgetObjectFromUniqueId(widgetUniqueId);
$('#dashboardWidgetsArea').dashboard('addWidget', widget.uniqueId, 1, widget.parameters, true, false);
$('#dashboardSettings').removeClass('visible');
},
resetOnSelect: true
});
$('#columnPreview>div').each(function(){
var width = new Array();
$('div', this).each(function(){
width.push(this.className.replace(/width-/, ''));
});
$(this).attr('layout', width.join('-'));
});
$('#columnPreview>div').on('click', function(){
$('#columnPreview>div').removeClass('choosen');
$(this).addClass('choosen');
});
$('.submenu>li').on('mouseenter', function(event){
if (!$('.widgetpreview-categorylist', event.target).length) {
$('#dashboardSettings').widgetPreview('reset');
}
});
}
function createDashboard() {
$('#createDashboardName').attr('value', '');
piwikHelper.modalConfirm('#createDashboardConfirm', {yes: function(){
var dashboardName = $('#createDashboardName').attr('value');
var type = ($('#dashboard_type_empty:checked').length > 0) ? 'empty' : 'default';
piwikHelper.showAjaxLoading();
var ajaxRequest =
{
type: 'GET',
url: 'index.php?module=Dashboard&action=createNewDashboard',
dataType: 'json',
async: true,
error: piwikHelper.ajaxHandleError,
success: function(id) {
$('#dashboardWidgetsArea').dashboard('loadDashboard', id);
},
data: {
token_auth: piwik.token_auth,
idSite: piwik.idSite,
name: encodeURIComponent(dashboardName),
type: type
}
};
$.ajax(ajaxRequest);
}});
}
function resetDashboard() {
piwikHelper.modalConfirm('#resetDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('resetLayout'); }});
}
function renameDashboard() {
$('#newDashboardName').attr('value', $('#dashboardWidgetsArea').dashboard('getDashboardName'));
piwikHelper.modalConfirm('#renameDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('setDashboardName', $('#newDashboardName').attr('value')); }});
}
function removeDashboard() {
$('#removeDashboardConfirm h2 span').html($('#dashboardWidgetsArea').dashboard('getDashboardName'));
piwikHelper.modalConfirm('#removeDashboardConfirm', {yes: function(){ $('#dashboardWidgetsArea').dashboard('removeDashboard'); }});
}
function showChangeDashboardLayoutDialog() {
$('#columnPreview>div').removeClass('choosen');
$('#columnPreview>div[layout='+$('#dashboardWidgetsArea').dashboard('getColumnLayout')+']').addClass('choosen');
piwikHelper.modalConfirm('#changeDashboardLayout', {yes: function(){
$('#dashboardWidgetsArea').dashboard('setColumnLayout', $('#changeDashboardLayout .choosen').attr('layout'));
}});
}
function showEmptyDashboardNotification() {
piwikHelper.modalConfirm('#dashboardEmptyNotification', {
resetDashboard: function() { $('#dashboardWidgetsArea').dashboard('resetLayout'); },
addWidget: function(){ $('#dashboardSettings').trigger('click'); }
});
}
function setAsDefaultWidgets() {
piwikHelper.modalConfirm('#setAsDefaultWidgetsConfirm', {
yes: function(){ $('#dashboardWidgetsArea').dashboard('saveLayoutAsDefaultWidgetLayout'); }
});
}
| {
$('#removeDashboardLink').show();
} | conditional_block |
dashboard.js | /*!
* Piwik - Web Analytics
*
* @link http://piwik.org
* @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
*/
/**
 * Wires up a dashboard page: positions the settings menu, binds the settings
 * panel and layout previews, and boots the dashboard widget area.
 *
 * @param dashboardId     id of the dashboard to load
 * @param dashboardLayout serialized layout handed to the dashboard plugin
 */
function initDashboard(dashboardId, dashboardLayout) {
    // Standard dashboard: anchor the settings menu next to the period selector.
    if ($('#periodString').length) {
        $('#periodString').after($('#dashboardSettings'));
        $('#dashboardSettings').css({left: $('#periodString')[0].offsetWidth});
    }
    // Embedded dashboard (no top bars): settings menu sits at the far left.
    if (!$('#topBars').length) {
        $('#dashboardSettings').css({left: 0});
        $('#dashboardSettings').after($('#Dashboard'));
        // Fixed: the original read `this.offestWidth` (typo), which is
        // undefined and made the computed width NaN; `offsetWidth` is the
        // actual DOM property.
        $('#Dashboard > ul li a').each(function () {
            $(this).css({width: this.offsetWidth + 30, paddingLeft: 0, paddingRight: 0});
        });
        $('#Dashboard_embeddedIndex_' + dashboardId).addClass('sfHover');
    }
    // Toggle the settings panel; the "remove dashboard" entry is hidden for
    // the default dashboard.
    $('#dashboardSettings').on('click', function () {
        $('#dashboardSettings').toggleClass('visible');
        if ($('#dashboardWidgetsArea').dashboard('isDefaultDashboard')) {
            $('#removeDashboardLink').hide();
        } else {
            $('#removeDashboardLink').show();
        }
        // fix position
        $('#dashboardSettings .widgetpreview-widgetlist').css('paddingTop', $('#dashboardSettings .widgetpreview-categorylist').parent('li').position().top);
    });
    // Clicking anywhere outside the settings panel closes it.
    $('body').on('mouseup', function (e) {
        if (!$(e.target).parents('#dashboardSettings').length && !$(e.target).is('#dashboardSettings')) {
            $('#dashboardSettings').widgetPreview('reset');
            $('#dashboardSettings').removeClass('visible');
        }
    });
    widgetsHelper.getAvailableWidgets();
    $('#dashboardWidgetsArea').on('dashboardempty', showEmptyDashboardNotification);
    $('#dashboardWidgetsArea').dashboard({
        idDashboard: dashboardId,
        layout: dashboardLayout
    });
    // Widget picker: only offer widgets not already present on the dashboard.
    $('#dashboardSettings').widgetPreview({
        isWidgetAvailable: function (widgetUniqueId) {
            return !$('#dashboardWidgetsArea [widgetId=' + widgetUniqueId + ']').length;
        },
        onSelect: function (widgetUniqueId) {
            var widget = widgetsHelper.getWidgetObjectFromUniqueId(widgetUniqueId);
            $('#dashboardWidgetsArea').dashboard('addWidget', widget.uniqueId, 1, widget.parameters, true, false);
            $('#dashboardSettings').removeClass('visible');
        },
        resetOnSelect: true
    });
    // Tag each layout preview with a 'layout' attribute (e.g. "50-50")
    // derived from its column width-* classes; used by the layout dialog.
    $('#columnPreview>div').each(function () {
        var width = new Array();
        $('div', this).each(function () {
            width.push(this.className.replace(/width-/, ''));
        });
        $(this).attr('layout', width.join('-'));
    });
    $('#columnPreview>div').on('click', function () {
        $('#columnPreview>div').removeClass('choosen');
        $(this).addClass('choosen');
    });
    // Collapse the widget preview when hovering submenu entries outside it.
    $('.submenu>li').on('mouseenter', function (event) {
        if (!$('.widgetpreview-categorylist', event.target).length) {
            $('#dashboardSettings').widgetPreview('reset');
        }
    });
}
/**
 * Prompts for a name and creates a new dashboard (empty or with the default
 * widget set), then loads it into the widget area.
 */
function createDashboard() {
    // .val() reads/writes the input's current value; the previous
    // .attr('value') only touched the HTML attribute, which jQuery >= 1.6
    // does not keep in sync with what the user typed.
    $('#createDashboardName').val('');
    piwikHelper.modalConfirm('#createDashboardConfirm', {yes: function () {
        var dashboardName = $('#createDashboardName').val();
        var type = ($('#dashboard_type_empty:checked').length > 0) ? 'empty' : 'default';
        piwikHelper.showAjaxLoading();
        var ajaxRequest =
        {
            type: 'GET',
            url: 'index.php?module=Dashboard&action=createNewDashboard',
            dataType: 'json',
            async: true,
            error: piwikHelper.ajaxHandleError,
            success: function (id) {
                $('#dashboardWidgetsArea').dashboard('loadDashboard', id);
            },
            data: {
                token_auth: piwik.token_auth,
                idSite: piwik.idSite,
                // NOTE(review): jQuery URL-encodes data values itself, so
                // this double-encodes the name; confirm the server decodes
                // twice before removing encodeURIComponent.
                name: encodeURIComponent(dashboardName),
                type: type
            }
        };
        $.ajax(ajaxRequest);
    }});
}
// Restores the dashboard's default widget layout once the user confirms.
function resetDashboard() {
    var handlers = {
        yes: function () {
            $('#dashboardWidgetsArea').dashboard('resetLayout');
        }
    };
    piwikHelper.modalConfirm('#resetDashboardConfirm', handlers);
}
/**
 * Lets the user rename the current dashboard. The input is pre-filled with
 * the current name; the edited value is saved on confirmation.
 */
function renameDashboard() {
    // .val() reads/writes the input's live value; the previous .attr('value')
    // only touched the HTML attribute, which jQuery >= 1.6 does not keep in
    // sync with what the user types.
    $('#newDashboardName').val($('#dashboardWidgetsArea').dashboard('getDashboardName'));
    piwikHelper.modalConfirm('#renameDashboardConfirm', {yes: function () {
        $('#dashboardWidgetsArea').dashboard('setDashboardName', $('#newDashboardName').val());
    }});
}
/**
 * Asks for confirmation (showing the dashboard's name in the dialog heading)
 * and deletes the current dashboard.
 */
function removeDashboard() {
    // .text() instead of .html(): the dashboard name is user-controlled, so
    // inserting it as HTML would let markup embedded in the name be
    // interpreted by the browser.
    $('#removeDashboardConfirm h2 span').text($('#dashboardWidgetsArea').dashboard('getDashboardName'));
    piwikHelper.modalConfirm('#removeDashboardConfirm', {yes: function () {
        $('#dashboardWidgetsArea').dashboard('removeDashboard');
    }});
}
// Opens the column-layout chooser with the active layout pre-selected and
// applies the chosen layout on confirmation.
function showChangeDashboardLayoutDialog() {
    $('#columnPreview>div').removeClass('choosen');
    var currentLayout = $('#dashboardWidgetsArea').dashboard('getColumnLayout');
    $('#columnPreview>div[layout=' + currentLayout + ']').addClass('choosen');
    piwikHelper.modalConfirm('#changeDashboardLayout', {
        yes: function () {
            var selected = $('#changeDashboardLayout .choosen').attr('layout');
            $('#dashboardWidgetsArea').dashboard('setColumnLayout', selected);
        }
    });
}
// Offered when the dashboard contains no widgets: the user may restore the
// default layout, or open the widget picker (simulated click on the settings
// menu).
// NOTE: the function name was garbled in this copy; restored from the
// identical definition earlier in the file.
function showEmptyDashboardNotification() {
    piwikHelper.modalConfirm('#dashboardEmptyNotification', {
        resetDashboard: function () { $('#dashboardWidgetsArea').dashboard('resetLayout'); },
        addWidget: function () { $('#dashboardSettings').trigger('click'); }
    });
}
// Persists the current layout as the default widget layout, after
// confirmation.
function setAsDefaultWidgets() {
    var onConfirm = function () {
        $('#dashboardWidgetsArea').dashboard('saveLayoutAsDefaultWidgetLayout');
    };
    piwikHelper.modalConfirm('#setAsDefaultWidgetsConfirm', { yes: onConfirm });
}
| showEmptyDashboardNotification | identifier_name |
ipc_lista2.16.py | #EQUIPE 2
#Nahan Trindade Passos - 1615310021
#Ana Beatriz Frota - 1615310027
#
#
#
#
#
#
import math
print("Digite os termos da equacao ax2+bx+c") | b = float(input("Valor de B:\n"))
c = float(input("Valor de C:\n"))
delta = (math.pow(b,2) - (4*a*c))
if(delta<0):
print("A equacao nao possui raizes reais")
elif(delta == 0):
raiz = ((-1)*b + math.sqrt(delta))/(2*a)
print("A equacao possui apenas uma raiz",raiz)
else:
raiz1 = ((-1)*b + math.sqrt(delta))/(2*a)
raiz2 = ((-1)*b - math.sqrt(delta))/(2*a)
print("A equacao possui duas raizes")
print("Primeira raiz:",raiz1)
print("Segunda raiz:",raiz2) | a = float(input("Digite o valor de A:\n"))
if(a==0):
print("Nao e uma equacao de segundo grau")
else: | random_line_split |
ipc_lista2.16.py | #EQUIPE 2
#Nahan Trindade Passos - 1615310021
#Ana Beatriz Frota - 1615310027
#
#
#
#
#
#
import math

# Quadratic solver: reads the coefficients of a*x^2 + b*x + c = 0 and prints
# its real roots.  This copy had an extraction artifact (a stray "| " fused
# into the b-input line and lost indentation); reconstructed from the intact
# duplicate of the same script above.  All prompts/messages are unchanged.
print("Digite os termos da equacao ax2+bx+c")
a = float(input("Digite o valor de A:\n"))
if(a==0):
    # a == 0 would make this a linear equation (and divide by zero below).
    print("Nao e uma equacao de segundo grau")
else:
    b = float(input("Valor de B:\n"))
    c = float(input("Valor de C:\n"))
    # Discriminant decides how many real roots exist.
    delta = (math.pow(b,2) - (4*a*c))
    if(delta<0):
        print("A equacao nao possui raizes reais")
    elif(delta == 0):
        raiz = ((-1)*b + math.sqrt(delta))/(2*a)
        print("A equacao possui apenas uma raiz",raiz)
    else:
        raiz1 = ((-1)*b + math.sqrt(delta))/(2*a)
        raiz2 = ((-1)*b - math.sqrt(delta))/(2*a)
        print("A equacao possui duas raizes")
        print("Primeira raiz:",raiz1)
        print("Segunda raiz:",raiz2)
slope.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
slope.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class slope(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
ZEVENBERGEN = 'ZEVENBERGEN'
AS_PERCENT = 'AS_PERCENT'
SCALE = 'SCALE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Raster analysis')
def name(self):
return 'slope'
def displayName(self):
return self.tr('Slope')
def defineCharacteristics(self):
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addParameter(ParameterBoolean(self.ZEVENBERGEN,
self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
False))
self.addParameter(ParameterBoolean(self.AS_PERCENT,
self.tr('Slope expressed as percent (instead of degrees)'), False))
self.addParameter(ParameterNumber(self.SCALE,
self.tr('Scale (ratio of vert. units to horiz.)'),
0.0, 99999999.999999, 1.0))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Slope')))
def getConsoleCommands(self):
arguments = ['slope']
arguments.append(str(self.getParameterValue(self.INPUT)))
output = str(self.getOutputValue(self.OUTPUT))
arguments.append(output)
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-b')
arguments.append(str(self.getParameterValue(self.BAND)))
arguments.append('-s')
arguments.append(str(self.getParameterValue(self.SCALE)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
if self.getParameterValue(self.ZEVENBERGEN):
arguments.append('-alg') |
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)] | arguments.append('ZevenbergenThorne')
if self.getParameterValue(self.AS_PERCENT):
arguments.append('-p') | random_line_split |
slope.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
slope.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class slope(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
ZEVENBERGEN = 'ZEVENBERGEN'
AS_PERCENT = 'AS_PERCENT'
SCALE = 'SCALE'
OUTPUT = 'OUTPUT'
def group(self):
    """Toolbox category this algorithm is listed under."""
    category = self.tr('Raster analysis')
    return category
def name(self):
|
def displayName(self):
    """Human-readable algorithm name shown in the GUI."""
    label = self.tr('Slope')
    return label
# Declares the algorithm's inputs (raster, band number, boolean options,
# scale) and its single raster output; the declaration order is the order
# the controls appear in the Processing dialog.
def defineCharacteristics(self):
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addParameter(ParameterBoolean(self.ZEVENBERGEN,
self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
False))
self.addParameter(ParameterBoolean(self.AS_PERCENT,
self.tr('Slope expressed as percent (instead of degrees)'), False))
self.addParameter(ParameterNumber(self.SCALE,
self.tr('Scale (ratio of vert. units to horiz.)'),
0.0, 99999999.999999, 1.0))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Slope')))
def getConsoleCommands(self):
    """Assemble the gdaldem invocation for the current parameter values.

    Returns a two-element list: the executable name and the escaped,
    space-joined argument string.
    """
    in_layer = str(self.getParameterValue(self.INPUT))
    out_path = str(self.getOutputValue(self.OUTPUT))
    cmd = [
        'slope',
        in_layer,
        out_path,
        # Output driver is derived from the output file extension.
        '-of', GdalUtils.getFormatShortNameFromFilename(out_path),
        '-b', str(self.getParameterValue(self.BAND)),
        # Ratio of vertical to horizontal units.
        '-s', str(self.getParameterValue(self.SCALE)),
    ]
    if self.getParameterValue(self.COMPUTE_EDGES):
        cmd.append('-compute_edges')
    if self.getParameterValue(self.ZEVENBERGEN):
        cmd.extend(['-alg', 'ZevenbergenThorne'])
    if self.getParameterValue(self.AS_PERCENT):
        cmd.append('-p')
    return ['gdaldem', GdalUtils.escapeAndJoin(cmd)]
| return 'slope' | identifier_body |
slope.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
slope.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class | (GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
ZEVENBERGEN = 'ZEVENBERGEN'
AS_PERCENT = 'AS_PERCENT'
SCALE = 'SCALE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Raster analysis')
def name(self):
return 'slope'
def displayName(self):
return self.tr('Slope')
def defineCharacteristics(self):
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addParameter(ParameterBoolean(self.ZEVENBERGEN,
self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
False))
self.addParameter(ParameterBoolean(self.AS_PERCENT,
self.tr('Slope expressed as percent (instead of degrees)'), False))
self.addParameter(ParameterNumber(self.SCALE,
self.tr('Scale (ratio of vert. units to horiz.)'),
0.0, 99999999.999999, 1.0))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Slope')))
# Assembles the `gdaldem slope ...` console invocation from the dialog
# values; returns the two-element list [program, escaped-argument-string]
# expected by the GDAL runner.
def getConsoleCommands(self):
arguments = ['slope']
arguments.append(str(self.getParameterValue(self.INPUT)))
output = str(self.getOutputValue(self.OUTPUT))
arguments.append(output)
# Output driver is inferred from the output file's extension.
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-b')
arguments.append(str(self.getParameterValue(self.BAND)))
# Ratio of vertical to horizontal units.
arguments.append('-s')
arguments.append(str(self.getParameterValue(self.SCALE)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
if self.getParameterValue(self.ZEVENBERGEN):
arguments.append('-alg')
arguments.append('ZevenbergenThorne')
if self.getParameterValue(self.AS_PERCENT):
arguments.append('-p')
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
| slope | identifier_name |
slope.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
slope.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class slope(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
ZEVENBERGEN = 'ZEVENBERGEN'
AS_PERCENT = 'AS_PERCENT'
SCALE = 'SCALE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Raster analysis')
def name(self):
return 'slope'
def displayName(self):
return self.tr('Slope')
# Registers the algorithm's parameters and its raster output; controls are
# shown in the Processing dialog in this declaration order.
def defineCharacteristics(self):
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addParameter(ParameterBoolean(self.ZEVENBERGEN,
self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
False))
self.addParameter(ParameterBoolean(self.AS_PERCENT,
self.tr('Slope expressed as percent (instead of degrees)'), False))
self.addParameter(ParameterNumber(self.SCALE,
self.tr('Scale (ratio of vert. units to horiz.)'),
0.0, 99999999.999999, 1.0))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Slope')))
def getConsoleCommands(self):
arguments = ['slope']
arguments.append(str(self.getParameterValue(self.INPUT)))
output = str(self.getOutputValue(self.OUTPUT))
arguments.append(output)
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-b')
arguments.append(str(self.getParameterValue(self.BAND)))
arguments.append('-s')
arguments.append(str(self.getParameterValue(self.SCALE)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
if self.getParameterValue(self.ZEVENBERGEN):
|
if self.getParameterValue(self.AS_PERCENT):
arguments.append('-p')
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
| arguments.append('-alg')
arguments.append('ZevenbergenThorne') | conditional_block |
Tab.d.ts | import * as React from "react";
import ReactToolbox from "../index";
export interface TabTheme {
/**
* Added to the navigation tab element in case it's active.
*/
active?: string;
/**
* Added to the navigation tab element in case it's disabled.
*/
disabled?: string;
/**
* Added to the navigation tab element in case it's hidden.
*/
hidden?: string;
/**
* Added to the navigation tab element in case it's active.
*/
label?: string;
/**
* Class added when icon is set.
*/
withIcon?: string;
/**
* Class added when label is set.
*/
withText?: string;
}
export interface TabProps extends ReactToolbox.Props {
/**
* If true, the current component is visible.
*/
active?: boolean;
/**
* Additional class name to provide custom styling for the active tab.
*/
activeClassName?: string;
/**
* If true, the current component is not clickable.
* @default false
*/
disabled?: boolean;
/**
* If true, the current component is not visible.
* @default false
*/
hidden?: boolean;
/**
* Icon to be used in inner FontIcon.
*/
icon?: React.ReactNode;
/**
* Label text for navigation header. Required.
*/
label: string;
/**
* Callback function that is fired when the tab is activated.
*/
onActive?: Function;
/** | /**
* Additional properties passed to Tab root container.
*/
[key: string]: any;
}
export class Tab extends React.Component<TabProps, {}> { }
export default Tab; | * Classnames object defining the component style.
*/
theme?: TabTheme; | random_line_split |
Tab.d.ts | import * as React from "react";
import ReactToolbox from "../index";
/**
 * CSS class hooks applied by the Tab component; each entry names the class
 * added in the corresponding state.
 */
export interface TabTheme {
/**
 * Added to the navigation tab element in case it's active.
 */
active?: string;
/**
 * Added to the navigation tab element in case it's disabled.
 */
disabled?: string;
/**
 * Added to the navigation tab element in case it's hidden.
 */
hidden?: string;
/**
 * Added to the label element inside the navigation tab.
 * (Previous text here was a copy-paste of the `active` description.)
 */
label?: string;
/**
 * Class added when icon is set.
 */
withIcon?: string;
/**
 * Class added when label is set.
 */
withText?: string;
}
export interface TabProps extends ReactToolbox.Props {
/**
 * If true, the current component is visible.
 */
active?: boolean;
/**
 * Additional class name to provide custom styling for the active tab.
 */
activeClassName?: string;
/**
 * If true, the current component is not clickable.
 * @default false
 */
disabled?: boolean;
/**
 * If true, the current component is not visible.
 * @default false
 */
hidden?: boolean;
/**
 * Icon to be used in inner FontIcon.
 */
icon?: React.ReactNode;
/**
 * Label text for navigation header. Required.
 */
label: string;
/**
 * Callback function that is fired when the tab is activated.
 * NOTE(review): the broad `Function` type accepts any callable; a concrete
 * call signature (e.g. `() => void`) would be safer, but narrowing it is a
 * breaking change for consumers — confirm before tightening.
 */
onActive?: Function;
/**
 * Classnames object defining the component style.
 */
theme?: TabTheme;
/**
 * Additional properties passed to Tab root container.
 * (Broad index signature is intentional: props are forwarded to the root.)
 */
[key: string]: any;
}
export class | extends React.Component<TabProps, {}> { }
export default Tab;
| Tab | identifier_name |
index.js | /**
* @param {string} s
* @return {boolean}
*/
// LeetCode 678 "Valid Parenthesis String": decides whether s, made of '(',
// ')' and '*' (joker: '(' or ')' or empty), can be read as a balanced
// parenthesis string.  Greedy two-pass scan, O(n) time, O(1) space.
// Reconstructed: the extraction masked the first loop body out of this copy.
const checkValidString = function (s) {
  // Forward pass: every ')' must be coverable by a preceding '(' or '*'.
  let leftCount = 0, rightCount = 0, starCount = 0;
  let idx = 0;
  while (idx < s.length) {
    let ch = s[idx++];
    if (ch === '(') {
      ++leftCount;
    } else if (ch === '*') {
      ++starCount;
    } else { // ch === ')'
      ++rightCount;
      if (rightCount > leftCount + starCount) {
        return false;
      }
    }
  }
  // Backward pass: every '(' must be coverable by a following ')' or '*'.
  idx = s.length - 1;
  leftCount = rightCount = starCount = 0;
  while (idx >= 0) {
    let ch = s[idx--];
    if (ch === ')') {
      ++rightCount;
    } else if (ch === '*') {
      ++starCount;
    } else { // ch === '('
      ++leftCount;
      if (leftCount > rightCount + starCount) {
        return false;
      }
    }
  }
  return true;
};
module.exports = checkValidString;
index.js | /**
* @param {string} s
* @return {boolean}
*/
// Valid Parenthesis String (LeetCode 678): '*' may act as '(', ')' or
// nothing.  Two greedy linear scans; reconstructed because the extraction
// scrambled the order of these lines (random_line_split artifact).
const checkValidString = function (s) {
  // Left-to-right: ')' count may never exceed '(' plus available jokers.
  let leftCount = 0, rightCount = 0, starCount = 0;
  let idx = 0;
  while (idx < s.length) {
    let ch = s[idx++];
    if (ch === '(') {
      ++leftCount;
    } else if (ch === '*') {
      ++starCount;
    } else { // ch === ')'
      ++rightCount;
      if (rightCount > leftCount + starCount) {
        return false;
      }
    }
  }
  // Right-to-left: '(' count may never exceed ')' plus available jokers.
  idx = s.length - 1;
  leftCount = rightCount = starCount = 0;
  while (idx >= 0) {
    let ch = s[idx--];
    if (ch === ')') {
      ++rightCount;
    } else if (ch === '*') {
      ++starCount;
    } else { // ch === '('
      ++leftCount;
      if (leftCount > rightCount + starCount) {
        return false;
      }
    }
  }
  return true;
};
module.exports = checkValidString;
HelpDialog.js | /*
* ../../../..//localization/fr/HelpDialog.js
*
* Copyright (c) 2009-2018 The MathJax Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*************************************************************
*
* MathJax/localization/fr/HelpDialog.js
*
* Copyright (c) 2009-2018 The MathJax Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
MathJax.Localization.addTranslation("fr", "HelpDialog", {
version: "2.7.5",
isLoaded: true,
strings: {
Help: "Aide MathJax",
MathJax:
"*MathJax* est une biblioth\u00E8que JavaScript qui permet aux auteurs de pages d\u2019inclure des math\u00E9matiques dans leurs pages web. En tant que lecteur, vous n\u2019avez rien besoin de faire pour que cela fonctionne.",
Browsers:
"*Navigateurs*: MathJax fonctionne avec tous les navigateurs modernes y compris Internet Explorer 6, Firefox 3, Chrome 0.2, Safari 2, Opera 9.6 et leurs versions sup\u00E9rieures ainsi que la plupart des navigateurs pour mobiles et tablettes.",
Menu:
"*Menu math*: MathJax ajoute un menu contextuel aux \u00E9quations. Cliquez-droit ou Ctrl-cliquez sur n\u2019importe quelle formule math\u00E9matique pour acc\u00E9der au menu.",
ShowMath:
"*Afficher les maths comme* vous permet d\u2019afficher le balisage source de la formule pour copier-coller (comme MathML ou dans son format d\u2019origine).",
Settings:
"*Param\u00E8tres* vous donne le contr\u00F4le sur les fonctionnalit\u00E9s de MathJax, comme la taille des math\u00E9matiques, et le m\u00E9canisme utilis\u00E9 pour afficher les \u00E9quations.",
Language:
"*Langue* vous laisse s\u00E9lectionner la langue utilis\u00E9e par MathJax pour ses menus et ses messages d\u2019avertissement.",
Zoom:
"*Zoom des maths*: Si vous avez des difficult\u00E9s \u00E0 lire une \u00E9quation, MathJax peut l\u2019agrandir pour vous aider \u00E0 mieux la voir.",
Accessibilty:
"*Accessibilit\u00E9*: MathJax travaillera automatiquement avec les lecteurs d\u2019\u00E9cran pour rendre les math\u00E9matiques accessibles aux malvoyants.",
Fonts:
"*Polices*: MathJax utilisera certaines polices math\u00E9matiques si elles sont install\u00E9es sur votre ordinateur\u202F; sinon, il utilisera les polices trouv\u00E9es sur le web. Bien que ce ne soit pas obligatoire, des polices install\u00E9es localement acc\u00E9l\u00E9reront la composition. Nous vous sugg\u00E9rons d\u2019installer les [polices STIX](%1).",
CloseDialog: "Fermer la bo\u00EEte de dialogue d\u2019aide" | MathJax.Ajax.loadComplete("[MathJax]/localization/fr/HelpDialog.js"); | }
});
| random_line_split |
airline-seat-recline-normal.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _reactAddonsPureRenderMixin = require('react-addons-pure-render-mixin');
var _reactAddonsPureRenderMixin2 = _interopRequireDefault(_reactAddonsPureRenderMixin);
var _svgIcon = require('../../svg-icon');
var _svgIcon2 = _interopRequireDefault(_svgIcon);
function _interopRequireDefault(obj) |
var NotificationAirlineSeatReclineNormal = _react2.default.createClass({
displayName: 'NotificationAirlineSeatReclineNormal',
mixins: [_reactAddonsPureRenderMixin2.default],
render: function render() {
return _react2.default.createElement(
_svgIcon2.default,
this.props,
_react2.default.createElement('path', { d: 'M7.59 5.41c-.78-.78-.78-2.05 0-2.83.78-.78 2.05-.78 2.83 0 .78.78.78 2.05 0 2.83-.79.79-2.05.79-2.83 0zM6 16V7H4v9c0 2.76 2.24 5 5 5h6v-2H9c-1.66 0-3-1.34-3-3zm14 4.07L14.93 15H11.5v-3.68c1.4 1.15 3.6 2.16 5.5 2.16v-2.16c-1.66.02-3.61-.87-4.67-2.04l-1.4-1.55c-.19-.21-.43-.38-.69-.5-.29-.14-.62-.23-.96-.23h-.03C8.01 7 7 8.01 7 9.25V15c0 1.66 1.34 3 3 3h5.07l3.5 3.5L20 20.07z' })
);
}
});
exports.default = NotificationAirlineSeatReclineNormal;
module.exports = exports['default']; | { return obj && obj.__esModule ? obj : { default: obj }; } | identifier_body |
airline-seat-recline-normal.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _reactAddonsPureRenderMixin = require('react-addons-pure-render-mixin');
var _reactAddonsPureRenderMixin2 = _interopRequireDefault(_reactAddonsPureRenderMixin);
var _svgIcon = require('../../svg-icon');
var _svgIcon2 = _interopRequireDefault(_svgIcon);
function _interopRequireDefault(obj) {
  // Wrap CommonJS exports so that `.default` always exists; genuine ES
  // modules (flagged by __esModule) are passed through untouched.
  if (obj && obj.__esModule) {
    return obj;
  }
  return { default: obj };
}
var NotificationAirlineSeatReclineNormal = _react2.default.createClass({
displayName: 'NotificationAirlineSeatReclineNormal',
mixins: [_reactAddonsPureRenderMixin2.default],
render: function render() {
return _react2.default.createElement(
_svgIcon2.default, | this.props,
_react2.default.createElement('path', { d: 'M7.59 5.41c-.78-.78-.78-2.05 0-2.83.78-.78 2.05-.78 2.83 0 .78.78.78 2.05 0 2.83-.79.79-2.05.79-2.83 0zM6 16V7H4v9c0 2.76 2.24 5 5 5h6v-2H9c-1.66 0-3-1.34-3-3zm14 4.07L14.93 15H11.5v-3.68c1.4 1.15 3.6 2.16 5.5 2.16v-2.16c-1.66.02-3.61-.87-4.67-2.04l-1.4-1.55c-.19-.21-.43-.38-.69-.5-.29-.14-.62-.23-.96-.23h-.03C8.01 7 7 8.01 7 9.25V15c0 1.66 1.34 3 3 3h5.07l3.5 3.5L20 20.07z' })
);
}
});
exports.default = NotificationAirlineSeatReclineNormal;
module.exports = exports['default']; | random_line_split | |
airline-seat-recline-normal.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _reactAddonsPureRenderMixin = require('react-addons-pure-render-mixin');
var _reactAddonsPureRenderMixin2 = _interopRequireDefault(_reactAddonsPureRenderMixin);
var _svgIcon = require('../../svg-icon');
var _svgIcon2 = _interopRequireDefault(_svgIcon);
function | (obj) { return obj && obj.__esModule ? obj : { default: obj }; }
// Legacy React.createClass component rendering the Material Design
// "airline seat recline normal" glyph inside the shared SvgIcon wrapper.
// PureRenderMixin skips re-renders when props are shallow-equal; all props
// are forwarded to the SvgIcon root.
var NotificationAirlineSeatReclineNormal = _react2.default.createClass({
displayName: 'NotificationAirlineSeatReclineNormal',
mixins: [_reactAddonsPureRenderMixin2.default],
render: function render() {
return _react2.default.createElement(
_svgIcon2.default,
this.props,
_react2.default.createElement('path', { d: 'M7.59 5.41c-.78-.78-.78-2.05 0-2.83.78-.78 2.05-.78 2.83 0 .78.78.78 2.05 0 2.83-.79.79-2.05.79-2.83 0zM6 16V7H4v9c0 2.76 2.24 5 5 5h6v-2H9c-1.66 0-3-1.34-3-3zm14 4.07L14.93 15H11.5v-3.68c1.4 1.15 3.6 2.16 5.5 2.16v-2.16c-1.66.02-3.61-.87-4.67-2.04l-1.4-1.55c-.19-.21-.43-.38-.69-.5-.29-.14-.62-.23-.96-.23h-.03C8.01 7 7 8.01 7 9.25V15c0 1.66 1.34 3 3 3h5.07l3.5 3.5L20 20.07z' })
);
}
});
exports.default = NotificationAirlineSeatReclineNormal;
module.exports = exports['default'];
WebpackRunner.ts | import MemoryFS = require('memory-fs');
import path = require('path');
import webpack = require('webpack');
import logger from './logger';
/** Compiled output files keyed by emitted file name. */
export type WebpackBundle = { [key: string]: Buffer };
/** Construction options consumed by the code that builds a runner. */
export type Options = {
config?: webpack.Configuration;
packages?: string[];
includes?: string[];
basedir?: string;
};
/** webpack configuration with the fields this runner requires made mandatory. */
export interface WebpackBaseConfiguration extends webpack.Configuration {
context: string;
entry: string;
output: {
path: string;
filename: string;
};
resolve: webpack.Resolve;
resolveLoader: webpack.ResolveLoader;
}
/** Node-style completion callback: either an error, or (bundle, stats). */
type OnComplete = (error?: Error, bundle?: WebpackBundle, stats?: webpack.Stats) => void;
// Widened compiler type exposing the (untyped) internal file systems and
// resolvers that get pointed at the in-memory fs below.
interface WebpackCompiler extends webpack.Compiler {
inputFileSystem: any;
resolvers: {
normal: any;
loader: any;
context: any;
};
}
const log = logger('webpack');
/**
 * Compiles a single entry source with webpack entirely against an in-memory
 * file system (memory-fs): nothing is read from or written to disk.
 */
export default class WebpackRunner {
// In-memory fs shared by input, output, and all resolver lookups.
private readonly memfs: MemoryFS;
private readonly config: WebpackBaseConfiguration;
constructor(memfs: MemoryFS, config: WebpackBaseConfiguration) {
this.memfs = memfs;
this.config = config;
}
/**
 * Compiles `source` and resolves with `[bundle, stats]`.
 * Rejects with the compiler error, with `stats.toString()` when the
 * compilation itself reports errors, or with a generic Error otherwise.
 */
public run(source: string | Buffer): Promise<[WebpackBundle, webpack.Stats]> {
return new Promise((resolve, reject) => {
log.debug('Executing script...');
this.runAsync(source, (error, bundle, stats) => {
if (error) {
log.error('Failed to execute script.');
reject(error);
} else if (stats && stats.hasErrors()) {
log.error('Script finished with errors.');
reject(stats.toString());
} else if (bundle && stats) {
log.debug('Successfully compiled bundle.');
resolve([bundle, stats]);
} else {
log.error('Failed to execute script.');
reject(new Error('Unknown error.'));
}
});
});
}
/**
 * Debug snapshot: a frozen shallow copy of the config plus the sorted
 * node_modules entries visible in the compilation context.
 */
public toJSON(): object {
return {
config: Object.freeze(Object.assign({}, this.config)),
node_modules: this.nodeModulesInContext().sort()
};
}
public toString(): string {
return JSON.stringify(this.toJSON(), null, 2);
}
// Writes the entry into memfs, points every compiler-internal file system
// at memfs, runs webpack, then collects the emitted files into a bundle.
private runAsync(source: string | Buffer, onComplete: OnComplete): void {
const entry = path.join(this.config.context, this.config.entry);
this.memfs.mkdirpSync(path.dirname(entry));
this.memfs.writeFileSync(entry, source);
const outDir = this.config.output.path;
const compiler = webpack(this.config) as WebpackCompiler;
compiler.inputFileSystem = this.memfs;
compiler.outputFileSystem = this.memfs;
compiler.resolvers.normal.fileSystem = this.memfs;
compiler.resolvers.loader.fileSystem = this.memfs;
compiler.resolvers.context.fileSystem = this.memfs;
compiler.run((error, stats) => {
if (error) return onComplete(error);
// Copy every emitted file out of memfs, then drop the output dir so
// repeated runs do not accumulate stale artifacts.
const files = this.memfs.readdirSync(outDir);
const bundle = files.reduce(
(object, file) =>
Object.assign(object, {
[file]: this.memfs.readFileSync(path.join(outDir, file))
}),
{} as WebpackBundle
);
this.memfs.rmdirSync(outDir);
onComplete(undefined, bundle, stats);
});
}
// NOTE(review): readdirSync throws if `<context>/node_modules` is absent —
// confirm callers always create it before toJSON()/toString() is used.
private nodeModulesInContext(): string[] {
return this.memfs.readdirSync(path.join(this.config.context, 'node_modules'));
}
}
| toJSON | identifier_name |
WebpackRunner.ts | import MemoryFS = require('memory-fs');
import path = require('path');
import webpack = require('webpack');
import logger from './logger';
export type WebpackBundle = { [key: string]: Buffer };
export type Options = {
config?: webpack.Configuration;
packages?: string[];
includes?: string[];
basedir?: string;
};
export interface WebpackBaseConfiguration extends webpack.Configuration {
context: string;
entry: string;
output: {
path: string;
filename: string;
};
resolve: webpack.Resolve;
resolveLoader: webpack.ResolveLoader;
}
type OnComplete = (error?: Error, bundle?: WebpackBundle, stats?: webpack.Stats) => void;
interface WebpackCompiler extends webpack.Compiler {
inputFileSystem: any;
resolvers: {
normal: any;
loader: any;
context: any;
};
}
const log = logger('webpack');
export default class WebpackRunner {
private readonly memfs: MemoryFS;
private readonly config: WebpackBaseConfiguration;
constructor(memfs: MemoryFS, config: WebpackBaseConfiguration) {
this.memfs = memfs;
this.config = config;
}
| return new Promise((resolve, reject) => {
log.debug('Executing script...');
this.runAsync(source, (error, bundle, stats) => {
if (error) {
log.error('Failed to execute script.');
reject(error);
} else if (stats && stats.hasErrors()) {
log.error('Script finished with errors.');
reject(stats.toString());
} else if (bundle && stats) {
log.debug('Successfully compiled bundle.');
resolve([bundle, stats]);
} else {
log.error('Failed to execute script.');
reject(new Error('Unknown error.'));
}
});
});
}
public toJSON(): object {
return {
config: Object.freeze(Object.assign({}, this.config)),
node_modules: this.nodeModulesInContext().sort()
};
}
public toString(): string {
return JSON.stringify(this.toJSON(), null, 2);
}
private runAsync(source: string | Buffer, onComplete: OnComplete): void {
const entry = path.join(this.config.context, this.config.entry);
this.memfs.mkdirpSync(path.dirname(entry));
this.memfs.writeFileSync(entry, source);
const outDir = this.config.output.path;
const compiler = webpack(this.config) as WebpackCompiler;
compiler.inputFileSystem = this.memfs;
compiler.outputFileSystem = this.memfs;
compiler.resolvers.normal.fileSystem = this.memfs;
compiler.resolvers.loader.fileSystem = this.memfs;
compiler.resolvers.context.fileSystem = this.memfs;
compiler.run((error, stats) => {
if (error) return onComplete(error);
const files = this.memfs.readdirSync(outDir);
const bundle = files.reduce(
(object, file) =>
Object.assign(object, {
[file]: this.memfs.readFileSync(path.join(outDir, file))
}),
{} as WebpackBundle
);
this.memfs.rmdirSync(outDir);
onComplete(undefined, bundle, stats);
});
}
private nodeModulesInContext(): string[] {
return this.memfs.readdirSync(path.join(this.config.context, 'node_modules'));
}
} | public run(source: string | Buffer): Promise<[WebpackBundle, webpack.Stats]> { | random_line_split |
WebpackRunner.ts | import MemoryFS = require('memory-fs');
import path = require('path');
import webpack = require('webpack');
import logger from './logger';
export type WebpackBundle = { [key: string]: Buffer };
export type Options = {
config?: webpack.Configuration;
packages?: string[];
includes?: string[];
basedir?: string;
};
export interface WebpackBaseConfiguration extends webpack.Configuration {
context: string;
entry: string;
output: {
path: string;
filename: string;
};
resolve: webpack.Resolve;
resolveLoader: webpack.ResolveLoader;
}
type OnComplete = (error?: Error, bundle?: WebpackBundle, stats?: webpack.Stats) => void;
interface WebpackCompiler extends webpack.Compiler {
inputFileSystem: any;
resolvers: {
normal: any;
loader: any;
context: any;
};
}
const log = logger('webpack');
export default class WebpackRunner {
private readonly memfs: MemoryFS;
private readonly config: WebpackBaseConfiguration;
constructor(memfs: MemoryFS, config: WebpackBaseConfiguration) {
this.memfs = memfs;
this.config = config;
}
public run(source: string | Buffer): Promise<[WebpackBundle, webpack.Stats]> {
return new Promise((resolve, reject) => {
log.debug('Executing script...');
this.runAsync(source, (error, bundle, stats) => {
if (error) {
log.error('Failed to execute script.');
reject(error);
} else if (stats && stats.hasErrors()) {
log.error('Script finished with errors.');
reject(stats.toString());
} else if (bundle && stats) {
log.debug('Successfully compiled bundle.');
resolve([bundle, stats]);
} else {
log.error('Failed to execute script.');
reject(new Error('Unknown error.'));
}
});
});
}
public toJSON(): object {
return {
config: Object.freeze(Object.assign({}, this.config)),
node_modules: this.nodeModulesInContext().sort()
};
}
public toString(): string {
return JSON.stringify(this.toJSON(), null, 2);
}
private runAsync(source: string | Buffer, onComplete: OnComplete): void {
const entry = path.join(this.config.context, this.config.entry);
this.memfs.mkdirpSync(path.dirname(entry));
this.memfs.writeFileSync(entry, source);
const outDir = this.config.output.path;
const compiler = webpack(this.config) as WebpackCompiler;
compiler.inputFileSystem = this.memfs;
compiler.outputFileSystem = this.memfs;
compiler.resolvers.normal.fileSystem = this.memfs;
compiler.resolvers.loader.fileSystem = this.memfs;
compiler.resolvers.context.fileSystem = this.memfs;
compiler.run((error, stats) => {
if (error) return onComplete(error);
const files = this.memfs.readdirSync(outDir);
const bundle = files.reduce(
(object, file) =>
Object.assign(object, {
[file]: this.memfs.readFileSync(path.join(outDir, file))
}),
{} as WebpackBundle
);
this.memfs.rmdirSync(outDir);
onComplete(undefined, bundle, stats);
});
}
private nodeModulesInContext(): string[] |
}
| {
return this.memfs.readdirSync(path.join(this.config.context, 'node_modules'));
} | identifier_body |
uStripDesign.py | # -*- coding: utf-8 -*-
# @Author: Marco Benzi <marco.benzi@alumnos.usm.cl>
# @Date: 2015-06-07 19:44:12
# @Last Modified 2015-06-09
# @Last Modified time: 2015-06-09 16:07:05
# ==========================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==========================================================================
import math
"""
Speed of light constant
"""
c = 3E8
"""
Vacuum permittivity
"""
e0 = 8.8541E-12
"""
Vacuum permeability
"""
u0 = 4E-7*math.pi
def getEffectivePermitivity(WHratio, er):
"""
Returns the effective permitivity for a given W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `er` : Relative permitivity of the dielectric.
"""
if WHratio <= 1:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5) + 0.04*(1-WHratio)**2)*(er -1)/2
else:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5))*(er -1)/2
def getAuxVarA(Zo,er):
"""
Returns the auxiliary variable
A = (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
def getAuxVarB(Zo,er):
"""
Returns the auxiliary variable
B = (377*math.pi)/(2*Zo*math.sqrt(er))
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (377*math.pi)/(2*Zo*math.sqrt(er))
def getWHRatioA(Zo,er):
"""
Returns the W/H ratio for W/H < 2. If the result is > 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
A = getAuxVarA(Zo,er)
return (8*math.e**A)/(math.e**(2*A) - 2)
def getWHRatioB(Zo,er):
"""
Returns the W/H ratio for W/H > 2. If the result is < 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
B = getAuxVarB(Zo,er)
return (2/math.pi)*(B-1 - math.log(2*B - 1) + (er - 1)*(math.log(B-1) + 0.39 - 0.61/er)/(2*er))
def getCharacteristicImpedance(WHratio, ef):
"""
Returns the characteristic impedance of the medium, based on the effective
permitivity and W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `ef` : Effective permitivity of the dielectric.
"""
if WHratio <= 1:
return (60/math.sqrt(ef))*math.log(8/WHratio + WHratio/4)
else:
return (120*math.pi/math.sqrt(ef))/(WHratio + 1.393 + 0.667*math.log(WHratio +1.444))
def getWHRatio(Zo,er):
"""
Returns the W/H ratio, after trying with the two possible set of solutions,
for when W/H < 2 or else. When no solution, returns zero.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
efa = er
efb = er
Zoa = Zo
Zob = Zo
while 1:
rA = getWHRatioA(Zoa,efa)
rB = getWHRatioB(Zob,efb)
if rA < 2:
return rA
if rB > 2:
return rB
Zoa = math.sqrt(efa)*Zoa
Zob = math.sqrt(efb)*Zob
def getCorrectedWidth(W,H,t):
"""
For significant conductor thickness, this returns the corrected width.
Paramenters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
"""
if t < H and t < W/2:
if W/H <= math.pi/2:
return W + (1 + math.log(2*H/t))*(t/math.pi)
else:
return W + (1 + math.log(4*math.pi*H/t))*(t/math.pi)
else:
print "The conductor is too thick!!"
def getConductorLoss(W,H,t,sigma,f,Zo):
"""
Returns the conductor loss in [Np/m].
Parameters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
- `sigma` : Conductance of medium
- `f` : Operating frequency
- `Zo` : Characteristic impedance
"""
We = getCorrectedWidth(W,H,t)
P = 1 - (We/4/H)**2
Rs = math.sqrt((math.pi*f*u0)/sigma)
Q = 1 + H/We + (math.log((2*H)/t)-t/W)*H/(We*math.pi)
if W/H <= 1/(2*math.pi):
return (1 + H/We + (math.log(4*pi*W/t) + t/W)*H/(math.pi*We))*(8.68*Rs*P)/(2*pi*Zo*H)
elif W/H <= 2:
return (8.68*Rs*P*Q)/(2*math.pi*Zo*H)
else:
return ((8.68*Rs*Q)/(Zo*H))*(We/H + (We/math.pi/H)/(We/2/H)+0.94)*((H/We + 2*math.log(We/2/H + 0.94)/math.pi)**(-2))
def getDielectricLoss(er,ef,tanD,f):
| """
Returns the dielectric loss in [dB/cm].
Paramenters:
- `er` : Relative permitivity of the dielectric
- `ef` : Effective permitivity
- `tanD` : tan \delta
- `f` : Operating frequency
"""
lam = c/math.sqrt(ef)/f
return 27.3*(er*(ef-1)*tanD)/(lam*math.sqrt(er)*(er-1)) | identifier_body | |
uStripDesign.py | # -*- coding: utf-8 -*-
# @Author: Marco Benzi <marco.benzi@alumnos.usm.cl>
# @Date: 2015-06-07 19:44:12
# @Last Modified 2015-06-09
# @Last Modified time: 2015-06-09 16:07:05
# ==========================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==========================================================================
import math
"""
Speed of light constant
"""
c = 3E8
"""
Vacuum permittivity
"""
e0 = 8.8541E-12
"""
Vacuum permeability
"""
u0 = 4E-7*math.pi
def getEffectivePermitivity(WHratio, er):
"""
Returns the effective permitivity for a given W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `er` : Relative permitivity of the dielectric.
"""
if WHratio <= 1:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5) + 0.04*(1-WHratio)**2)*(er -1)/2
else:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5))*(er -1)/2
def getAuxVarA(Zo,er):
"""
Returns the auxiliary variable
A = (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
def getAuxVarB(Zo,er):
"""
Returns the auxiliary variable
B = (377*math.pi)/(2*Zo*math.sqrt(er))
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (377*math.pi)/(2*Zo*math.sqrt(er))
def getWHRatioA(Zo,er):
"""
Returns the W/H ratio for W/H < 2. If the result is > 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
A = getAuxVarA(Zo,er)
return (8*math.e**A)/(math.e**(2*A) - 2)
def getWHRatioB(Zo,er):
"""
Returns the W/H ratio for W/H > 2. If the result is < 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
B = getAuxVarB(Zo,er)
return (2/math.pi)*(B-1 - math.log(2*B - 1) + (er - 1)*(math.log(B-1) + 0.39 - 0.61/er)/(2*er))
def getCharacteristicImpedance(WHratio, ef):
"""
Returns the characteristic impedance of the medium, based on the effective
permitivity and W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `ef` : Effective permitivity of the dielectric.
"""
if WHratio <= 1:
return (60/math.sqrt(ef))*math.log(8/WHratio + WHratio/4)
else:
return (120*math.pi/math.sqrt(ef))/(WHratio + 1.393 + 0.667*math.log(WHratio +1.444))
def getWHRatio(Zo,er):
"""
Returns the W/H ratio, after trying with the two possible set of solutions,
for when W/H < 2 or else. When no solution, returns zero.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
efa = er
efb = er
Zoa = Zo
Zob = Zo
while 1:
rA = getWHRatioA(Zoa,efa)
rB = getWHRatioB(Zob,efb)
if rA < 2:
return rA
if rB > 2:
return rB
Zoa = math.sqrt(efa)*Zoa
Zob = math.sqrt(efb)*Zob
def getCorrectedWidth(W,H,t):
"""
For significant conductor thickness, this returns the corrected width.
Paramenters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
""" | else:
return W + (1 + math.log(4*math.pi*H/t))*(t/math.pi)
else:
print "The conductor is too thick!!"
def getConductorLoss(W,H,t,sigma,f,Zo):
"""
Returns the conductor loss in [Np/m].
Parameters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
- `sigma` : Conductance of medium
- `f` : Operating frequency
- `Zo` : Characteristic impedance
"""
We = getCorrectedWidth(W,H,t)
P = 1 - (We/4/H)**2
Rs = math.sqrt((math.pi*f*u0)/sigma)
Q = 1 + H/We + (math.log((2*H)/t)-t/W)*H/(We*math.pi)
if W/H <= 1/(2*math.pi):
return (1 + H/We + (math.log(4*pi*W/t) + t/W)*H/(math.pi*We))*(8.68*Rs*P)/(2*pi*Zo*H)
elif W/H <= 2:
return (8.68*Rs*P*Q)/(2*math.pi*Zo*H)
else:
return ((8.68*Rs*Q)/(Zo*H))*(We/H + (We/math.pi/H)/(We/2/H)+0.94)*((H/We + 2*math.log(We/2/H + 0.94)/math.pi)**(-2))
def getDielectricLoss(er,ef,tanD,f):
"""
Returns the dielectric loss in [dB/cm].
Paramenters:
- `er` : Relative permitivity of the dielectric
- `ef` : Effective permitivity
- `tanD` : tan \delta
- `f` : Operating frequency
"""
lam = c/math.sqrt(ef)/f
return 27.3*(er*(ef-1)*tanD)/(lam*math.sqrt(er)*(er-1)) | if t < H and t < W/2:
if W/H <= math.pi/2:
return W + (1 + math.log(2*H/t))*(t/math.pi) | random_line_split |
uStripDesign.py | # -*- coding: utf-8 -*-
# @Author: Marco Benzi <marco.benzi@alumnos.usm.cl>
# @Date: 2015-06-07 19:44:12
# @Last Modified 2015-06-09
# @Last Modified time: 2015-06-09 16:07:05
# ==========================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==========================================================================
import math
"""
Speed of light constant
"""
c = 3E8
"""
Vacuum permittivity
"""
e0 = 8.8541E-12
"""
Vacuum permeability
"""
u0 = 4E-7*math.pi
def getEffectivePermitivity(WHratio, er):
"""
Returns the effective permitivity for a given W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `er` : Relative permitivity of the dielectric.
"""
if WHratio <= 1:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5) + 0.04*(1-WHratio)**2)*(er -1)/2
else:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5))*(er -1)/2
def getAuxVarA(Zo,er):
"""
Returns the auxiliary variable
A = (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
def getAuxVarB(Zo,er):
"""
Returns the auxiliary variable
B = (377*math.pi)/(2*Zo*math.sqrt(er))
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (377*math.pi)/(2*Zo*math.sqrt(er))
def getWHRatioA(Zo,er):
"""
Returns the W/H ratio for W/H < 2. If the result is > 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
A = getAuxVarA(Zo,er)
return (8*math.e**A)/(math.e**(2*A) - 2)
def getWHRatioB(Zo,er):
"""
Returns the W/H ratio for W/H > 2. If the result is < 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
B = getAuxVarB(Zo,er)
return (2/math.pi)*(B-1 - math.log(2*B - 1) + (er - 1)*(math.log(B-1) + 0.39 - 0.61/er)/(2*er))
def getCharacteristicImpedance(WHratio, ef):
"""
Returns the characteristic impedance of the medium, based on the effective
permitivity and W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `ef` : Effective permitivity of the dielectric.
"""
if WHratio <= 1:
return (60/math.sqrt(ef))*math.log(8/WHratio + WHratio/4)
else:
return (120*math.pi/math.sqrt(ef))/(WHratio + 1.393 + 0.667*math.log(WHratio +1.444))
def getWHRatio(Zo,er):
"""
Returns the W/H ratio, after trying with the two possible set of solutions,
for when W/H < 2 or else. When no solution, returns zero.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
efa = er
efb = er
Zoa = Zo
Zob = Zo
while 1:
|
def getCorrectedWidth(W,H,t):
"""
For significant conductor thickness, this returns the corrected width.
Paramenters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
"""
if t < H and t < W/2:
if W/H <= math.pi/2:
return W + (1 + math.log(2*H/t))*(t/math.pi)
else:
return W + (1 + math.log(4*math.pi*H/t))*(t/math.pi)
else:
print "The conductor is too thick!!"
def getConductorLoss(W,H,t,sigma,f,Zo):
"""
Returns the conductor loss in [Np/m].
Parameters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
- `sigma` : Conductance of medium
- `f` : Operating frequency
- `Zo` : Characteristic impedance
"""
We = getCorrectedWidth(W,H,t)
P = 1 - (We/4/H)**2
Rs = math.sqrt((math.pi*f*u0)/sigma)
Q = 1 + H/We + (math.log((2*H)/t)-t/W)*H/(We*math.pi)
if W/H <= 1/(2*math.pi):
return (1 + H/We + (math.log(4*pi*W/t) + t/W)*H/(math.pi*We))*(8.68*Rs*P)/(2*pi*Zo*H)
elif W/H <= 2:
return (8.68*Rs*P*Q)/(2*math.pi*Zo*H)
else:
return ((8.68*Rs*Q)/(Zo*H))*(We/H + (We/math.pi/H)/(We/2/H)+0.94)*((H/We + 2*math.log(We/2/H + 0.94)/math.pi)**(-2))
def getDielectricLoss(er,ef,tanD,f):
"""
Returns the dielectric loss in [dB/cm].
Paramenters:
- `er` : Relative permitivity of the dielectric
- `ef` : Effective permitivity
- `tanD` : tan \delta
- `f` : Operating frequency
"""
lam = c/math.sqrt(ef)/f
return 27.3*(er*(ef-1)*tanD)/(lam*math.sqrt(er)*(er-1)) | rA = getWHRatioA(Zoa,efa)
rB = getWHRatioB(Zob,efb)
if rA < 2:
return rA
if rB > 2:
return rB
Zoa = math.sqrt(efa)*Zoa
Zob = math.sqrt(efb)*Zob | conditional_block |
uStripDesign.py | # -*- coding: utf-8 -*-
# @Author: Marco Benzi <marco.benzi@alumnos.usm.cl>
# @Date: 2015-06-07 19:44:12
# @Last Modified 2015-06-09
# @Last Modified time: 2015-06-09 16:07:05
# ==========================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==========================================================================
import math
"""
Speed of light constant
"""
c = 3E8
"""
Vacuum permittivity
"""
e0 = 8.8541E-12
"""
Vacuum permeability
"""
u0 = 4E-7*math.pi
def getEffectivePermitivity(WHratio, er):
"""
Returns the effective permitivity for a given W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `er` : Relative permitivity of the dielectric.
"""
if WHratio <= 1:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5) + 0.04*(1-WHratio)**2)*(er -1)/2
else:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5))*(er -1)/2
def getAuxVarA(Zo,er):
"""
Returns the auxiliary variable
A = (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
def getAuxVarB(Zo,er):
"""
Returns the auxiliary variable
B = (377*math.pi)/(2*Zo*math.sqrt(er))
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (377*math.pi)/(2*Zo*math.sqrt(er))
def getWHRatioA(Zo,er):
"""
Returns the W/H ratio for W/H < 2. If the result is > 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
A = getAuxVarA(Zo,er)
return (8*math.e**A)/(math.e**(2*A) - 2)
def getWHRatioB(Zo,er):
"""
Returns the W/H ratio for W/H > 2. If the result is < 2, then other method
should be used.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
B = getAuxVarB(Zo,er)
return (2/math.pi)*(B-1 - math.log(2*B - 1) + (er - 1)*(math.log(B-1) + 0.39 - 0.61/er)/(2*er))
def getCharacteristicImpedance(WHratio, ef):
"""
Returns the characteristic impedance of the medium, based on the effective
permitivity and W/H ratio.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `ef` : Effective permitivity of the dielectric.
"""
if WHratio <= 1:
return (60/math.sqrt(ef))*math.log(8/WHratio + WHratio/4)
else:
return (120*math.pi/math.sqrt(ef))/(WHratio + 1.393 + 0.667*math.log(WHratio +1.444))
def getWHRatio(Zo,er):
"""
Returns the W/H ratio, after trying with the two possible set of solutions,
for when W/H < 2 or else. When no solution, returns zero.
This function assumes that the thickenss of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
efa = er
efb = er
Zoa = Zo
Zob = Zo
while 1:
rA = getWHRatioA(Zoa,efa)
rB = getWHRatioB(Zob,efb)
if rA < 2:
return rA
if rB > 2:
return rB
Zoa = math.sqrt(efa)*Zoa
Zob = math.sqrt(efb)*Zob
def | (W,H,t):
"""
For significant conductor thickness, this returns the corrected width.
Paramenters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
"""
if t < H and t < W/2:
if W/H <= math.pi/2:
return W + (1 + math.log(2*H/t))*(t/math.pi)
else:
return W + (1 + math.log(4*math.pi*H/t))*(t/math.pi)
else:
print "The conductor is too thick!!"
def getConductorLoss(W,H,t,sigma,f,Zo):
"""
Returns the conductor loss in [Np/m].
Parameters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
- `sigma` : Conductance of medium
- `f` : Operating frequency
- `Zo` : Characteristic impedance
"""
We = getCorrectedWidth(W,H,t)
P = 1 - (We/4/H)**2
Rs = math.sqrt((math.pi*f*u0)/sigma)
Q = 1 + H/We + (math.log((2*H)/t)-t/W)*H/(We*math.pi)
if W/H <= 1/(2*math.pi):
return (1 + H/We + (math.log(4*pi*W/t) + t/W)*H/(math.pi*We))*(8.68*Rs*P)/(2*pi*Zo*H)
elif W/H <= 2:
return (8.68*Rs*P*Q)/(2*math.pi*Zo*H)
else:
return ((8.68*Rs*Q)/(Zo*H))*(We/H + (We/math.pi/H)/(We/2/H)+0.94)*((H/We + 2*math.log(We/2/H + 0.94)/math.pi)**(-2))
def getDielectricLoss(er,ef,tanD,f):
"""
Returns the dielectric loss in [dB/cm].
Paramenters:
- `er` : Relative permitivity of the dielectric
- `ef` : Effective permitivity
- `tanD` : tan \delta
- `f` : Operating frequency
"""
lam = c/math.sqrt(ef)/f
return 27.3*(er*(ef-1)*tanD)/(lam*math.sqrt(er)*(er-1)) | getCorrectedWidth | identifier_name |
tflow.py | import uuid
from typing import Optional, Union
from mitmproxy import connection
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy.test.tutils import treq, tresp
from wsproto.frame_protocol import Opcode
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None) -> tcp.TCPFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
|
if messages is True:
messages = [
tcp.TCPMessage(True, b"hello", 946681204.2),
tcp.TCPMessage(False, b"it's me", 946681204.5),
]
if err is True:
err = terr()
f = tcp.TCPFlow(client_conn, server_conn)
f.messages = messages
f.error = err
f.live = True
return f
def twebsocketflow(messages=True, err=None, close_code=None, close_reason='') -> http.HTTPFlow:
flow = http.HTTPFlow(tclient_conn(), tserver_conn())
flow.request = http.Request(
"example.com",
80,
b"GET",
b"http",
b"example.com",
b"/ws",
b"HTTP/1.1",
headers=http.Headers(
connection="upgrade",
upgrade="websocket",
sec_websocket_version="13",
sec_websocket_key="1234",
),
content=b'',
trailers=None,
timestamp_start=946681200,
timestamp_end=946681201,
)
flow.response = http.Response(
b"HTTP/1.1",
101,
reason=b"Switching Protocols",
headers=http.Headers(
connection='upgrade',
upgrade='websocket',
sec_websocket_accept=b'',
),
content=b'',
trailers=None,
timestamp_start=946681202,
timestamp_end=946681203,
)
flow.websocket = twebsocket()
flow.websocket.close_reason = close_reason
if close_code is not None:
flow.websocket.close_code = close_code
else:
if err is True:
# ABNORMAL_CLOSURE
flow.websocket.close_code = 1006
else:
# NORMAL_CLOSURE
flow.websocket.close_code = 1000
flow.live = True
return flow
def tflow(
*,
client_conn: Optional[connection.Client] = None,
server_conn: Optional[connection.Server] = None,
req: Optional[http.Request] = None,
resp: Union[bool, http.Response] = False,
err: Union[bool, flow.Error] = False,
ws: Union[bool, websocket.WebSocketData] = False,
live: bool = True,
) -> http.HTTPFlow:
"""Create a flow for testing."""
if client_conn is None:
client_conn = tclient_conn()
if server_conn is None:
server_conn = tserver_conn()
if req is None:
req = treq()
if resp is True:
resp = tresp()
if err is True:
err = terr()
if ws is True:
ws = twebsocket()
assert resp is False or isinstance(resp, http.Response)
assert err is False or isinstance(err, flow.Error)
assert ws is False or isinstance(ws, websocket.WebSocketData)
f = http.HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp or None
f.error = err or None
f.websocket = ws or None
f.live = live
return f
class DummyFlow(flow.Flow):
"""A flow that is neither HTTP nor TCP."""
def __init__(self, client_conn, server_conn, live=None):
super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None) -> DummyFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if err is True:
err = terr()
f = DummyFlow(client_conn, server_conn)
f.error = err
f.live = True
return f
def tclient_conn() -> connection.Client:
c = connection.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
tls_established=True,
timestamp_start=946681200,
timestamp_tls_setup=946681201,
timestamp_end=946681206,
sni="address",
cipher_name="cipher",
alpn=b"http/1.1",
tls_version="TLSv1.2",
tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
state=0,
sockname=("", 0),
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_list=[],
))
return c
def tserver_conn() -> connection.Server:
c = connection.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),
ip_address=("192.168.0.1", 22),
timestamp_start=946681202,
timestamp_tcp_setup=946681203,
timestamp_tls_setup=946681204,
timestamp_end=946681205,
tls_established=True,
sni="address",
alpn=None,
tls_version="TLSv1.2",
via=None,
state=0,
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_name=None,
cipher_list=[],
via2=None,
))
return c
def terr(content: str = "error") -> flow.Error:
err = flow.Error(content, 946681207)
return err
def twebsocket(messages: bool = True) -> websocket.WebSocketData:
ws = websocket.WebSocketData()
if messages:
ws.messages = [
websocket.WebSocketMessage(Opcode.BINARY, True, b"hello binary", 946681203),
websocket.WebSocketMessage(Opcode.TEXT, True, b"hello text", 946681204),
websocket.WebSocketMessage(Opcode.TEXT, False, b"it's me", 946681205),
]
ws.close_reason = "Close Reason"
ws.close_code = 1000
ws.closed_by_client = False
ws.timestamp_end = 946681205
return ws
| server_conn = tserver_conn() | conditional_block |
tflow.py | import uuid
from typing import Optional, Union
from mitmproxy import connection
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy.test.tutils import treq, tresp
from wsproto.frame_protocol import Opcode
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None) -> tcp.TCPFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if messages is True:
messages = [
tcp.TCPMessage(True, b"hello", 946681204.2),
tcp.TCPMessage(False, b"it's me", 946681204.5),
]
if err is True:
err = terr()
f = tcp.TCPFlow(client_conn, server_conn)
f.messages = messages
f.error = err
f.live = True
return f
def twebsocketflow(messages=True, err=None, close_code=None, close_reason='') -> http.HTTPFlow:
|
def tflow(
*,
client_conn: Optional[connection.Client] = None,
server_conn: Optional[connection.Server] = None,
req: Optional[http.Request] = None,
resp: Union[bool, http.Response] = False,
err: Union[bool, flow.Error] = False,
ws: Union[bool, websocket.WebSocketData] = False,
live: bool = True,
) -> http.HTTPFlow:
"""Create a flow for testing."""
if client_conn is None:
client_conn = tclient_conn()
if server_conn is None:
server_conn = tserver_conn()
if req is None:
req = treq()
if resp is True:
resp = tresp()
if err is True:
err = terr()
if ws is True:
ws = twebsocket()
assert resp is False or isinstance(resp, http.Response)
assert err is False or isinstance(err, flow.Error)
assert ws is False or isinstance(ws, websocket.WebSocketData)
f = http.HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp or None
f.error = err or None
f.websocket = ws or None
f.live = live
return f
class DummyFlow(flow.Flow):
"""A flow that is neither HTTP nor TCP."""
def __init__(self, client_conn, server_conn, live=None):
super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None) -> DummyFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if err is True:
err = terr()
f = DummyFlow(client_conn, server_conn)
f.error = err
f.live = True
return f
def tclient_conn() -> connection.Client:
c = connection.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
tls_established=True,
timestamp_start=946681200,
timestamp_tls_setup=946681201,
timestamp_end=946681206,
sni="address",
cipher_name="cipher",
alpn=b"http/1.1",
tls_version="TLSv1.2",
tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
state=0,
sockname=("", 0),
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_list=[],
))
return c
def tserver_conn() -> connection.Server:
c = connection.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),
ip_address=("192.168.0.1", 22),
timestamp_start=946681202,
timestamp_tcp_setup=946681203,
timestamp_tls_setup=946681204,
timestamp_end=946681205,
tls_established=True,
sni="address",
alpn=None,
tls_version="TLSv1.2",
via=None,
state=0,
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_name=None,
cipher_list=[],
via2=None,
))
return c
def terr(content: str = "error") -> flow.Error:
err = flow.Error(content, 946681207)
return err
def twebsocket(messages: bool = True) -> websocket.WebSocketData:
ws = websocket.WebSocketData()
if messages:
ws.messages = [
websocket.WebSocketMessage(Opcode.BINARY, True, b"hello binary", 946681203),
websocket.WebSocketMessage(Opcode.TEXT, True, b"hello text", 946681204),
websocket.WebSocketMessage(Opcode.TEXT, False, b"it's me", 946681205),
]
ws.close_reason = "Close Reason"
ws.close_code = 1000
ws.closed_by_client = False
ws.timestamp_end = 946681205
return ws
| flow = http.HTTPFlow(tclient_conn(), tserver_conn())
flow.request = http.Request(
"example.com",
80,
b"GET",
b"http",
b"example.com",
b"/ws",
b"HTTP/1.1",
headers=http.Headers(
connection="upgrade",
upgrade="websocket",
sec_websocket_version="13",
sec_websocket_key="1234",
),
content=b'',
trailers=None,
timestamp_start=946681200,
timestamp_end=946681201,
)
flow.response = http.Response(
b"HTTP/1.1",
101,
reason=b"Switching Protocols",
headers=http.Headers(
connection='upgrade',
upgrade='websocket',
sec_websocket_accept=b'',
),
content=b'',
trailers=None,
timestamp_start=946681202,
timestamp_end=946681203,
)
flow.websocket = twebsocket()
flow.websocket.close_reason = close_reason
if close_code is not None:
flow.websocket.close_code = close_code
else:
if err is True:
# ABNORMAL_CLOSURE
flow.websocket.close_code = 1006
else:
# NORMAL_CLOSURE
flow.websocket.close_code = 1000
flow.live = True
return flow | identifier_body |
tflow.py | import uuid
from typing import Optional, Union
from mitmproxy import connection
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy.test.tutils import treq, tresp
from wsproto.frame_protocol import Opcode
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None) -> tcp.TCPFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if messages is True:
messages = [
tcp.TCPMessage(True, b"hello", 946681204.2),
tcp.TCPMessage(False, b"it's me", 946681204.5),
]
if err is True:
err = terr()
f = tcp.TCPFlow(client_conn, server_conn)
f.messages = messages
f.error = err
f.live = True
return f
def | (messages=True, err=None, close_code=None, close_reason='') -> http.HTTPFlow:
flow = http.HTTPFlow(tclient_conn(), tserver_conn())
flow.request = http.Request(
"example.com",
80,
b"GET",
b"http",
b"example.com",
b"/ws",
b"HTTP/1.1",
headers=http.Headers(
connection="upgrade",
upgrade="websocket",
sec_websocket_version="13",
sec_websocket_key="1234",
),
content=b'',
trailers=None,
timestamp_start=946681200,
timestamp_end=946681201,
)
flow.response = http.Response(
b"HTTP/1.1",
101,
reason=b"Switching Protocols",
headers=http.Headers(
connection='upgrade',
upgrade='websocket',
sec_websocket_accept=b'',
),
content=b'',
trailers=None,
timestamp_start=946681202,
timestamp_end=946681203,
)
flow.websocket = twebsocket()
flow.websocket.close_reason = close_reason
if close_code is not None:
flow.websocket.close_code = close_code
else:
if err is True:
# ABNORMAL_CLOSURE
flow.websocket.close_code = 1006
else:
# NORMAL_CLOSURE
flow.websocket.close_code = 1000
flow.live = True
return flow
def tflow(
*,
client_conn: Optional[connection.Client] = None,
server_conn: Optional[connection.Server] = None,
req: Optional[http.Request] = None,
resp: Union[bool, http.Response] = False,
err: Union[bool, flow.Error] = False,
ws: Union[bool, websocket.WebSocketData] = False,
live: bool = True,
) -> http.HTTPFlow:
"""Create a flow for testing."""
if client_conn is None:
client_conn = tclient_conn()
if server_conn is None:
server_conn = tserver_conn()
if req is None:
req = treq()
if resp is True:
resp = tresp()
if err is True:
err = terr()
if ws is True:
ws = twebsocket()
assert resp is False or isinstance(resp, http.Response)
assert err is False or isinstance(err, flow.Error)
assert ws is False or isinstance(ws, websocket.WebSocketData)
f = http.HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp or None
f.error = err or None
f.websocket = ws or None
f.live = live
return f
class DummyFlow(flow.Flow):
"""A flow that is neither HTTP nor TCP."""
def __init__(self, client_conn, server_conn, live=None):
super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None) -> DummyFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if err is True:
err = terr()
f = DummyFlow(client_conn, server_conn)
f.error = err
f.live = True
return f
def tclient_conn() -> connection.Client:
c = connection.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
tls_established=True,
timestamp_start=946681200,
timestamp_tls_setup=946681201,
timestamp_end=946681206,
sni="address",
cipher_name="cipher",
alpn=b"http/1.1",
tls_version="TLSv1.2",
tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
state=0,
sockname=("", 0),
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_list=[],
))
return c
def tserver_conn() -> connection.Server:
c = connection.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),
ip_address=("192.168.0.1", 22),
timestamp_start=946681202,
timestamp_tcp_setup=946681203,
timestamp_tls_setup=946681204,
timestamp_end=946681205,
tls_established=True,
sni="address",
alpn=None,
tls_version="TLSv1.2",
via=None,
state=0,
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_name=None,
cipher_list=[],
via2=None,
))
return c
def terr(content: str = "error") -> flow.Error:
err = flow.Error(content, 946681207)
return err
def twebsocket(messages: bool = True) -> websocket.WebSocketData:
ws = websocket.WebSocketData()
if messages:
ws.messages = [
websocket.WebSocketMessage(Opcode.BINARY, True, b"hello binary", 946681203),
websocket.WebSocketMessage(Opcode.TEXT, True, b"hello text", 946681204),
websocket.WebSocketMessage(Opcode.TEXT, False, b"it's me", 946681205),
]
ws.close_reason = "Close Reason"
ws.close_code = 1000
ws.closed_by_client = False
ws.timestamp_end = 946681205
return ws
| twebsocketflow | identifier_name |
tflow.py | import uuid
from typing import Optional, Union
from mitmproxy import connection
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy.test.tutils import treq, tresp
from wsproto.frame_protocol import Opcode
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None) -> tcp.TCPFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if messages is True:
messages = [
tcp.TCPMessage(True, b"hello", 946681204.2),
tcp.TCPMessage(False, b"it's me", 946681204.5),
]
if err is True:
err = terr()
f = tcp.TCPFlow(client_conn, server_conn)
f.messages = messages
f.error = err
f.live = True
return f
def twebsocketflow(messages=True, err=None, close_code=None, close_reason='') -> http.HTTPFlow:
flow = http.HTTPFlow(tclient_conn(), tserver_conn())
flow.request = http.Request(
"example.com",
80,
b"GET", | b"http",
b"example.com",
b"/ws",
b"HTTP/1.1",
headers=http.Headers(
connection="upgrade",
upgrade="websocket",
sec_websocket_version="13",
sec_websocket_key="1234",
),
content=b'',
trailers=None,
timestamp_start=946681200,
timestamp_end=946681201,
)
flow.response = http.Response(
b"HTTP/1.1",
101,
reason=b"Switching Protocols",
headers=http.Headers(
connection='upgrade',
upgrade='websocket',
sec_websocket_accept=b'',
),
content=b'',
trailers=None,
timestamp_start=946681202,
timestamp_end=946681203,
)
flow.websocket = twebsocket()
flow.websocket.close_reason = close_reason
if close_code is not None:
flow.websocket.close_code = close_code
else:
if err is True:
# ABNORMAL_CLOSURE
flow.websocket.close_code = 1006
else:
# NORMAL_CLOSURE
flow.websocket.close_code = 1000
flow.live = True
return flow
def tflow(
*,
client_conn: Optional[connection.Client] = None,
server_conn: Optional[connection.Server] = None,
req: Optional[http.Request] = None,
resp: Union[bool, http.Response] = False,
err: Union[bool, flow.Error] = False,
ws: Union[bool, websocket.WebSocketData] = False,
live: bool = True,
) -> http.HTTPFlow:
"""Create a flow for testing."""
if client_conn is None:
client_conn = tclient_conn()
if server_conn is None:
server_conn = tserver_conn()
if req is None:
req = treq()
if resp is True:
resp = tresp()
if err is True:
err = terr()
if ws is True:
ws = twebsocket()
assert resp is False or isinstance(resp, http.Response)
assert err is False or isinstance(err, flow.Error)
assert ws is False or isinstance(ws, websocket.WebSocketData)
f = http.HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp or None
f.error = err or None
f.websocket = ws or None
f.live = live
return f
class DummyFlow(flow.Flow):
"""A flow that is neither HTTP nor TCP."""
def __init__(self, client_conn, server_conn, live=None):
super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None) -> DummyFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if err is True:
err = terr()
f = DummyFlow(client_conn, server_conn)
f.error = err
f.live = True
return f
def tclient_conn() -> connection.Client:
c = connection.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
tls_established=True,
timestamp_start=946681200,
timestamp_tls_setup=946681201,
timestamp_end=946681206,
sni="address",
cipher_name="cipher",
alpn=b"http/1.1",
tls_version="TLSv1.2",
tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
state=0,
sockname=("", 0),
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_list=[],
))
return c
def tserver_conn() -> connection.Server:
c = connection.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),
ip_address=("192.168.0.1", 22),
timestamp_start=946681202,
timestamp_tcp_setup=946681203,
timestamp_tls_setup=946681204,
timestamp_end=946681205,
tls_established=True,
sni="address",
alpn=None,
tls_version="TLSv1.2",
via=None,
state=0,
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_name=None,
cipher_list=[],
via2=None,
))
return c
def terr(content: str = "error") -> flow.Error:
err = flow.Error(content, 946681207)
return err
def twebsocket(messages: bool = True) -> websocket.WebSocketData:
ws = websocket.WebSocketData()
if messages:
ws.messages = [
websocket.WebSocketMessage(Opcode.BINARY, True, b"hello binary", 946681203),
websocket.WebSocketMessage(Opcode.TEXT, True, b"hello text", 946681204),
websocket.WebSocketMessage(Opcode.TEXT, False, b"it's me", 946681205),
]
ws.close_reason = "Close Reason"
ws.close_code = 1000
ws.closed_by_client = False
ws.timestamp_end = 946681205
return ws | random_line_split | |
num.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Finite<T>` struct.
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use num_traits::Float;
use std::default::Default;
use std::ops::Deref;
/// Encapsulates the IDL restricted float type.
#[derive(Clone, Copy, Eq, JSTraceable, PartialEq)]
pub struct Finite<T: Float>(T); | impl<T: Float> Finite<T> {
/// Create a new `Finite<T: Float>` safely.
pub fn new(value: T) -> Option<Finite<T>> {
if value.is_finite() {
Some(Finite(value))
} else {
None
}
}
/// Create a new `Finite<T: Float>`.
#[inline]
pub fn wrap(value: T) -> Finite<T> {
assert!(value.is_finite(),
"Finite<T> doesn't encapsulate unrestricted value.");
Finite(value)
}
}
impl<T: Float> Deref for Finite<T> {
type Target = T;
fn deref(&self) -> &T {
let &Finite(ref value) = self;
value
}
}
impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(**self).size_of(ops)
}
}
impl<T: Float + Default> Default for Finite<T> {
fn default() -> Finite<T> {
Finite::wrap(T::default())
}
} | random_line_split | |
num.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Finite<T>` struct.
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use num_traits::Float;
use std::default::Default;
use std::ops::Deref;
/// Encapsulates the IDL restricted float type.
#[derive(Clone, Copy, Eq, JSTraceable, PartialEq)]
pub struct | <T: Float>(T);
impl<T: Float> Finite<T> {
/// Create a new `Finite<T: Float>` safely.
pub fn new(value: T) -> Option<Finite<T>> {
if value.is_finite() {
Some(Finite(value))
} else {
None
}
}
/// Create a new `Finite<T: Float>`.
#[inline]
pub fn wrap(value: T) -> Finite<T> {
assert!(value.is_finite(),
"Finite<T> doesn't encapsulate unrestricted value.");
Finite(value)
}
}
impl<T: Float> Deref for Finite<T> {
type Target = T;
fn deref(&self) -> &T {
let &Finite(ref value) = self;
value
}
}
impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(**self).size_of(ops)
}
}
impl<T: Float + Default> Default for Finite<T> {
fn default() -> Finite<T> {
Finite::wrap(T::default())
}
}
| Finite | identifier_name |
num.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Finite<T>` struct.
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use num_traits::Float;
use std::default::Default;
use std::ops::Deref;
/// Encapsulates the IDL restricted float type.
#[derive(Clone, Copy, Eq, JSTraceable, PartialEq)]
pub struct Finite<T: Float>(T);
impl<T: Float> Finite<T> {
/// Create a new `Finite<T: Float>` safely.
pub fn new(value: T) -> Option<Finite<T>> {
if value.is_finite() {
Some(Finite(value))
} else {
None
}
}
/// Create a new `Finite<T: Float>`.
#[inline]
pub fn wrap(value: T) -> Finite<T> |
}
impl<T: Float> Deref for Finite<T> {
type Target = T;
fn deref(&self) -> &T {
let &Finite(ref value) = self;
value
}
}
impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(**self).size_of(ops)
}
}
impl<T: Float + Default> Default for Finite<T> {
fn default() -> Finite<T> {
Finite::wrap(T::default())
}
}
| {
assert!(value.is_finite(),
"Finite<T> doesn't encapsulate unrestricted value.");
Finite(value)
} | identifier_body |
num.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Finite<T>` struct.
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use num_traits::Float;
use std::default::Default;
use std::ops::Deref;
/// Encapsulates the IDL restricted float type.
#[derive(Clone, Copy, Eq, JSTraceable, PartialEq)]
pub struct Finite<T: Float>(T);
impl<T: Float> Finite<T> {
/// Create a new `Finite<T: Float>` safely.
pub fn new(value: T) -> Option<Finite<T>> {
if value.is_finite() {
Some(Finite(value))
} else |
}
/// Create a new `Finite<T: Float>`.
#[inline]
pub fn wrap(value: T) -> Finite<T> {
assert!(value.is_finite(),
"Finite<T> doesn't encapsulate unrestricted value.");
Finite(value)
}
}
impl<T: Float> Deref for Finite<T> {
type Target = T;
fn deref(&self) -> &T {
let &Finite(ref value) = self;
value
}
}
impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(**self).size_of(ops)
}
}
impl<T: Float + Default> Default for Finite<T> {
fn default() -> Finite<T> {
Finite::wrap(T::default())
}
}
| {
None
} | conditional_block |
cartoonmad.py | """The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
import re
from urllib.parse import parse_qsl
from cmdlr.analyzer import BaseAnalyzer
from cmdlr.autil import fetch
class Analyzer(BaseAnalyzer):
"""The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
entry_patterns = [
re.compile(
r'^https?://(?:www.)?cartoonmad.com/comic/(\d+)(?:\.html)?$'
),
]
def entry_normalizer(self, url):
"""Normalize all possible entry url to single one form."""
match = self.entry_patterns[0].search(url)
id = match.group(1)
return 'https://www.cartoonmad.com/comic/{}.html'.format(id)
@staticmethod
def __extract_name(fetch_result):
return fetch_result.soup.title.string.split(' - ')[0]
@staticmethod
def __extract_volumes(fetch_result):
a_tags = (fetch_result.soup
.find('legend', string=re.compile('漫畫線上觀看'))
.parent
.find_all(href=re.compile(r'^/comic/')))
return {a.string: fetch_result.absurl(a.get('href'))
for a in a_tags}
@staticmethod
def __extract_finished(fetch_result):
return (True
if fetch_result.soup.find('img', src='/image/chap9.gif')
else False)
@staticmethod
def __extract_description(fetch_result):
return (fetch_result.soup
.find('fieldset', id='info').td.get_text().strip())
@staticmethod
def __extract_authors(fetch_result):
return [fetch_result.soup
.find(string=re.compile('作者:'))
.string.split(':')[1].strip()]
async def get_comic_info(self, url, request, **unused):
"""Get comic info."""
fetch_result = await fetch(url, request, encoding='big5')
return {
'name': self.__extract_name(fetch_result),
'volumes': self.__extract_volumes(fetch_result),
'description': self.__extract_description(fetch_result),
'authors': self.__extract_authors(fetch_result),
'finished': self.__extract_finished(fetch_result),
}
@staticmethod
def __get_imgurl_func(soup, absurl):
# print(soup.find('img', src=re.compile('comicpic.asp')))
src = soup.find('img', src=re.compile(r'comicpic.asp'))['src']
abspath, qs_string = absurl(src).split('?', maxsplit=1)
qs = dict(parse_qsl(qs_string))
file_parts = qs['file'].split('/')
file_parts[-1] = '{:0>3}'
qs['file'] = '/'.join(file_parts)
qs_tpl = '&'.join(['{}={}'.format(key, value)
for key, value in qs.items()])
abspath_tpl = '{}?{}'.format(abspath, qs_tpl)
def get_imgurl(page_number):
return abspath_tpl.format(page_number)
return get_imgurl
async def save_volume_images(self, url, request, save_image, **unused):
"""Get all images in | one volume."""
soup, absurl = await fetch(url, request, encoding='big5')
get_img_url = self.__get_imgurl_func(soup, absurl)
page_count = len(soup.find_all('option', value=True))
for page_num in range(1, page_count + 1):
save_image(
page_num,
url=get_img_url(page_num),
headers={'Referer': url},
)
| identifier_body | |
cartoonmad.py | """The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
import re
from urllib.parse import parse_qsl
from cmdlr.analyzer import BaseAnalyzer
from cmdlr.autil import fetch
class Analyzer(BaseAnalyzer):
"""The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
entry_patterns = [
re.compile(
r'^https?://(?:www.)?cartoonmad.com/comic/(\d+)(?:\.html)?$'
),
]
def entry_normalizer(self, url):
"""Normalize all possible entry url to single one form."""
match = self.entry_patterns[0].search(url)
id = match.group(1)
return 'https://www.cartoonmad.com/comic/{}.html'.format(id)
@staticmethod
def __extract_name(fetch_result):
return fetch_result.soup.title.string.split(' - ')[0]
@staticmethod
def __extract_volumes(fetch_result):
a_tags = (fetch_result.soup
.find('legend', string=re.compile('漫畫線上觀看'))
.parent
.find_all(href=re.compile(r'^/comic/')))
return {a.string: fetch_result.absurl(a.get('href'))
for a in a_tags}
@staticmethod
def __extract_finished(fetch_result):
return (True
if fetch_result.soup.find('img', src='/image/chap9.gif')
else False)
@staticmethod
def __extract_description(fetch_result):
return (fetch_result.soup
.find('fieldset', id='info').td.get_text().strip())
@staticmethod
def __extract_authors(fetch_result):
return [fetch_result.soup
.find(string=re.compile('作者:'))
.string.split(':')[1].strip()]
async def get_comic_info(self, url, request, **unused):
"""Get comic info."""
fetch_result = await fetch(url, request, encoding='big5')
return {
'name': self.__extract_name(fetch_result),
'volumes': self.__extract_volumes(fetch_result),
'description': self.__extract_description(fetch_result),
'authors': self.__extract_authors(fetch_result),
'finished': self.__extract_finished(fetch_result),
}
@staticmethod
def __get_imgurl_func(soup, absurl):
# print(soup.find('img', src=re.compile('comicpic.asp')))
src = soup.find('img', src=re.compile(r'comicpic.asp'))['src']
abspath, qs_string = absurl(src).split('?', maxsplit=1)
qs = dict(parse_qsl(qs_string))
file_parts = qs['file'].split('/')
file_parts[-1] = '{:0>3}'
qs['file'] = '/'.join(file_parts)
qs_tpl = '&'.join(['{}={}'.format(key, value)
for key, value in qs.items()])
abspath_tpl = '{}?{}'.format(abspath, qs_tpl)
def get_imgurl(page_number):
return abspath_tpl.format(page_number)
return get_imgurl
async def save_volume_images(self, url, request, save_image, **unused):
"""Get all images in one volume."""
soup, absurl = await fetch(url, request, encoding='big5')
get_img_url = self.__get_imgurl_func(soup, absurl)
page_count = len(soup.find_all('option', value=True))
for page_num in range(1, page_count + 1):
save_image(
| page_num,
url=get_img_url(page_num),
headers={'Referer': url},
)
| conditional_block | |
cartoonmad.py | """The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
import re
from urllib.parse import parse_qsl
from cmdlr.analyzer import BaseAnalyzer
from cmdlr.autil import fetch
class Analyzer(BaseAnalyzer):
"""The www.cartoonmad.com analyzer. | [Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
entry_patterns = [
re.compile(
r'^https?://(?:www.)?cartoonmad.com/comic/(\d+)(?:\.html)?$'
),
]
def entry_normalizer(self, url):
"""Normalize all possible entry url to single one form."""
match = self.entry_patterns[0].search(url)
id = match.group(1)
return 'https://www.cartoonmad.com/comic/{}.html'.format(id)
@staticmethod
def __extract_name(fetch_result):
return fetch_result.soup.title.string.split(' - ')[0]
@staticmethod
def __extract_volumes(fetch_result):
a_tags = (fetch_result.soup
.find('legend', string=re.compile('漫畫線上觀看'))
.parent
.find_all(href=re.compile(r'^/comic/')))
return {a.string: fetch_result.absurl(a.get('href'))
for a in a_tags}
@staticmethod
def __extract_finished(fetch_result):
return (True
if fetch_result.soup.find('img', src='/image/chap9.gif')
else False)
@staticmethod
def __extract_description(fetch_result):
return (fetch_result.soup
.find('fieldset', id='info').td.get_text().strip())
@staticmethod
def __extract_authors(fetch_result):
return [fetch_result.soup
.find(string=re.compile('作者:'))
.string.split(':')[1].strip()]
async def get_comic_info(self, url, request, **unused):
"""Get comic info."""
fetch_result = await fetch(url, request, encoding='big5')
return {
'name': self.__extract_name(fetch_result),
'volumes': self.__extract_volumes(fetch_result),
'description': self.__extract_description(fetch_result),
'authors': self.__extract_authors(fetch_result),
'finished': self.__extract_finished(fetch_result),
}
@staticmethod
def __get_imgurl_func(soup, absurl):
# print(soup.find('img', src=re.compile('comicpic.asp')))
src = soup.find('img', src=re.compile(r'comicpic.asp'))['src']
abspath, qs_string = absurl(src).split('?', maxsplit=1)
qs = dict(parse_qsl(qs_string))
file_parts = qs['file'].split('/')
file_parts[-1] = '{:0>3}'
qs['file'] = '/'.join(file_parts)
qs_tpl = '&'.join(['{}={}'.format(key, value)
for key, value in qs.items()])
abspath_tpl = '{}?{}'.format(abspath, qs_tpl)
def get_imgurl(page_number):
return abspath_tpl.format(page_number)
return get_imgurl
async def save_volume_images(self, url, request, save_image, **unused):
"""Get all images in one volume."""
soup, absurl = await fetch(url, request, encoding='big5')
get_img_url = self.__get_imgurl_func(soup, absurl)
page_count = len(soup.find_all('option', value=True))
for page_num in range(1, page_count + 1):
save_image(
page_num,
url=get_img_url(page_num),
headers={'Referer': url},
) | random_line_split | |
cartoonmad.py | """The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
import re
from urllib.parse import parse_qsl
from cmdlr.analyzer import BaseAnalyzer
from cmdlr.autil import fetch
class Analyzer(BaseAnalyzer):
"""The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
entry_patterns = [
re.compile(
r'^https?://(?:www.)?cartoonmad.com/comic/(\d+)(?:\.html)?$'
),
]
def entry_normalizer(self, url):
"""Normalize all possible entry url to single one form."""
match = self.entry_patterns[0].search(url)
id = match.group(1)
return 'https://www.cartoonmad.com/comic/{}.html'.format(id)
@staticmethod
def __extract_name(fetch_result):
return fetch_result.soup.title.string.split(' - ')[0]
@staticmethod
def __extract_volumes(fetch_result):
a_tags = (fetch_result.soup
.find('legend', string=re.compile('漫畫線上觀看'))
.parent
.find_all(href=re.compile(r'^/comic/')))
return {a.string: fetch_result.absurl(a.get('href'))
for a in a_tags}
@staticmethod
def __extract_finished(fetch_result):
return (True
if fetch_result.soup.find('img', src='/image/chap9.gif')
else False)
@staticmethod
def __extract_description(fetch_result):
return (fetch_result.soup
.find('fieldset', id='info').td.get_text().strip())
@staticmethod
def __extract_authors(fetch_result):
return [fetch_result.soup
.find(string=re.compile('作者:'))
.string.split(':')[1].strip()]
async def get_comic_info(self, | **unused):
"""Get comic info."""
fetch_result = await fetch(url, request, encoding='big5')
return {
'name': self.__extract_name(fetch_result),
'volumes': self.__extract_volumes(fetch_result),
'description': self.__extract_description(fetch_result),
'authors': self.__extract_authors(fetch_result),
'finished': self.__extract_finished(fetch_result),
}
@staticmethod
def __get_imgurl_func(soup, absurl):
# print(soup.find('img', src=re.compile('comicpic.asp')))
src = soup.find('img', src=re.compile(r'comicpic.asp'))['src']
abspath, qs_string = absurl(src).split('?', maxsplit=1)
qs = dict(parse_qsl(qs_string))
file_parts = qs['file'].split('/')
file_parts[-1] = '{:0>3}'
qs['file'] = '/'.join(file_parts)
qs_tpl = '&'.join(['{}={}'.format(key, value)
for key, value in qs.items()])
abspath_tpl = '{}?{}'.format(abspath, qs_tpl)
def get_imgurl(page_number):
return abspath_tpl.format(page_number)
return get_imgurl
async def save_volume_images(self, url, request, save_image, **unused):
"""Get all images in one volume."""
soup, absurl = await fetch(url, request, encoding='big5')
get_img_url = self.__get_imgurl_func(soup, absurl)
page_count = len(soup.find_all('option', value=True))
for page_num in range(1, page_count + 1):
save_image(
page_num,
url=get_img_url(page_num),
headers={'Referer': url},
)
| url, request, | identifier_name |
alloc_support.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Support for the `alloc` crate, when available.
use core::mem::MaybeUninit;
use core::pin::Pin;
use alloc::boxed::Box;
use alloc::rc::Rc;
use alloc::sync::Arc;
use crate::move_ref::DerefMove;
use crate::move_ref::MoveRef;
use crate::new::EmplaceUnpinned;
use crate::new::TryNew;
use crate::slot::DroppingSlot;
unsafe impl<T> DerefMove for Box<T> {
type Storage = Box<MaybeUninit<T>>;
#[inline]
fn | <'frame>(
self,
storage: DroppingSlot<'frame, Self::Storage>,
) -> MoveRef<'frame, Self::Target>
where
Self: 'frame,
{
let cast =
unsafe { Box::from_raw(Box::into_raw(self).cast::<MaybeUninit<T>>()) };
let (storage, drop_flag) = storage.put(cast);
unsafe { MoveRef::new_unchecked(storage.assume_init_mut(), drop_flag) }
}
}
impl<T> EmplaceUnpinned<T> for Pin<Box<T>> {
fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> {
let mut uninit = Box::new(MaybeUninit::<T>::uninit());
unsafe {
let pinned = Pin::new_unchecked(&mut *uninit);
n.try_new(pinned)?;
Ok(Pin::new_unchecked(Box::from_raw(
Box::into_raw(uninit).cast::<T>(),
)))
}
}
}
impl<T> EmplaceUnpinned<T> for Pin<Rc<T>> {
fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> {
let uninit = Rc::new(MaybeUninit::<T>::uninit());
unsafe {
let pinned = Pin::new_unchecked(&mut *(Rc::as_ptr(&uninit) as *mut _));
n.try_new(pinned)?;
Ok(Pin::new_unchecked(Rc::from_raw(
Rc::into_raw(uninit).cast::<T>(),
)))
}
}
}
impl<T> EmplaceUnpinned<T> for Pin<Arc<T>> {
fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> {
let uninit = Arc::new(MaybeUninit::<T>::uninit());
unsafe {
let pinned = Pin::new_unchecked(&mut *(Arc::as_ptr(&uninit) as *mut _));
n.try_new(pinned)?;
Ok(Pin::new_unchecked(Arc::from_raw(
Arc::into_raw(uninit).cast::<T>(),
)))
}
}
}
| deref_move | identifier_name |
alloc_support.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Support for the `alloc` crate, when available.
use core::mem::MaybeUninit;
use core::pin::Pin;
use alloc::boxed::Box;
use alloc::rc::Rc;
use alloc::sync::Arc;
use crate::move_ref::DerefMove;
use crate::move_ref::MoveRef;
use crate::new::EmplaceUnpinned;
use crate::new::TryNew;
use crate::slot::DroppingSlot;
unsafe impl<T> DerefMove for Box<T> {
type Storage = Box<MaybeUninit<T>>;
#[inline]
fn deref_move<'frame>(
self,
storage: DroppingSlot<'frame, Self::Storage>,
) -> MoveRef<'frame, Self::Target>
where
Self: 'frame,
{
let cast =
unsafe { Box::from_raw(Box::into_raw(self).cast::<MaybeUninit<T>>()) };
let (storage, drop_flag) = storage.put(cast);
unsafe { MoveRef::new_unchecked(storage.assume_init_mut(), drop_flag) }
}
}
impl<T> EmplaceUnpinned<T> for Pin<Box<T>> {
fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> {
let mut uninit = Box::new(MaybeUninit::<T>::uninit());
unsafe {
let pinned = Pin::new_unchecked(&mut *uninit);
n.try_new(pinned)?;
Ok(Pin::new_unchecked(Box::from_raw(
Box::into_raw(uninit).cast::<T>(),
)))
}
}
}
impl<T> EmplaceUnpinned<T> for Pin<Rc<T>> {
fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> {
let uninit = Rc::new(MaybeUninit::<T>::uninit());
unsafe {
let pinned = Pin::new_unchecked(&mut *(Rc::as_ptr(&uninit) as *mut _));
n.try_new(pinned)?;
Ok(Pin::new_unchecked(Rc::from_raw(
Rc::into_raw(uninit).cast::<T>(),
)))
}
}
}
impl<T> EmplaceUnpinned<T> for Pin<Arc<T>> {
fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> |
}
| {
let uninit = Arc::new(MaybeUninit::<T>::uninit());
unsafe {
let pinned = Pin::new_unchecked(&mut *(Arc::as_ptr(&uninit) as *mut _));
n.try_new(pinned)?;
Ok(Pin::new_unchecked(Arc::from_raw(
Arc::into_raw(uninit).cast::<T>(),
)))
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.