prompt large_stringlengths 70 991k | completion large_stringlengths 0 1.02k |
|---|---|
<|file_name|>mac_tray.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# coding:utf-8
import os
import sys
current_path = os.path.dirname(os.path.abspath(__file__))
helper_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, os.pardir, 'data', 'launcher', 'helper'))
if __name__ == "__main__":
default_path = os.path.abspath(os.path.join(current_path, os.pardir))
noarch_lib = os.path.abspath(os.path.join(default_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
osx_lib = os.path.join(default_path, 'lib', 'darwin')
sys.path.append(osx_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/3.8/Extras/lib/python/PyObjC"
sys.path.append(extra_lib)
from config import config
import module_init
import subprocess
import webbrowser
from xlog import getLogger
xlog = getLogger("launcher")
import AppKit
import SystemConfiguration
from PyObjCTools import AppHelper
class MacTrayObject(AppKit.NSObject):
def __init__(self):
pass
def applicationDidFinishLaunching_(self, notification):
setupHelper()
loadConfig()
self.setupUI()
self.registerObserver()
def setupUI(self):
self.statusbar = AppKit.NSStatusBar.systemStatusBar()
self.statusitem = self.statusbar.statusItemWithLength_(
AppKit.NSSquareStatusItemLength) # NSSquareStatusItemLength #NSVariableStatusItemLength
# Set initial image icon
icon_path = os.path.join(current_path, "web_ui", "favicon-mac.ico")
image = AppKit.NSImage.alloc().initByReferencingFile_(icon_path)
image.setScalesWhenResized_(True)
image.setSize_((20, 20))
self.statusitem.setImage_(image)
# Let it highlight upon clicking
self.statusitem.setHighlightMode_(1)
self.statusitem.setToolTip_("XX-Net")
# Get current selected mode
proxyState = getProxyState(currentService)
# Build a very simple menu
self.menu = AppKit.NSMenu.alloc().initWithTitle_('XX-Net')
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Config', 'config:', '')
self.menu.addItem_(menuitem)
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(getCurrentServiceMenuItemTitle(), None, '')
self.menu.addItem_(menuitem)
self.currentServiceMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Auto GAEProxy',
'enableAutoProxy:', '')
if proxyState == 'pac':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.autoGaeProxyMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global GAEProxy',
'enableGlobalProxy:', '')
if proxyState == 'gae':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalGaeProxyMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global X-Tunnel',
'enableGlobalXTunnel:', '')
if proxyState == 'x_tunnel':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalXTunnelMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global Smart-Router',
'enableGlobalSmartRouter:', '')
if proxyState == 'smart_router':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalSmartRouterMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Disable GAEProxy', 'disableProxy:',
'')
if proxyState == 'disable':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.disableGaeProxyMenuItem = menuitem
# Reset Menu Item
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Reset Each Module',
'restartEachModule:', '')
self.menu.addItem_(menuitem)
# Default event
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'windowWillClose:', '')
self.menu.addItem_(menuitem)
# Bind it to the status item
self.statusitem.setMenu_(self.menu)
# Hide dock icon
AppKit.NSApp.setActivationPolicy_(AppKit.NSApplicationActivationPolicyProhibited)
def updateStatusBarMenu(self):
self.currentServiceMenuItem.setTitle_(getCurrentServiceMenuItemTitle())
# Remove Tick before All Menu Items
self.autoGaeProxyMenuItem.setState_(AppKit.NSOffState)
self.globalGaeProxyMenuItem.setState_(AppKit.NSOffState)
self.globalXTunnelMenuItem.setState_(AppKit.NSOffState)
self.globalSmartRouterMenuItem.setState_(AppKit.NSOffState)
self.disableGaeProxyMenuItem.setState_(AppKit.NSOffState)
# Get current selected mode
proxyState = getProxyState(currentService)
# Update Tick before Menu Item
if proxyState == 'pac':
self.autoGaeProxyMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'gae':
self.globalGaeProxyMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'x_tunnel':
self.globalXTunnelMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'smart_router':
self.globalSmartRouterMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'disable':
self.disableGaeProxyMenuItem.setState_(AppKit.NSOnState)
# Trigger autovalidation
self.menu.update()
def validateMenuItem_(self, menuItem):
return currentService or (menuItem != self.autoGaeProxyMenuItem and
menuItem != self.globalGaeProxyMenuItem and
menuItem != self.globalXTunnelMenuItem and
menuItem != self.globalSmartRouterMenuItem and
menuItem != self.disableGaeProxyMenuItem)
def presentAlert_withTitle_(self, msg, title):
self.performSelectorOnMainThread_withObject_waitUntilDone_('presentAlertWithInfo:', [title, msg], True)
return self.alertReturn
def presentAlertWithInfo_(self, info):
alert = AppKit.NSAlert.alloc().init()
alert.setMessageText_(info[0])
alert.setInformativeText_(info[1])
alert.addButtonWithTitle_("OK")
alert.addButtonWithTitle_("Cancel")
self.alertReturn = alert.runModal() == AppKit.NSAlertFirstButtonReturn
def registerObserver(self):
nc = AppKit.NSWorkspace.sharedWorkspace().notificationCenter()
nc.addObserver_selector_name_object_(self, 'windowWillClose:', AppKit.NSWorkspaceWillPowerOffNotification, None)
def windowWillClose_(self, notification):
executeResult = subprocess.check_output(['networksetup', '-listallnetworkservices'])
services = executeResult.split(b'\n')
services = [service for service in services if service and service.find(b'*') == -1 and getProxyState(
service) != 'disable'] # Remove disabled services and empty lines
if len(services) > 0:
try:
list(map(helperDisableAutoProxy, services))
list(map(helperDisableGlobalProxy, services))
except:
disableAutoProxyCommand = ';'.join(map(getDisableAutoProxyCommand, services))
disableGlobalProxyCommand = ';'.join(map(getDisableGlobalProxyCommand, services))
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, disableGlobalProxyCommand)
xlog.info("try disable proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
module_init.stop_all()
os._exit(0)
AppKit.NSApp.terminate_(self)
def config_(self, notification):
host_port = config.control_port
webbrowser.open_new("http://127.0.0.1:%s/" % host_port)
def restartEachModule_(self, _):
module_init.stop_all()
module_init.start_all_auto()
def enableAutoProxy_(self, _):
try:
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
except:
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
enableAutoProxyCommand = getEnableAutoProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableGlobalProxyCommand, enableAutoProxyCommand)
xlog.info("try enable auto proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "pac"
config.save()
self.updateStatusBarMenu()
def enableGlobalProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableGlobalProxyCommand = getEnableGlobalProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, enableGlobalProxyCommand)
xlog.info("try enable global proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "gae"
config.save()
self.updateStatusBarMenu()
def enableGlobalXTunnel_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableXTunnelProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableXTunnelProxyCommand = getEnableXTunnelProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, enableXTunnelProxyCommand)
xlog.info("try enable global x-tunnel proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "x_tunnel"
config.save()
self.updateStatusBarMenu()
def enableGlobalSmartRouter_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableSmartRouterProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableSmartRouterCommand = getEnableSmartRouterProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, enableSmartRouterCommand)
xlog.info("try enable global smart-router proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "smart_router"
config.save()
self.updateStatusBarMenu()
def disableProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, disableGlobalProxyCommand)
xlog.info("try disable proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "disable"
config.save()
self.updateStatusBarMenu()
def setupHelper():
try:
with open(os.devnull) as devnull:
subprocess.check_call(helper_path, stderr=devnull)
except:
rmCommand = "rm \\\"%s\\\"" % helper_path
cpCommand = "cp \\\"%s\\\" \\\"%s\\\"" % (os.path.join(current_path, 'mac_helper'), helper_path)
chownCommand = "chown root \\\"%s\\\"" % helper_path
chmodCommand = "chmod 4755 \\\"%s\\\"" % helper_path
executeCommand = 'do shell script "%s;%s;%s;%s" with administrator privileges' % (
rmCommand, cpCommand, chownCommand, chmodCommand)
xlog.info("try setup helper:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
def getCurrentServiceMenuItemTitle():
if currentService:
return 'Connection: %s' % currentService
else:
return 'Connection: None'
def getProxyState(service):
if not service:
return
# Check if auto proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getautoproxyurl', service])
if (executeResult.find(b'http://127.0.0.1:8086/proxy.pac\nEnabled: Yes') != -1):
return "pac"
# Check if global proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getwebproxy', service])
if (executeResult.find(b'Enabled: Yes\nServer: 127.0.0.1\nPort: 8087') != -1):
return "gae"
# Check if global proxy is enabled
if (executeResult.find(b'Enabled: Yes\nServer: 127.0.0.1\nPort: 1080') != -1):
return "x_tunnel"
if (executeResult.find(b'Enabled: Yes\nServer: 127.0.0.1\nPort: 8086') != -1):
return "smart_router"
return "disable"
# Generate commands for Apple Script
def getEnableAutoProxyCommand(service):
return "networksetup -setautoproxyurl \\\"%s\\\" \\\"http://127.0.0.1:8086/proxy.pac\\\"" % service
def getDisableAutoProxyCommand(service):
return "networksetup -setautoproxystate \\\"%s\\\" off" % service
def getEnableGlobalProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8087" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8087" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getEnableXTunnelProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 1080" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 1080" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getEnableSmartRouterProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8086" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8086" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getDisableGlobalProxyCommand(service):
disableHttpProxyCommand = "networksetup -setwebproxystate \\\"%s\\\" off" % service
disableHttpsProxyCommand = "networksetup -setsecurewebproxystate \\\"%s\\\" off" % service
return "%s;%s" % (disableHttpProxyCommand, disableHttpsProxyCommand)
# Call helper
def helperEnableAutoProxy(service):
subprocess.check_call([helper_path, 'enableauto', service, 'http://127.0.0.1:8086/proxy.pac'])
def helperDisableAutoProxy(service):
subprocess.check_call([helper_path, 'disableauto', service])
def helperEnableGlobalProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8087'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8087'])
def helperEnableXTunnelProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '1080'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '1080'])
def helperEnableSmartRouterProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8086'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8086'])
def helperDisableGlobalProxy(service):
subprocess.check_call([helper_path, 'disablehttp', service])
subprocess.check_call([helper_path, 'disablehttps', service])
def loadConfig():
if not currentService:
return
proxy_setting = config.os_proxy_mode
if getProxyState(currentService) == proxy_setting:
return
try:
if proxy_setting == "pac":
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
elif proxy_setting == "gae":
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
elif proxy_setting == "x_tunnel":
helperDisableAutoProxy(currentService)
helperEnableXTunnelProxy(currentService)
elif proxy_setting == "smart_router":
helperDisableAutoProxy(currentService)
helperEnableSmartRouterProxy(currentService)<|fim▁hole|> helperDisableGlobalProxy(currentService)
else:
xlog.warn("proxy_setting:%r", proxy_setting)
except:
xlog.warn("helper failed, please manually reset proxy settings after switching connection")
sys_tray = MacTrayObject.alloc().init()
currentService = None
def fetchCurrentService(protocol):
global currentService
status = SystemConfiguration.SCDynamicStoreCopyValue(None, "State:/Network/Global/" + protocol)
if not status:
currentService = None
return
serviceID = status['PrimaryService']
service = SystemConfiguration.SCDynamicStoreCopyValue(None, "Setup:/Network/Service/" + serviceID)
if not service:
currentService = None
return
currentService = service['UserDefinedName']
@AppKit.objc.callbackFor(AppKit.CFNotificationCenterAddObserver)
def networkChanged(center, observer, name, object, userInfo):
fetchCurrentService('IPv4')
loadConfig()
sys_tray.updateStatusBarMenu()
# Note: the following code can't run in class
def serve_forever():
app = AppKit.NSApplication.sharedApplication()
app.setDelegate_(sys_tray)
# Listen for network change
nc = AppKit.CFNotificationCenterGetDarwinNotifyCenter()
AppKit.CFNotificationCenterAddObserver(nc, None, networkChanged, "com.apple.system.config.network_change", None,
AppKit.CFNotificationSuspensionBehaviorDeliverImmediately)
fetchCurrentService('IPv4')
AppHelper.runEventLoop()
def on_quit(widget=None, data=None):
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
def main():
serve_forever()
if __name__ == '__main__':
main()<|fim▁end|> | elif proxy_setting == "disable":
helperDisableAutoProxy(currentService) |
<|file_name|>productIdentification-edit.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnDestroy, OnInit, Self, Inject } from '@angular/core';
import { MAT_DIALOG_DATA, MatDialogRef } from '@angular/material/dialog';
import { Subscription, combineLatest } from 'rxjs';
import { switchMap, map } from 'rxjs/operators';
import { ContextService, MetaService, RefreshService, Saved } from '@allors/angular/services/core';
import { ProductIdentification, ProductIdentificationType } from '@allors/domain/generated';
import { PullRequest } from '@allors/protocol/system';
import { Meta } from '@allors/meta/generated';
import { SaveService, ObjectData } from '@allors/angular/material/services/core';
import { IObject, ISessionObject } from '@allors/domain/system';
import { Equals, Sort } from '@allors/data/system';
import { TestScope } from '@allors/angular/core';
@Component({
templateUrl: './productidentification-edit.component.html',
providers: [ContextService]
})
export class ProductIdentificationEditComponent extends TestScope implements OnInit, OnDestroy {
public m: Meta;
public title = 'Edit IGood Identification';
public container: ISessionObject;
public object: ProductIdentification;
public productIdentificationTypes: ProductIdentificationType[];
private subscription: Subscription;
constructor(
@Self() public allors: ContextService,
@Inject(MAT_DIALOG_DATA) public data: ObjectData,
public dialogRef: MatDialogRef<ProductIdentificationEditComponent>,
public metaService: MetaService,
public refreshService: RefreshService,
private saveService: SaveService,
) {
super();
this.m = this.metaService.m;
}
public ngOnInit(): void {
<|fim▁hole|> .pipe(
switchMap(() => {
const isCreate = (this.data as IObject).id === undefined;
const { objectType, associationRoleType } = this.data;
const pulls = [
pull.ProductIdentificationType({
predicate: new Equals({ propertyType: m.ProductIdentificationType.IsActive, value: true }),
sort: [
new Sort(m.ProductIdentificationType.Name),
],
})
];
if (!isCreate) {
pulls.push(
pull.ProductIdentification(
{
object: this.data.id,
include: {
ProductIdentificationType: x,
}
}),
);
}
if (isCreate && this.data.associationId) {
pulls.push(
pull.Good({ object: this.data.associationId }),
pull.Part({ object: this.data.associationId }),
);
}
return this.allors.context.load(new PullRequest({ pulls }))
.pipe(
map((loaded) => ({ loaded, create: isCreate, objectType, associationRoleType }))
);
})
)
.subscribe(({ loaded, create, objectType, associationRoleType }) => {
this.allors.context.reset();
this.container = loaded.objects.Good || loaded.objects.Part;
this.object = loaded.objects.ProductIdentification as ProductIdentification;
this.productIdentificationTypes = loaded.collections.ProductIdentificationTypes as ProductIdentificationType[];
if (create) {
this.title = 'Add Identification';
this.object = this.allors.context.create(objectType) as ProductIdentification;
this.container.add(associationRoleType, this.object);
}
});
}
public ngOnDestroy(): void {
if (this.subscription) {
this.subscription.unsubscribe();
}
}
public save(): void {
this.allors.context.save()
.subscribe(() => {
const data: IObject = {
id: this.object.id,
objectType: this.object.objectType,
};
this.dialogRef.close(data);
this.refreshService.refresh();
},
this.saveService.errorHandler
);
}
}<|fim▁end|> | const { m, pull, x } = this.metaService;
this.subscription = combineLatest(this.refreshService.refresh$) |
<|file_name|>rsd.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2.7
'''
RSD: The reciprocal smallest distance algorithm.
Wall, D.P., Fraser, H.B. and Hirsh, A.E. (2003) Detecting putative orthologs, Bioinformatics, 19, 1710-1711.
Original author: Dennis P. Wall, Department of Biological Sciences, Stanford University.
Contributors: I-Hsien Wu, Computational Biology Initiative, Harvard Medical School
Maintainer: Todd F. DeLuca, Center for Biomedical Informatics, Harvard Medical School
This program is written to run on linux. It has not been tested on Windows.
To run this program you need to have installed on your system:
Python 2.7
NCBI BLAST 2.2.24
paml 4.4
Kalign 2.04 (recommended) or clustalw 2.0.9 (deprecated)
See README for full details.
'''
# python package version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '1.1.7'
import cStringIO
import glob
import logging
import os
import re
import shutil
import subprocess
import time
import fasta
import nested
import util
PAML_ERROR_MSG = 'paml_error'
FORWARD_DIRECTION = 0
REVERSE_DIRECTION = 1
DASHLEN_RE = re.compile('^(-*)(.*?)(-*)$')
MAX_HITS = 3
MATRIX_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'jones.dat')
CODEML_CONTROL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'codeml.ctl')
# Constants used when aligning seqs with clustalw. Kalign does not need these.
USE_CLUSTALW = util.getBoolFromEnv('RSD_USE_CLUSTALW', False)
CLUSTAL_INPUT_FILENAME = 'clustal_fasta.faa'
CLUSTAL_ALIGNMENT_FILENAME = 'clustal_fasta.aln'
#################
# BLAST FUNCTIONS
#################
#
# Used to compute blast hits between two genomes, parse the results, and save the best hits to a file
#
def formatForBlast(fastaPath):
# os.chdir(os.path.dirname(fastaPath))
# cmd = 'formatdb -p -o -i'+os.path.basename(fastaPath)
# cmd = 'formatdb -p -o -i'+fastaPath
# redirect stdout to /dev/null to make the command quiter.
cmd = ['makeblastdb', '-in', fastaPath, '-dbtype', 'prot', '-parse_seqids']
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout=devnull)
def getHitId(hit):
return hit[0]
def getHitEvalue(hit):
'''
returns evalue as a float
'''
return hit[1]
def loadBlastHits(path):
'''
path: location of stored blast hits computed by computeBlastHits()
returns: mapping object from query id to hits. used to be a bsddb, now is a dict.
'''
return util.loadObject(path)
def getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
'''
queryFastaPath: location of fasta file of query sequences
subjectIndexPath: location and name of blast-formatted indexes.
evalue: a string or float representing the maximum evalue threshold of hits to get.
workingDir: creates, uses, and removes a directory under workingDir.
copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
can improve performance if the working directory is on local disk and the files are on a slow network.
blasts every sequence in query agaist subject, adding hits that are better than evalue to a list stored in a dict keyed on the query id.
'''
# work in a nested tmp dir to avoid junking up the working dir.
with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
if copyToWorking:
localFastaPath = os.path.join(tmpDir, 'query.fa')
shutil.copyfile(queryFastaPath, localFastaPath)
localIndexDir = os.path.join(tmpDir, 'local_blast')
os.makedirs(localIndexDir, 0770)
localIndexPath = os.path.join(localIndexDir, os.path.basename(subjectIndexPath))
for path in glob.glob(subjectIndexPath+'*'):
if os.path.isfile:
shutil.copy(path, localIndexDir)
queryFastaPath = localFastaPath
subjectIndexPath = localIndexPath
blastResultsPath = os.path.join(tmpDir, 'blast_results')
# blast query vs subject, using /opt/blast-2.2.22/bin/blastp
cmd = ['blastp', '-outfmt', '6', '-evalue', str(evalue),
'-query', queryFastaPath, '-db', subjectIndexPath,
'-out', blastResultsPath]
subprocess.check_call(cmd)
# parse results
hitsMap = parseResults(blastResultsPath, limitHits)
return hitsMap
def computeBlastHits(queryFastaPath, subjectIndexPath, outPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
'''
queryFastaPath: location of fasta file of query sequences
subjectIndexPath: location and name of blast-formatted indexes.
evalue: a string or float representing the maximum evalue threshold of hits to get.
outPath: location of file where blast hits are saved.
workingDir: creates, uses, and removes a directory under workingDir.
copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
can improve performance if the working directory is on local disk and the files are on a slow network.
Runs getBlastHits() and persists the hits to outPath.
'''
hitsMap = getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits, workingDir, copyToWorking)
util.dumpObject(hitsMap, outPath)
def parseResults(blastResultsPath, limitHits=MAX_HITS):
'''
returns: a map from query seq id to a list of tuples of (subject seq id, evalue) for the top hits of the query sequence in the subject genome
'''
# parse tabular results into hits. thank you, ncbi, for creating results this easy to parse.
hitsMap = {}
hitsCountMap = {}
prevSeqId = None
prevHitId = None
fh = open(blastResultsPath)
for line in fh:
splits = line.split()
try:
seqId = fasta.idFromName(splits[0]) # remove namespace prefix, e.g. 'gi|'
hitId = fasta.idFromName(splits[1])
hitEvalue = float(splits[10])
except Exception as e:
logging.exception('parseResults(): prevSeqId: {}, prevHitId: {}, line: {}'.format(prevSeqId, prevHitId, line))
# results table reports multiple "alignments" per "hit" in ascending order by evalue
# we only store the top hits.
if prevSeqId != seqId or prevHitId != hitId:
prevSeqId = seqId
prevHitId = hitId
if seqId not in hitsCountMap:
hitsCountMap[seqId] = 0
hitsMap[seqId] = []
if not limitHits or hitsCountMap[seqId] < limitHits:
hitsCountMap[seqId] += 1
hitsMap[seqId].append((hitId, hitEvalue))
fh.close()
return hitsMap
###############
# RSD FUNCTIONS
###############
def pamlGetDistance(path):
filename = '%s/2AA.t'%path
# adding a pause on the off-chance that the filesystem might be lagging a bit, causing the open() to fail below.
# I think it is more likely that codeml in runPaml_all() is failing before writing the file.
if not os.path.isfile(filename):
time.sleep(0.5)
with open(filename) as rst:
get_rst = rst.readlines()
os.unlink(filename)
if not get_rst:
raise Exception(PAML_ERROR_MSG, path)
str = ''
for line in get_rst[1:]:
cd1 = line.split()
if not len(cd1) > 1:
str += "%s "%(line.split('\n')[0])
continue
if len(cd1) > 1:
str+="%s %s"%(cd1[0], cd1[1])
dist = float(str.split()[2])
return dist
def alignFastaKalign(input):
'''
input: string containing fasta formatted sequences to be aligned.
runs alignment program kalign
Returns: fasta-formatted aligned sequences
'''
alignedFasta = util.run(['kalign', '-f', 'fasta'], input) # output clustalw format
return alignedFasta.replace('\n\n', '\n') # replace fixes a bug in Kalign version 2.04, where if a seq is exactly 60 chars long, an extra newline is output.
def alignFastaClustalw(input, path):
'''
input: string containing fasta formatted sequences to be aligned.
path: working directory where fasta will be written and clustal will write output files.
runs alignment program clustalw
Returns: fasta-formatted aligned sequences
'''
clustalFastaPath = os.path.join(path, CLUSTAL_INPUT_FILENAME)
clustalAlignmentPath = os.path.join(path, CLUSTAL_ALIGNMENT_FILENAME)
util.writeToFile(input, clustalFastaPath)
try:
cmd = ['clustalw', '-output', 'fasta', '-infile', clustalFastaPath, '-outfile', clustalAlignmentPath]
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
logging.exception('runClustal Error: clustalFastaPath data = %s'%open(clustalFastaPath).read())
raise
alignedFasta = util.readFromFile(clustalAlignmentPath)
return alignedFasta
def dashlen_check(seq):
'''
Objective: calculate the density of gaps in a sequence at 5' and 3' ends -- caused by poor alignment or by diff length seqs
Arguments: sequence
Result: the number of bases to be cut from the subjects 5' and 3' ends, and the divergence of the trimmed seq.
'''
seq = seq.strip()
# trim the dashes from the front and end
(frontDashes, trimmedSeq, endDashes) = DASHLEN_RE.search(seq).groups()
# logging.debug('dashlen_check: seq=%s'%seq)
# all dashes -- do not trim anything
if not trimmedSeq:
return (0, 0)
# ignore trims < 10.
frontTrim = len(frontDashes)
if frontTrim < 10:
frontTrim = 0
endTrim = len(endDashes)
if endTrim < 10:
endTrim = 0
trimmedSeqDivergence = (trimmedSeq.count('-') / float(len(trimmedSeq)))
return (frontTrim, endTrim, trimmedSeqDivergence)
def makeGetSeqForId(genomeFastaPath):
'''
genomeFastaPath: location of fasta file. also location/name of blast formatted indexes of the fasta file.
'''
# suck fasta file into memory, converting it into a map from id to sequence
# in memory dict performs much better than on-disk retrieval with xdget or fastacmd.
# and genome fasta files do not take much space (on a modern computer).
fastaMap = {}
for (seqNameline, seq) in fasta.readFasta(genomeFastaPath):
seqId = fasta.idFromName(seqNameline)
fastaMap[seqId] = seq
def getSeqForIdInMemory(seqId):
return fastaMap[seqId]
return getSeqForIdInMemory
def makeGetHitsOnTheFly(genomeIndexPath, evalue, workingDir='.'):
'''
genomeIndexPath: location of blast formatted indexes. usually same directory/name as genome fasta path
evalue: float or string. Hits with evalues >= evalue will not be included in the returned blast hits.
workingDir: a directory in which to create, use, and delete temporary files and dirs.
returns: a function that returns that takes as input a sequence id and sequence and returns the blast hits
'''
def getHitsOnTheFly(seqid, seq):
with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
queryFastaPath = os.path.join(tmpDir, 'query.faa')
# add 'lcl|' to make ncbi blast happy.
util.writeToFile('{0}\n{1}\n'.format('>lcl|'+seqid, seq), queryFastaPath)
hitsDb = getBlastHits(queryFastaPath, genomeIndexPath, evalue, workingDir=workingDir)
return hitsDb.get(seqid)
return getHitsOnTheFly
def makeGetSavedHits(filename):
'''
returns a function which can be used to get the hits
from a file containing pre-computed blast results
'''
# in memory retrieval is faster than on-disk retrieval with bsddb, but this has a minor impact on overall roundup performance.
hitsDb = loadBlastHits(filename)
def getHitsInMemory(seqid, seq):
return hitsDb.get(seqid)
return getHitsInMemory
def getGoodEvalueHits(seqId, seq, getHitsFunc, getSeqFunc, evalue):
'''
evalue: a float.
returns: a list of pairs of (hitSeqId, hitSequence, hitEvalue) that have a hitEvalue below evalue. hitEvalue is a float.
'''
goodhits = []
hits = getHitsFunc(seqId, seq)
# check for 3 or fewer blast hits below evalue threshold
if hits:
hitCount = 0
for hit in hits:
if hitCount >= MAX_HITS:
break
hitSeqId = getHitId(hit)
hitEvalue = getHitEvalue(hit)
if hitEvalue < evalue:
hitCount += 1
hitSeq = getSeqFunc(hitSeqId)
goodhits.append((hitSeqId, hitSeq, hitEvalue))
return goodhits
def getDistanceForAlignedSeqPair(seqId, alignedSeq, hitSeqId, alignedHitSeq, workPath):
# paranoid check: aligned and trimmed seqs need to be the same length.
# if len(alignedSeq) != len(alignedHitSeq):
# raise Exception('getDistanceForAlignedSeqPairs: different lengths for seqs: '+str(((seqId, alignedSeq), (hitSeqId, alignedHitSeq))))
dataFileName = 'datafile.seq'
treeFileName = 'treefile.seq'
outFileName = 'outfile.seq'
dataFilePath = os.path.join(workPath, dataFileName)
treeFilePath = os.path.join(workPath, treeFileName)
outFilePath = os.path.join(workPath, outFileName)
# heading is number of seqs and length of each seq (which all need to be the same len).
heading = '2 %s\n'%len(alignedSeq)
pamlData = heading + '%s\n%s\n'%(seqId, alignedSeq) + '%s\n%s\n'%(hitSeqId, alignedHitSeq)
# logging.debug('pamlData=%s'%pamlData)
util.writeToFile(pamlData, dataFilePath)
# workPath is simply your folder that will contain codeml (Yang 2000), codeml.ctl (the codeml control file), and the jones.dat (Jones et. al, 1998)
# write the codeml control file that will run codeml
# run the codeml
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(['codeml'], cwd=workPath, stdout=devnull)
distance = pamlGetDistance(workPath)
return distance
finally:
for filePath in [dataFilePath, treeFilePath, outFilePath]:
if os.path.exists(filePath):
os.remove(filePath)
def getGoodDivergenceAlignedTrimmedSeqPair(seqId, seq, hitSeqId, hitSeq, workPath):
'''
aligns seq to hit. trims aligned seq and hit seq.
returns: pairs of pairs of id and aligned trimmed sequences for sequences in hits,
and a predicate function that, given a divergence threshold, says if the divergence of the sequences exceeds the threshold.
e.g. ((seqId, alignedTrimmedSeq), (hitSeqId, alignedTrimmedHitSeq), divergencePredicateFunc)
'''
# ALIGN SEQ and HIT
# need to align the sequences so we'z can study the rate of evolution per site
inputFasta = '>%s\n%s\n>%s\n%s\n'%(seqId, seq, hitSeqId, hitSeq)
if USE_CLUSTALW:
alignedFasta = alignFastaClustalw(inputFasta, workPath)
else:
alignedFasta = alignFastaKalign(inputFasta)
# try to recover from rare, intermittent failure of fasta alignment
if not alignedFasta:
logging.error('fasta alignment failed.\ninputFasta=%s\n' +
'alignedFasta=%s\nSleep and retry alignment.',
inputFasta, alignedFasta)
time.sleep(0.1)
alignedFasta = alignFastaKalign(inputFasta)
try:
# parse the aligned fasta into sequence ids and sequences
namelinesAndSeqs = list(fasta.readFasta(cStringIO.StringIO(alignedFasta)))
idAndSeqs = [(fasta.idFromName(seqNameline), seq) for seqNameline, seq in namelinesAndSeqs]
alignedIdAndSeq, alignedHitIdAndSeq = idAndSeqs
except Exception as e:
e.args += (inputFasta, alignedFasta)
raise
# CHECK FOR EXCESSIVE DIVERGENCE AND TRIMMING
# find most diverged sequence
# sort sequences by dash count. why?
divIdSeqs = []
for id, seq in (alignedIdAndSeq, alignedHitIdAndSeq):
dashCount = seq.count('-')
div = dashCount / float(len(seq))
g = (dashCount, div, id, seq)
divIdSeqs.append(g)
divIdSeqs.sort()
# check for excessive divergence
leastDivergedDashCount, leastDivergedDiv, leastDivergedId, leastDivergedSeq = divIdSeqs[0]
# check for excessive divergence and generate dashtrim.
mostDivergedDashCount, mostDivergedDiv, mostDivergedId, mostDivergedSeq = divIdSeqs[1]
# dashtrim = dashlen_check(mostDivergedSeq, divergence)
startTrim, endTrim, trimDivergence = dashlen_check(mostDivergedSeq)
# logging.debug('dashtrim='+str(dashtrim))
# trim and add seqs to output
def divergencePredicate(divergenceThreshold):
'''Why this logic? Ask Dennis. Function closed over local variables that returns whether or not the alignment of the sequences is too diverged.'''
if leastDivergedSeq and leastDivergedDiv > divergenceThreshold:
return True
if (startTrim or endTrim) and trimDivergence >= divergenceThreshold:
return True
return False
alignedTrimmedIdAndSeq, alignedTrimmedHitIdAndSeq = [(id, seq[startTrim:(len(seq)-endTrim)]) for id, seq in (alignedIdAndSeq, alignedHitIdAndSeq)]
return alignedTrimmedIdAndSeq, alignedTrimmedHitIdAndSeq, divergencePredicate
def minimumDicts(dicts, key):
'''
dicts: list of dictionaries.
key: a key present in every dict in dicts.
returns: list of d in dicts, s.t. d[key] <= e[key] for every d, e in dicts.
e.g.: [{'a':4, 'b':1}, {'a':5, 'b':0}, {'b': 0, 'a': 3}], 'b' -> [{'a':5, 'b':0} and {'b': 0, 'a': 3}] (not necessarily in that order)
'''
if not dicts:
return []
sortedDicts = sorted(dicts, key=lambda x: x[key])
minValue = sortedDicts[0][key]
return [d for d in sortedDicts if d[key] == minValue]
def computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds=None, workingDir='.'):
'''
queryFastaPath: fasta file path for query genome.
subjectFastaPath: fasta file path for subject genome.
divEvalues: list of (div, evalue) tuples. orthologs are computed using the given div and evalue thresholds. div and evalue can be a float or string.
getForwardHits: a function mapping a query seq id to a list of subject genome blast hits. see makeGetSavedHits() and makeGetHitsOnTheFly().
getReverseHits: a function mapping a subject seq id to a list of query genome blast hits. see makeGetSavedHits() and makeGetHitsOnTheFly().
querySeqIds: a list of sequence ids for the query genome. orthologs are only computed for those sequences.
If False, orthologs are computed for every sequence in the query genome.
workingDir: under workingDir, a temp directory is created, worked in (files and dirs created and deleted), and removed.
returns: a mapping from (div, evalue) tuples to lists of orthologs.
'''
# optimization: internally swap query and subject if subject has fewer sequences than query and no querySeqIds were given.
# compute orthologs and unswap results.
# roundup time complexity is roughly linear in the number of sequences in the query genome.
genomeSwapOptimization = True
if not querySeqIds and genomeSwapOptimization and fasta.numSeqsInFastaDb(subjectFastaPath) < fasta.numSeqsInFastaDb(queryFastaPath):
# print 'roundup(): subject genome has fewer sequences than query genome. internally swapping query and subject to improve speed.'
isSwapped = True
# swap query and subject, forward and reverse
queryFastaPath, subjectFastaPath = subjectFastaPath, queryFastaPath
getForwardHits, getReverseHits = getReverseHits, getForwardHits
else:
isSwapped = False
# make functions to look up a sequence from a sequence id.
getQuerySeqFunc = makeGetSeqForId(queryFastaPath)
getSubjectSeqFunc = makeGetSeqForId(subjectFastaPath)
# if no querySeqIds were specified, get orthologs for every query sequence
if not querySeqIds:
querySeqIds = list(fasta.readIds(queryFastaPath))
# get orthologs for every (div, evalue) combination
with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
divEvalueToOrthologs = _computeOrthologsSub(querySeqIds, getQuerySeqFunc, getSubjectSeqFunc, divEvalues, getForwardHits, getReverseHits, workingDir)
# if swapped query and subject genome, need to swap back the ids in orthologs before returning them.
if isSwapped:
swappedDivEvalueToOrthologs = divEvalueToOrthologs
divEvalueToOrthologs = {}<|fim▁hole|> divEvalueToOrthologs[divEvalue] = orthologs
return divEvalueToOrthologs
def _computeOrthologsSub(querySeqIds, getQuerySeqFunc, getSubjectSeqFunc, divEvalues, getForwardHits, getReverseHits, workingDir):
'''
querySeqIds: a list of sequence ids from query genome. Only orthologs for these ids are searched for.
getQuerySeqFunc: a function that takes a seq id and returns the matching sequence from the query genome.
getSubjectSeqFunc: a function that takes a seq id and returns the matching sequence from the subject genome.
divEvalues: a list of (div, evalue) pairs which are thresholds for finding orthologs. All pairs are searched simultaneously.
div can be a float or string. So can evalue.
getForwardHits: a function that takes a query seq id and a query seq and returns the blast hits in the subject genome.
getReverseHits: a function that takes a subject seq id and a subject seq and returns the blast hits in the query genome.
find orthologs for every sequence in querySeqIds and every (div, evalue) combination.
return: a mapping from (div, evalue) pairs to lists of orthologs.
'''
# Note: the divs and evalues in divEvalues are strings which need to be converted to floats at the appropriate times below.
# copy config files to working dir
shutil.copy(MATRIX_PATH, workingDir)
shutil.copy(CODEML_CONTROL_PATH, workingDir)
divEvalueToOrthologs = dict(((div, evalue), list()) for div, evalue in divEvalues)
maxEvalue = max(float(evalue) for div, evalue in divEvalues)
maxDiv = max(float(div) for div, evalue in divEvalues)
# get ortholog(s) for each query sequence
for queryId in querySeqIds:
querySeq = getQuerySeqFunc(queryId)
# get forward hits, evalues, alignments, divergences, and distances that meet the loosest standards of all the divs and evalues.
# get forward hits and evalues, filtered by max evalue
idSeqEvalueOfForwardHits = getGoodEvalueHits(queryId, querySeq, getForwardHits, getSubjectSeqFunc, maxEvalue)
hitDataList = [{'hitId': hitId, 'hitSeq': hitSeq, 'hitEvalue': hitEvalue} for hitId, hitSeq, hitEvalue in idSeqEvalueOfForwardHits]
# get alignments and divergences
for hitData in hitDataList:
(queryId, alignedQuerySeq), (hitId, alignedHitSeq), tooDivergedPred = getGoodDivergenceAlignedTrimmedSeqPair(queryId, querySeq, hitData['hitId'], hitData['hitSeq'], workingDir)
hitData['alignedQuerySeq'] = alignedQuerySeq
hitData['alignedHitSeq'] = alignedHitSeq
hitData['tooDivergedPred'] = tooDivergedPred
# filter by max divergence.
hitDataList = [hitData for hitData in hitDataList if not hitData['tooDivergedPred'](maxDiv)]
# get distances of remaining hits, discarding hits for which paml generates no rst data.
distancesHitDataList = []
for hitData in hitDataList:
try:
hitData['distance'] = getDistanceForAlignedSeqPair(queryId, hitData['alignedQuerySeq'], hitData['hitId'], hitData['alignedHitSeq'], workingDir)
distancesHitDataList.append(hitData)
except Exception as e:
if e.args and e.args[0] == PAML_ERROR_MSG:
continue
else:
raise
# filter hits by specific div and evalue combinations.
divEvalueToMinimumDistanceHitDatas = {}
minimumHitIdToDivEvalues = {}
minimumHitIdToHitData = {}
for divEvalue in divEvalues:
div, evalue = divEvalue
# collect hit datas that pass thresholds.
goodHitDatas = []
for hitData in distancesHitDataList:
if hitData['hitEvalue'] < float(evalue) and not hitData['tooDivergedPred'](float(div)):
goodHitDatas.append(hitData)
# get the minimum hit or hits.
minimumHitDatas = minimumDicts(goodHitDatas, 'distance')
divEvalueToMinimumDistanceHitDatas[divEvalue] = minimumHitDatas
for hitData in minimumHitDatas:
minimumHitIdToDivEvalues.setdefault(hitData['hitId'], []).append(divEvalue)
minimumHitIdToHitData[hitData['hitId']] = hitData # possibly redundant, since if two divEvalues have same minimum hit, it gets inserted into dict twice.
# get reverese hits that meet the loosest standards of the divs and evalues associated with that minimum distance hit.
# performance note: wasteful or necessary to realign and compute distance between minimum hit and query seq?
for hitId in minimumHitIdToHitData:
hitData = minimumHitIdToHitData[hitId]
hitSeq = hitData['hitSeq']
# since minimum hit might not be associated with all divs and evalues, need to find the loosest div and evalue associated with this minimum hit.
maxHitEvalue = max(float(evalue) for div, evalue in minimumHitIdToDivEvalues[hitId])
maxHitDiv = max(float(div) for div, evalue in minimumHitIdToDivEvalues[hitId])
# get reverse hits and evalues, filtered by max evalue
idSeqEvalueOfReverseHits = getGoodEvalueHits(hitId, hitSeq, getReverseHits, getQuerySeqFunc, maxHitEvalue)
revHitDataList = [{'revHitId': revHitId, 'revHitSeq': revHitSeq, 'revHitEvalue': revHitEvalue} for revHitId, revHitSeq, revHitEvalue in idSeqEvalueOfReverseHits]
# if the query is not in the reverese hits, there is no way we can find an ortholog
if queryId not in [revHitData['revHitId'] for revHitData in revHitDataList]:
continue
for revHitData in revHitDataList:
values = getGoodDivergenceAlignedTrimmedSeqPair(hitId, hitSeq, revHitData['revHitId'], revHitData['revHitSeq'], workingDir)
(hitId, alignedHitSeq), (revHitId, alignedRevHitSeq), tooDivergedPred = values
revHitData['alignedHitSeq'] = alignedHitSeq
revHitData['alignedRevHitSeq'] = alignedRevHitSeq
revHitData['tooDivergedPred'] = tooDivergedPred
# filter by max divergence.
revHitDataList = [revHitData for revHitData in revHitDataList if not revHitData['tooDivergedPred'](maxHitDiv)]
# if the query is not in the reverese hits, there is no way we can find an ortholog
if queryId not in [revHitData['revHitId'] for revHitData in revHitDataList]:
continue
# get distances of remaining reverse hits, discarding reverse hits for which paml generates no rst data.
distancesRevHitDataList = []
for revHitData in revHitDataList:
try:
revHitData['distance'] = getDistanceForAlignedSeqPair(hitId, revHitData['alignedHitSeq'], revHitData['revHitId'], revHitData['alignedRevHitSeq'], workingDir)
distancesRevHitDataList.append(revHitData)
except Exception as e:
if e.args and e.args[0] == PAML_ERROR_MSG:
continue
else:
raise
# if passes div and evalue thresholds of the minimum hit and minimum reverse hit == query, write ortholog.
# filter hits by specific div and evalue combinations.
for divEvalue in minimumHitIdToDivEvalues[hitId]:
div, evalue = divEvalue
# collect hit datas that pass thresholds.
goodRevHitDatas = []
for revHitData in distancesRevHitDataList:
if revHitData['revHitEvalue'] < float(evalue) and not revHitData['tooDivergedPred'](float(div)):
goodRevHitDatas.append(revHitData)
# get the minimum hit or hits.
minimumRevHitDatas = minimumDicts(goodRevHitDatas, 'distance')
if queryId in [revHitData['revHitId'] for revHitData in minimumRevHitDatas]:
divEvalueToOrthologs[divEvalue].append((queryId, hitId, hitData['distance']))
return divEvalueToOrthologs
def computeOrthologsUsingOnTheFlyHits(queryFastaPath, subjectFastaPath, divEvalues, querySeqIds=None, workingDir='.'):
'''
Convenience function around computeOrthologs()
querySeqIds: a list of sequence ids from query genome to find orthologs for. If empty/falsy, will compute orthologs for every sequence in query genome.
queryFastaPath: location and name of of fasta file and blast indexes of the query genome. e.g. /groups/rodeo/roundup/genomes/current/Homo_sapiens.aa/Homo_sapiens.aa
subjectFastaPath: location and name of of fasta file and blast indexes of the subject genome.
workingDir: a directory in which to create, use, and delete temporary files and dirs.
This computes blast hits on-the-fly, so it slower than rounduPrecompute() for computing orthologs for full genomes.
'''
# get blast hits using the least stringent evalue from among all the evalues in divEvalues.
maxEvalue = str(max(float(evalue) for div, evalue in divEvalues))
getForwardHits = makeGetHitsOnTheFly(subjectFastaPath, maxEvalue, workingDir)
getReverseHits = makeGetHitsOnTheFly(queryFastaPath, maxEvalue, workingDir)
divEvalueToOrthologs = computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds, workingDir)
return divEvalueToOrthologs
def computeOrthologsUsingSavedHits(queryFastaPath, subjectFastaPath, divEvalues, forwardHitsPath, reverseHitsPath, querySeqIds=None, workingDir='.'):
'''
Convenience function around computeOrthologs()
returns: a mapping from (div, evalue) pairs to lists of orthologs.
'''
getForwardHits = makeGetSavedHits(forwardHitsPath)
getReverseHits = makeGetSavedHits(reverseHitsPath)
divEvalueToOrthologs = computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds, workingDir)
return divEvalueToOrthologs
def writeToOutfile(orthologs, outfile):
'''
orthologs: a list of tuples of (queryid, subjectid, distance).
outfile: where to write the orthologs
write the orthologs to the outfile in the canonical format: one ortholog per line. each line is tab-separated query id subject id and distance.
'''
data = ''.join(['%s\t%s\t%s\n'%(query, subject, distance) for query, subject, distance in orthologs])
with open(outfile, 'w') as fh:
fh.write(data)
###################################
# COMMAND-LINE PROCESSING FUNCTIONS
###################################
def copyFastaArg(srcFile, destDir):
'''
srcFile: FASTA format genome file.
destDir: where to move the fasta file.
Copy the source file to the destination dir. If the source file is already in the destination dir, it will not be copied.
return: path of the copied fasta file.
'''
# use absolute paths
srcFile = os.path.abspath(os.path.expanduser(srcFile))
destDir = os.path.abspath(os.path.expanduser(destDir))
destFile = os.path.join(destDir, os.path.basename(srcFile))
# copy GENOME to DIR if necessary
if srcFile != destFile:
shutil.copyfile(srcFile, destFile)
return destFile
def formatFastaArg(fastaFile):
'''
formatting puts blast indexes in the same dir as fastaFile.
returns: fastaFile
'''
fastaFile = os.path.abspath(os.path.expanduser(fastaFile))
formatForBlast(fastaFile)
return fastaFile
if __name__ == '__main__':
pass
# last line<|fim▁end|> | for divEvalue, swappedOrthologs in swappedDivEvalueToOrthologs.items():
orthologs = [(query, subject, distance) for subject, query, distance in swappedOrthologs] |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Unit test suite for the models of the application."""
from nose.tools import eq_
from tg2express.model import DBSession
from tg2express.tests import load_app
from tg2express.tests import setup_db, teardown_db
__all__ = ['ModelTest']
def setup():
"""Setup test fixture for all model tests."""
load_app()
setup_db()
def teardown():
"""Tear down test fixture for all model tests."""
teardown_db()
class ModelTest(object):
"""Base unit test case for the models."""
klass = None
attrs = {}
def setUp(self):
"""Setup test fixture for each model test method."""
try:
new_attrs = {}
new_attrs.update(self.attrs)
new_attrs.update(self.do_get_dependencies())
self.obj = self.klass(**new_attrs)
DBSession.add(self.obj)
DBSession.flush()
return self.obj
except:
DBSession.rollback()
raise
def tearDown(self):
"""Tear down test fixture for each model test method."""
DBSession.rollback()
def do_get_dependencies(self):
"""Get model test dependencies.
Use this method to pull in other objects that need to be created
for this object to be build properly.
"""
return {}
def test_create_obj(self):
"""Model objects can be created"""
pass
def test_query_obj(self):
"""Model objects can be queried"""<|fim▁hole|> for key, value in self.attrs.items():
eq_(getattr(obj, key), value)<|fim▁end|> | obj = DBSession.query(self.klass).one() |
<|file_name|>frontend.rs<|end_file_name|><|fim▁begin|>use std::io::{Read, Write, Stdin, stdin, Stdout, stdout};
use std::ops::Drop;
use termion;
use termion::{clear, color};
use termion::raw::IntoRawMode;
use left_pad::leftpad;
use cursor::Cursor;
// The Frontend is responsible for rendering the state of the editor
// to the screen and interacting with the user.
pub struct Frontend {
stdin: Stdin,
stdout: termion::raw::RawTerminal<Stdout>,
}
impl Frontend {
/// Creates a new Frontend
pub fn new() -> Frontend {
let in_ = stdin();
let out = stdout().into_raw_mode().unwrap();
Frontend {
stdin: in_,
stdout: out,
}
}
/// Clears the screen
pub fn clear_screen(&mut self) {
write!(self.stdout, "{}", clear::All).unwrap();
}
/// Draws the state of the editor to the screen.
pub fn draw(&mut self, cursor: &Cursor, filename: &Option<String>, lines: &[String]) {
let (width, height) = self.terminal_size();
let num_lines = lines.len();
// The index of the first line of text that is rendered.
let start = if cursor.line > height { cursor.line - height } else { 0 };
// The filename of the current buffer or a no filename message.
let name = filename.clone().unwrap_or_else(|| String::from("**no filename**"));
let padding = (width - name.len()) / 2;
let need_extra = padding*2+name.len() != width;
self.goto_term(0, 0);
// Draw the title bar.
write!(&mut self.stdout, "{}{}{}{}{}{}{}{}",
color::Bg(color::White),
color::Fg(color::Black),
leftpad(" ", padding),
name,
leftpad(" ", padding),
if need_extra { " " } else { "" },
color::Fg(color::Reset),
color::Bg(color::Reset),
).unwrap();
// Draw the lines of text.
for (y, line_number) in (start..start + height - 1).enumerate() {
self.goto_term(0, (y + 1) as u16);
if line_number < num_lines {
// Draw the line of text
write!(self.stdout, "{}{}{} {}",
color::Fg(color::Cyan),
leftpad(format!("{}", line_number + 1), 3),
color::Fg(color::Reset),
lines[line_number],
).unwrap();
} else {
// Draw a ~ to show that there is no line.
write!(self.stdout, "{} ~{}",
color::Fg(color::Cyan),
color::Fg(color::Reset),
).unwrap();
}
}
}
/// Flushes stdout to make the changes show
pub fn flush(&mut self) {
self.stdout.flush().unwrap();
}
/// Hides the cursor
pub fn hide_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Hide{}).unwrap();
}
/// Shows the cursor
pub fn show_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Show{}).unwrap();
}
/// Moves the cursor to x, y, which are both 0 based in terminal cordinates
pub fn goto_term(&mut self, x: u16, y: u16) {
write!(self.stdout, "{}", termion::cursor::Goto(x+1, y+1)).unwrap();
}
/// Moves the cursor to the position specified by the Cursor
pub fn move_cursor(&mut self, cursor: &Cursor) {
let (_, height) = self.terminal_size();
let x = (cursor.column + 4) as u16;
let y = if cursor.line > height {
cursor.line as u16
} else {
(cursor.line + 1) as u16
};
self.goto_term(x, y)
}
/// Returns the size of the terminal as (width, height)
pub fn terminal_size(&self) -> (usize, usize) {
let (w, h) = termion::terminal_size().unwrap();
(w as usize, h as usize)
}
/// Prompts for a line of text
pub fn prompt_for_text(&mut self, prompt: &str) -> Option<String> {
let (width, height) = termion::terminal_size().unwrap();
self.goto_term(0, height - 1);
// Draw the background.
write!(&mut self.stdout, "{}{}{}{}",
termion::clear::CurrentLine,
termion::color::Bg(color::White),
termion::color::Fg(color::Black),
leftpad("", width as usize)).unwrap();
self.goto_term(0, height - 1);
// Draw the prompt.
write!(&mut self.stdout, "{}: ", prompt).unwrap();
// Show it.
self.flush();
// Get the input from the user,
let input = self.read_line();
// Reset the forground and background.
write!(self.stdout, "{}{}", color::Fg(color::Reset), color::Bg(color::Reset)).unwrap();
input
}
/// Prompts for a yes/no response from the user
pub fn prompt_for_bool(&mut self, prompt: &str) -> bool {
let response = self.prompt_for_text(&format!("{} (y/n)", prompt));
if let Some(r) = response {
!r.is_empty() && r.chars().nth(0).unwrap() == 'y'
} else {
false
}
}
/// Reads a line of text from the user.
/// TODO: Fix for Unicode. I think that the actual user input is handled
/// correctly, but echoing the typed characters may not be.
fn read_line(&mut self) -> Option<String> {
// Start with a buffer of size 40 so that small inputs don't require
// reallocating the buffer.
let mut buf = Vec::with_capacity(30);
loop {
// Get one byte of input
let mut b = [0; 1];
self.stdin.read_exact(&mut b[..]).unwrap();
match b[0] {
0 | 3 | 4 => return None,
// 0x7f is backspace
0x7f if !buf.is_empty() => {
// Delete the last character typed
buf.pop();
// Clear the last character from the screen
write!(&mut self.stdout, "{}{}",<|fim▁hole|> },
0x7f => {},
// Newline or CR ends the input
b'\n' | b'\r' => break,
c => {
// Add the typed character to the input
buf.push(c);
// Draw it to the screen
write!(&mut self.stdout, "{}", char::from(c)).unwrap();
self.flush();
},
};
}
// Convert the buffer to a String.
Some(String::from_utf8(buf).unwrap())
}
}
impl Drop for Frontend {
/// Clean up the terminal after the we go out of scope.
fn drop(&mut self) {
self.clear_screen();
self.goto_term(0, 0);
self.show_cursor();
self.flush();
}
}<|fim▁end|> | termion::cursor::Left(1),
termion::clear::UntilNewline).unwrap();
self.flush(); |
<|file_name|>user.py<|end_file_name|><|fim▁begin|>from rest_framework import status
from rest_framework.decorators import detail_route, list_route
from sigma_core.views.sigma_viewset import SigmaViewSet
from sigma_core.importer import Sigma, load_ressource
User = load_ressource("User")
from django.core.mail import send_mail
from rest_framework.permissions import AllowAny
import random
reset_mail = {
'from_email': 'support@sigma.fr',
'subject': 'Mot de passe Sigma',
'message': u"""
Bonjour,
Ton mot de passe Sigma a été réinitialisé.
C'est maintenant "{password}".
Cordialement,
L'équipe Sigma.
"""
}
class UserViewSet(SigmaViewSet):
serializer_class = User.serializer
queryset = User.model.objects.all()
#*********************************************************************************************#
#** Read actions **#
#*********************************************************************************************#
def retrieve(self, request, pk=None):
"""
Retrieve an User according to its id.
"""
return self.handle_action('retrieve', request, pk)
@list_route(methods=['get'])
def me(self, request):
"""
Retrieve the data of the current user.
"""
return self.serialized_response(request.user)
#*********************************************************************************************#
#** Write actions **#
#*********************************************************************************************#
# def perform_create(self, serializer):
# from sigma_core.models.cluster import Cluster
# from sigma_core.models.group import Group
# serializer.save()
# # Create related GroupMember associations
# # TODO: Looks like a hacky-way to do this.
# # But how to do it properly ?
# memberships = [GroupMember(group=Group(id=c), user=User(id=serializer.data['id']),) for c in serializer.data['clusters_ids']]
# GroupMember.objects.bulk_create(memberships)
# def update(self, request, pk=None):
# """
# Update the data of the specified user.
# """
# try:
# user = User.objects.prefetch_related('clusters').get(pk=pk)
# except User.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# # I can update my own profile, or another's profile if I'm a sigma/cluster admin
# if not (request.user.is_sigma_admin() or int(pk) == request.user.id or request.user.is_admin_of_one_cluster(user.clusters.all())):
# return Response(status=status.HTTP_403_FORBIDDEN)
# # Names edition is allowed to sigma/clusters admins only
# if (request.data['lastname'] != user.lastname or request.data['firstname'] != user.firstname) and not request.user.is_sigma_admin() and not request.user.is_admin_of_one_cluster(user.clusters.all()):
# return Response('You cannot change your lastname or firstname', status=status.HTTP_400_BAD_REQUEST)
# return super(UserViewSet, self).update(request, pk)
# def destroy(self, request, pk=None):
# if not request.user.is_sigma_admin() and int(pk) != request.user.id:
# return Response(status=status.HTTP_403_FORBIDDEN)
# super().destroy(request, pk)
@list_route(methods=['put'])
def change_password(self, request):
"""
Allow current user to change his password.
---
omit_serializer: true
parameters_strategy:
form: replace
parameters:
- name: old_password
type: string
- name: password
type: string
"""
# PASSWORD_MIN_LENGTH = 8
# user = request.user
# data = request.data
# if not user.check_password(data['old_password']):
# return Response("Wrong password", status=status.HTTP_403_FORBIDDEN)
# if len(data['password']) < PASSWORD_MIN_LENGTH:
# return Response("'password' must be at least %d characters long" % PASSWORD_MIN_LENGTH, status=status.HTTP_400_BAD_REQUEST)
# user.set_password(data['password'])
# user.save()
return Response('Password successfully changed', status=status.HTTP_200_OK)
#Dangerous to send a password in clear...
@list_route(methods=['post'], permission_classes=[AllowAny])
def reset_password(self, request):
"""
Reset current user's password and send him an email with the new one.
---
omit_serializer: true
parameters_strategy:
form: replace
parameters:
- name: email
type: string
"""
# email = request.data.get('email')
# if email == '':
# return Response("'email' field cannot be empty", status=status.HTTP_400_BAD_REQUEST)
# try:
# user = User.objects.get(email=email)
# except User.DoesNotExist:
# return Response('No user found with this email', status=status.HTTP_404_NOT_FOUND)
# password = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(10))
<|fim▁hole|>
# user.set_password(password)
# user.save()
return Response('Password reset', status=status.HTTP_200_OK)<|fim▁end|> | # mail = reset_mail.copy()
# mail['recipient_list'] = [user.email]
# mail['message'] = mail['message'].format(email=user.email, password=password, name=user.get_full_name())
# send_mail(**mail) |
<|file_name|>logger.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2014 Xiaomi.com, Inc. All Rights Reserved
// @file logger.go
// @author 王靖 (wangjing1@xiaomi.com)
// @date 14-11-25 20:02:50
// @version $Revision: 1.0 $
// @brief
package log
import (
"crypto/rand"
"fmt"
"math/big"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
)
// SysLog 系统Log
var SysLog *ProxyLogger = nil
// AppLog 应用Log
var AppLog *ProxyLogger = nil
// Logger the log.Logger wrapper
type ProxyLogger struct {
l *Logger
}
func logidGenerator() string {
if i, err := rand.Int(rand.Reader, big.NewInt(1<<30-1)); err != nil {
return "0"
} else {
return i.String()
}
}
func comMessage(strfmt string, args ...interface{}) map[string]string {
pc, file, line, ok := runtime.Caller(2)
if !ok {
file = "?"
line = 0
}
fn := runtime.FuncForPC(pc)
var fnName string
if fn == nil {
fnName = "?()"
} else {
dotName := filepath.Ext(fn.Name())
fnName = strings.TrimLeft(dotName, ".") + "()"
}
ret := map[string]string{
"file": filepath.Base(file) + ":" + strconv.Itoa(line),
"func": fnName,
"msg": fmt.Sprintf(strfmt, args...),
}
return ret
}
// Notice print notice message to logfile
func (lg *ProxyLogger) Notice(strfmt string, args ...interface{}) {
lg.l.Notice(comMessage(strfmt, args...), logidGenerator())
}
// Debug print debug message to logfile
func (lg *ProxyLogger) Debug(strfmt string, args ...interface{}) {
lg.l.Debug(comMessage(strfmt, args...), logidGenerator())
}
// Warn print warning message to logfile
func (lg *ProxyLogger) Warn(strfmt string, args ...interface{}) {
lg.l.Warn(comMessage(strfmt, args...), logidGenerator())
}
// Fatal print fatal message to logfile
func (lg *ProxyLogger) Fatal(strfmt string, args ...interface{}) {
lg.l.Fatal(comMessage(strfmt, args...), logidGenerator())
}
// Config Config of One Log Instance
type Config struct {<|fim▁hole|> FilePath string
LogLevel int
AppTag string
}
func init() {
realInit(&Config{FilePath: "/dev/stdout", LogLevel: 0},
&Config{FilePath: "/dev/stdout", LogLevel: 3})
}
var once sync.Once
func Init(syslog, applog *Config) {
f := func() {
realInit(syslog, applog)
}
once.Do(f)
}
func realInit(syslog, applog *Config) {
SysLog = &ProxyLogger{
l: NewLogger(syslog.FilePath),
}
SysLog.l.SetLevel(syslog.LogLevel)
SysLog.l.SetAppTag(defaultAppTag())
AppLog = &ProxyLogger{
l: NewLogger(applog.FilePath),
}
AppLog.l.SetLevel(applog.LogLevel)
AppLog.l.SetAppTag(defaultAppTag())
}
func defaultAppTag() string {
return "mysql-proxy"
}
/* vim: set expandtab ts=4 sw=4 */<|fim▁end|> | |
<|file_name|>tools.py<|end_file_name|><|fim▁begin|>'''
base tools
'''
# -*- coding: utf-8 -*-
import re
def is_ipv4(ip) :
pattern = r'^(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[0-9]{1,2})){3}$'
matcher = re.match(pattern, ip)
if matcher is not None :
return True
return False
<|fim▁hole|> return True
return False<|fim▁end|> | def is_domain(domain) :
pattern = r'[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+\.?'
matcher = re.match(pattern, domain)
if matcher is not None : |
<|file_name|>config.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
version=0.2
visitedVersion=0.2
enableNotification=True
isFirstStart=True
countClickUp=0
langList=['English','Russian']
searchEngines=['Google','Bing','Yahoo','Yandex']
defaultSearchEngine=0
defaultLangFrom='Auto'
defaultLangTo='Russian'
useControl=True
useDblControl=True
useNothing=False
useGoogle=True
useBing=False
useProxy=False
enableApp=True
startWithOS=True
proxyAddress=""
proxyPort=""
proxyLogin=""
proxyPassword=""
isRunTranslate=False
translatedTextSize=8
langForTran = {
'ar':'Arabic',
'bg':'Bulgarian',
'ca':'Catalan',
'zh-CHS':'Chinese Simplified',
'zh-CHT':'Chinese Traditional',
'cs':'Czech',
'da':'Danish',
'nl':'Dutch',
'en':'English',
'et':'Estonian',
'fi':'Finnish',
'fr':'French',
'de':'German',
'el':'Greek',
'ht':'Haitian Creole',
'he':'Hebrew',
'hu':'Hungarian',
'id':'Indonesian',
'it':'Italian',
'ja':'Japanese',
'ko':'Korean',
'lv':'Latvian',
'lt':'Lithuanian',
'no':'Norwegian',
'pl':'Polish',
'pt':'Portuguese',
'ro':'Romanian',
'ru':'Russian',
'sk':'Slovak',
'sl':'Slovenian',
'es':'Spanish',
'sv':'Swedish',
'th':'Thai',
'tr':'Turkish',
'uk':'Ukrainian',
'vi':'Vietnamese'
}
langForListen = {
'ca':'Catalan',
'ca-es':' Catalan (Spain)',
'da':'Danish',
'da-dk':'Danish (Denmark)',
'de':'German',
'de-de':'German (Germany)',
'en':'English',
'en-au':'English (Australia)',
'en-ca':'English (Canada)',
'en-gb':'English (United Kingdom)',
'en-in':'English (India)',
'en-us':'English (United States)',
'es':'Spanish',
'es-es':'Spanish (Spain)',
'es-mx':'Spanish (Mexico)',
'fi ':'Finnish',
'fi-fi':'Finnish (Finland)',
'fr ':'French',
'fr-ca':'French (Canada)',
'fr-fr':'French (France)',
'it ':'Italian',
'it-it':'Italian (Italy)',
'ja ':'Japanese',
'ja-jp':'Japanese (Japan)',<|fim▁hole|>'nl-nl':'Dutch (Netherlands)',
'no':'Norwegian',
'pl':'Polish',
'pl-pl':'Polish (Poland)',
'pt':'Portuguese',
'pt-br':'Portuguese (Brazil)',
'pt-pt':'Portuguese (Portugal)',
'ru':'Russian',
'ru-ru':'Russian (Russia)',
'sv':'Swedish',
'sv-se':'Swedish (Sweden)',
'zh-chs':'Chinese Simplified',
'zh-cht':'Chinese Traditional',
'zh-hk':'Chinese Traditional (Hong Kong S.A.R.)',
'zh-tw':'Chinese Traditional (Taiwan)'
}
import imp
import os
import sys
def main_is_frozen():
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
if main_is_frozen():
# print 'Running from path', os.path.dirname(sys.executable)
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])<|fim▁end|> | 'ko ':'Korean',
'ko-kr':'Korean (Korea)',
'nb-no':'Norwegian (Norway)',
'nl':'Dutch', |
<|file_name|>checkbox.ts<|end_file_name|><|fim▁begin|>/// <reference path="../../node_modules/forms-js/dist/forms-js.d.ts" />
/// <reference path="../utils/input-helpers.ts" />
module adaptor.directives {
export function CheckboxDirective($log:ng.ILogService):ng.IDirective {
<|fim▁hole|> templateUrl: '/templates/checkbox.html',
scope: {
disabled: '@?',
fieldName: '@',
label: '@'
},
link: function($scope:adaptor.interfaces.InputDirectiveScope,
$element:ng.IAugmentedJQuery,
$attributes:ng.IAttributes,
fjsFormController:any):void {
if (!$scope.fieldName) {
$log.error('Missing required field "fieldName"');
return;
}
adaptor.utils.InputHelpers.initScope($scope, fjsFormController.form);
}
};
}
}<|fim▁end|> | return {
require: '^fjsForm',
restrict: 'EA', |
<|file_name|>range.rs<|end_file_name|><|fim▁begin|>use std::borrow::Borrow;
use std::collections::HashSet;
use enums::Direction;
use structs::Point;
use traits::travel::Travel;
use traits::range::Base;
/// Trait wrapping range implementation
pub trait Range: Borrow<Point> {
/// Find the points within the provided manhattan distance
fn range(&self, range: i32) -> HashSet<Point>;
}
impl<T> Range for T where T: Borrow<Point> {
pub fn range(&self, range: i32) -> HashSet<Point> {
let mut set: HashSet<Point> = point.base_range(range);
for index in 1..range + 1 {
let diff = range - index;
set.extend(point.travel(&Direction::Up, index).base_range(diff));
set.extend(point.travel(&Direction::Down, index).base_range(diff));
}
set
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn range() {
let point: Point = Point(1, 2, 5);
let set: HashSet<Point> = point.range(1);
assert!(set.contains(&Point(1, 2, 5)));
assert!(set.contains(&Point(2, 2, 5)));
assert!(set.contains(&Point(1, 3, 5)));
assert!(set.contains(&Point(0, 3, 5)));
assert!(set.contains(&Point(0, 2, 5)));
assert!(set.contains(&Point(1, 1, 5)));<|fim▁hole|> assert!(set.len() == 9);
}
}<|fim▁end|> | assert!(set.contains(&Point(2, 1, 5)));
assert!(set.contains(&Point(1, 2, 4)));
assert!(set.contains(&Point(1, 2, 6))); |
<|file_name|>create_db.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
<|fim▁hole|>import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this everytime the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
"""Creates a database"""
def __init__(self, input_file, db_name, backend, image_dims, **kwargs):
"""
Arguments:
input_file -- read images and labels from this file
db_name -- save database to this location
backend -- database backend (lmdb/hdf5)
image_dims -- (height, width, channels)
Keyword Arguments:
image_folder -- prepend image paths with this folder
shuffle -- shuffle images before saving
resize_mode -- used in utils.image.resize_image()
encoding -- 'none', 'png' or 'jpg'
compression -- 'none' or 'gzip'
mean_file -- save mean file to this location
labels_file -- used to print category distribution
"""
# Take keyword arguments out of kwargs
self.image_folder = kwargs.pop('image_folder', None)
self.shuffle = kwargs.pop('shuffle', True)
self.resize_mode = kwargs.pop('resize_mode' , None)
self.encoding = kwargs.pop('encoding', None)
self.compression = kwargs.pop('compression', None)
self.mean_file = kwargs.pop('mean_file', None)
self.labels_file = kwargs.pop('labels_file', None)
super(CreateDbTask, self).__init__(**kwargs)
self.pickver_task_createdb = PICKLE_VERSION
self.input_file = input_file
self.db_name = db_name
self.backend = backend
if backend == 'hdf5':
# the list of hdf5 files is stored in a textfile
self.textfile = os.path.join(self.db_name, 'list.txt')
self.image_dims = image_dims
if image_dims[2] == 3:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = None
self.entries_count = None
self.distribution = None
self.create_db_log_file = "create_%s.log" % db_name
def __getstate__(self):
d = super(CreateDbTask, self).__getstate__()
if 'create_db_log' in d:
# don't save file handle
del d['create_db_log']
if 'labels' in d:
del d['labels']
return d
def __setstate__(self, state):
super(CreateDbTask, self).__setstate__(state)
if self.pickver_task_createdb <= 1:
if self.image_dims[2] == 1:
self.image_channel_order = None
elif self.encode:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = 'RGB'
if self.pickver_task_createdb <= 2:
if hasattr(self, 'encode'):
if self.encode:
self.encoding = 'jpg'
else:
self.encoding = 'none'
delattr(self, 'encode')
else:
self.encoding = 'none'
self.pickver_task_createdb = PICKLE_VERSION
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
if not hasattr(self, 'compression') or self.compression is None:
self.compression = 'none'
@override
def name(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'Create DB (train)'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'Create DB (val)'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'Create DB (test)'
else:
return 'Create DB (%s)' % self.db_name
@override
def before_run(self):
super(CreateDbTask, self).before_run()
self.create_db_log = open(self.path(self.create_db_log_file), 'a')
@override
def html_id(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'task-create_db-train'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'task-create_db-val'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'task-create_db-test'
else:
return super(CreateDbTask, self).html_id()
@override
def offer_resources(self, resources):
key = 'create_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
'tools', 'create_db.py'),
self.path(self.input_file),
self.path(self.db_name),
self.image_dims[1],
self.image_dims[0],
'--backend=%s' % self.backend,
'--channels=%s' % self.image_dims[2],
'--resize_mode=%s' % self.resize_mode,
]
if self.mean_file is not None:
args.append('--mean_file=%s' % self.path(self.mean_file))
# Add a visual mean_file
args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
if self.image_folder:
args.append('--image_folder=%s' % self.image_folder)
if self.shuffle:
args.append('--shuffle')
if self.encoding and self.encoding != 'none':
args.append('--encoding=%s' % self.encoding)
if self.compression and self.compression != 'none':
args.append('--compression=%s' % self.compression)
if self.backend == 'hdf5':
args.append('--hdf5_dset_limit=%d' % 2**31)
return args
@override
def process_output(self, line):
from digits.webapp import socketio
self.create_db_log.write('%s\n' % line)
self.create_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1))/int(match.group(2))
self.emit_progress_update()
return True
# distribution
match = re.match(r'Category (\d+) has (\d+)', message)
if match and self.labels_file is not None:
if not hasattr(self, 'distribution') or self.distribution is None:
self.distribution = {}
self.distribution[match.group(1)] = int(match.group(2))
data = self.distribution_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'distribution',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
return True
# result
match = re.match(r'(\d+) images written to database', message)
if match:
self.entries_count = int(match.group(1))
self.logger.debug(message)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
@override
def after_run(self):
from digits.webapp import socketio
super(CreateDbTask, self).after_run()
self.create_db_log.close()
if self.backend == 'lmdb':
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'exploration-ready',
},
namespace='/jobs',
room=self.job_id,
)
elif self.backend == 'hdf5':
# add more path information to the list of h5 files
lines = None
with open(self.path(self.textfile)) as infile:
lines = infile.readlines()
with open(self.path(self.textfile), 'w') as outfile:
for line in lines:
# XXX this works because the model job will be in an adjacent folder
outfile.write('%s\n' % os.path.join(
'..', self.job_id, self.db_name, line.strip()))
if self.mean_file:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'mean-image',
# XXX Can't use url_for here because we don't have a request context
'data': '/files/' + self.path('mean.jpg', relative=True),
},
namespace='/jobs',
room=self.job_id,
)
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self, 'labels_file'), 'labels_file not set'
assert self.labels_file, 'labels_file not set'
assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
labels = []
with open(self.path(self.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def distribution_data(self):
"""
Returns distribution data for a C3.js graph
"""
if self.distribution is None:
return None
try:
labels = self.get_labels()
except AssertionError:
return None
if len(self.distribution.keys()) != len(labels):
return None
values = ['Count']
titles = []
for key, value in sorted(
self.distribution.items(),
key=operator.itemgetter(1),
reverse=True):
values.append(value)
titles.append(labels[int(key)])
return {
'data': {
'columns': [values],
'type': 'bar'
},
'axis': {
'x': {
'type': 'category',
'categories': titles,
}
},
}<|fim▁end|> | |
<|file_name|>angular-mocks.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for Angular JS 1.3 (ngMock, ngMockE2E module)
// Project: http://angularjs.org
// Definitions by: Diego Vilar <http://github.com/diegovilar>, Tony Curtis <http://github.com/daltin>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/// <reference path="angular.d.ts" />
declare module "angular-mocks/ngMock" {
var _:string;
export = _;
}<|fim▁hole|>declare module "angular-mocks/ngMockE2E" {
var _:string;
export = _;
}
declare module "angular-mocks/ngAnimateMock" {
var _:string;
export = _;
}
///////////////////////////////////////////////////////////////////////////////
// ngMock module (angular-mocks.js)
///////////////////////////////////////////////////////////////////////////////
declare namespace angular {
///////////////////////////////////////////////////////////////////////////
// AngularStatic
// We reopen it to add the MockStatic definition
///////////////////////////////////////////////////////////////////////////
interface IAngularStatic {
mock: IMockStatic;
}
// see https://docs.angularjs.org/api/ngMock/function/angular.mock.inject
interface IInjectStatic {
(...fns:Function[]): any;
(...inlineAnnotatedConstructor:any[]): any; // this overload is undocumented, but works
strictDi(val?:boolean): void;
}
interface IMockStatic {
// see https://docs.angularjs.org/api/ngMock/function/angular.mock.dump
dump(obj:any): string;
inject: IInjectStatic
// see https://docs.angularjs.org/api/ngMock/function/angular.mock.module
module: {
(...modules:any[]): any;
sharedInjector(): void;
}
// see https://docs.angularjs.org/api/ngMock/type/angular.mock.TzDate
TzDate(offset:number, timestamp:number): Date;
TzDate(offset:number, timestamp:string): Date;
}
///////////////////////////////////////////////////////////////////////////
// ExceptionHandlerService
// see https://docs.angularjs.org/api/ngMock/service/$exceptionHandler
// see https://docs.angularjs.org/api/ngMock/provider/$exceptionHandlerProvider
///////////////////////////////////////////////////////////////////////////
interface IExceptionHandlerProvider extends IServiceProvider {
mode(mode:string): void;
}
///////////////////////////////////////////////////////////////////////////
// TimeoutService
// see https://docs.angularjs.org/api/ngMock/service/$timeout
// Augments the original service
///////////////////////////////////////////////////////////////////////////
interface ITimeoutService {
flush(delay?:number): void;
flushNext(expectedDelay?:number): void;
verifyNoPendingTasks(): void;
}
///////////////////////////////////////////////////////////////////////////
// IntervalService
// see https://docs.angularjs.org/api/ngMock/service/$interval
// Augments the original service
///////////////////////////////////////////////////////////////////////////
interface IIntervalService {
flush(millis?:number): number;
}
///////////////////////////////////////////////////////////////////////////
// LogService
// see https://docs.angularjs.org/api/ngMock/service/$log
// Augments the original service
///////////////////////////////////////////////////////////////////////////
interface ILogService {
assertEmpty(): void;
reset(): void;
}
interface ILogCall {
logs: string[];
}
///////////////////////////////////////////////////////////////////////////
// ControllerService mock
// see https://docs.angularjs.org/api/ngMock/service/$controller
// This interface extends http://docs.angularjs.org/api/ng.$controller
///////////////////////////////////////////////////////////////////////////
interface IControllerService {
// Although the documentation doesn't state this, locals are optional
<T>(controllerConstructor:new (...args:any[]) => T, locals?:any, bindings?:any): T;
<T>(controllerConstructor:Function, locals?:any, bindings?:any): T;
<T>(controllerName:string, locals?:any, bindings?:any): T;
}
///////////////////////////////////////////////////////////////////////////
// ComponentControllerService
// see https://docs.angularjs.org/api/ngMock/service/$componentController
///////////////////////////////////////////////////////////////////////////
interface IComponentControllerService {
// TBinding is an interface exposed by a component as per John Papa's style guide
// https://github.com/johnpapa/angular-styleguide/blob/master/a1/README.md#accessible-members-up-top
<T, TBinding>(componentName:string, locals:{ $scope: IScope, [key: string]: any }, bindings?:TBinding, ident?:string): T;
}
///////////////////////////////////////////////////////////////////////////
// HttpBackendService
// see https://docs.angularjs.org/api/ngMock/service/$httpBackend
///////////////////////////////////////////////////////////////////////////
interface IHttpBackendService {
/**
* Flushes all pending requests using the trained responses.
* @param count Number of responses to flush (in the order they arrived). If undefined, all pending requests will be flushed.
*/
flush(count?:number): void;
/**
* Resets all request expectations, but preserves all backend definitions.
*/
resetExpectations(): void;
/**
* Verifies that all of the requests defined via the expect api were made. If any of the requests were not made, verifyNoOutstandingExpectation throws an exception.
*/
verifyNoOutstandingExpectation(): void;
/**
* Verifies that there are no outstanding requests that need to be flushed.
*/
verifyNoOutstandingRequest(): void;
/**
* Creates a new request expectation.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param method HTTP method.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
expect(method:string, url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object | ((object:Object) => boolean)) :mock.IRequestHandler;
/**
* Creates a new request expectation for DELETE requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url is as expected.
* @param headers HTTP headers object to be compared with the HTTP headers in the request.
*/
expectDELETE(url:string | RegExp | ((url:string) => boolean), headers?:Object): mock.IRequestHandler;
/**
* Creates a new request expectation for GET requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param headers HTTP headers object to be compared with the HTTP headers in the request.
*/
expectGET(url:string | RegExp | ((url:string) => boolean), headers?:Object): mock.IRequestHandler;
/**
* Creates a new request expectation for HEAD requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param headers HTTP headers object to be compared with the HTTP headers in the request.
*/
expectHEAD(url:string | RegExp | ((url:string) => boolean), headers?:Object): mock.IRequestHandler;
/**
* Creates a new request expectation for JSONP requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
*/
expectJSONP(url:string | RegExp | ((url:string) => boolean)): mock.IRequestHandler;
/**
* Creates a new request expectation for PATCH requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
expectPATCH(url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object): mock.IRequestHandler;
/**
* Creates a new request expectation for POST requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
expectPOST(url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object): mock.IRequestHandler;
/**
* Creates a new request expectation for PUT requests.
* Throws a preformatted error if expectation(s) don't match supplied string, regular expression, object, or if function returns false.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
expectPUT(url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object): mock.IRequestHandler;
/**
* Creates a new backend definition.
* Returns an object with respond method that controls how a matched request is handled.
* @param method HTTP method.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
when(method:string, url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for DELETE requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenDELETE(url:string | RegExp | ((url:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for GET requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenGET(url:string | RegExp | ((url:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for HEAD requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenHEAD(url:string | RegExp | ((url:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for JSONP requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenJSONP(url:string | RegExp | ((url:string) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for PATCH requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenPATCH(url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for POST requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenPOST(url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
/**
* Creates a new backend definition for PUT requests.
* Returns an object with respond method that controls how a matched request is handled.
* @param url HTTP url string, regular expression or function that receives a url and returns true if the url matches the current expctation.
* @param data HTTP request body string, json object, regular expression or function that receives the data and returns true if the data matches the current expectation.
* @param headers HTTP headers object or function that receives the headers and returns true if the headers match the current expectation.
*/
whenPUT(url:string | RegExp | ((url:string) => boolean), data?:string | RegExp | Object | ((data:string) => boolean), headers?:Object | ((object:Object) => boolean)): mock.IRequestHandler;
}
export module mock {
// returned interface by the the mocked HttpBackendService expect/when methods
interface IRequestHandler {
/**
* Controls the response for a matched request using a function to construct the response.
* Returns the RequestHandler object for possible overrides.
* @param func Function that receives the request HTTP method, url, data, and headers and returns an array containing response status (number), data, headers, and status text.
*/
respond(func:((method:string, url:string, data:string | Object, headers:Object) => [number, string | Object, Object, string])): IRequestHandler;
/**
* Controls the response for a matched request using supplied static data to construct the response.
* Returns the RequestHandler object for possible overrides.
* @param status HTTP status code to add to the response.
* @param data Data to add to the response.
* @param headers Headers object to add to the response.
* @param responseText Response text to add to the response.
*/
respond(status:number, data:string | Object, headers?:Object, responseText?:string): IRequestHandler;
/**
* Controls the response for a matched request using the HTTP status code 200 and supplied static data to construct the response.
* Returns the RequestHandler object for possible overrides.
* @param data Data to add to the response.
* @param headers Headers object to add to the response.
* @param responseText Response text to add to the response.
*/
respond(data:string | Object, headers?:Object, responseText?:string): IRequestHandler;
// Available when ngMockE2E is loaded
/**
* Any request matching a backend definition or expectation with passThrough handler will be passed through to the real backend (an XHR request will be made to the server.)
*/
passThrough(): IRequestHandler;
}
}
}
///////////////////////////////////////////////////////////////////////////////
// functions attached to global object (window)
///////////////////////////////////////////////////////////////////////////////
//Use `angular.mock.module` instead of `module`, as `module` conflicts with commonjs.
//declare var module: (...modules: any[]) => any;
declare var inject:angular.IInjectStatic;<|fim▁end|> | |
<|file_name|>GSPoints_Distances.cpp<|end_file_name|><|fim▁begin|>/**********************************************************
* Version $Id: GSPoints_Distances.cpp 1921 2014-01-09 10:24:11Z oconrad $
*********************************************************/
///////////////////////////////////////////////////////////
// //
// SAGA //
// //
// System for Automated Geoscientific Analyses //
// //
// Module Library: //
// statistics_points //
// //
//-------------------------------------------------------//
// //
// GSPoints_Distances.cpp //
// //
// Copyright (C) 2010 by //
// Olaf Conrad //
// //
//-------------------------------------------------------//
// //
// This file is part of 'SAGA - System for Automated //
// Geoscientific Analyses'. SAGA is free software; you //
// can redistribute it and/or modify it under the terms //
// of the GNU General Public License as published by the //
// Free Software Foundation; version 2 of the License. //
// //
// SAGA is distributed in the hope that it will be //
// useful, but WITHOUT ANY WARRANTY; without even the //
// implied warranty of MERCHANTABILITY or FITNESS FOR A //
// PARTICULAR PURPOSE. See the GNU General Public //
// License for more details. //
// //
// You should have received a copy of the GNU General //
// Public License along with this program; if not, //
// write to the Free Software Foundation, Inc., //
// 51 Franklin Street, 5th Floor, Boston, MA 02110-1301, //
// USA. //
// //
//-------------------------------------------------------//
// //
// e-mail: oconrad@saga-gis.org //
// //
// contact: Olaf Conrad //
// Institute of Geography //
// University of Hamburg //
// Germany //
// //
///////////////////////////////////////////////////////////
//---------------------------------------------------------
///////////////////////////////////////////////////////////
// //
// //
// //
///////////////////////////////////////////////////////////
//---------------------------------------------------------
#include "GSPoints_Distances.h"
///////////////////////////////////////////////////////////
// //
// //
// //
///////////////////////////////////////////////////////////
//---------------------------------------------------------
CGSPoints_Distances::CGSPoints_Distances(void)
{
CSG_Parameter *pNode;
//-----------------------------------------------------
Set_Name (_TL("Minimum Distance Analysis"));
Set_Author (SG_T("O.Conrad (c) 2010"));
Set_Description(
_TL("")
);
//-----------------------------------------------------
pNode = Parameters.Add_Shapes(
NULL , "POINTS" , _TL("Points"),
_TL(""),
PARAMETER_INPUT, SHAPE_TYPE_Point<|fim▁hole|>
Parameters.Add_Table(
NULL , "TABLE" , _TL("Minimum Distance Analysis"),
_TL(""),
PARAMETER_OUTPUT
);
}
///////////////////////////////////////////////////////////
// //
// //
// //
///////////////////////////////////////////////////////////
//---------------------------------------------------------
#define SET_VALUE(s, v) { pRecord = pTable->Add_Record(); pRecord->Set_Value(0, s); pRecord->Set_Value(1, v); }
//---------------------------------------------------------
bool CGSPoints_Distances::On_Execute(void)
{
//-----------------------------------------------------
CSG_Shapes *pPoints = Parameters("POINTS") ->asShapes();
CSG_Table *pTable = Parameters("TABLE") ->asTable();
//-----------------------------------------------------
CSG_PRQuadTree QT(pPoints, 0);
CSG_Simple_Statistics s;
double x, y, z;
for(int iPoint=0; iPoint<pPoints->Get_Count() && Set_Progress(iPoint, pPoints->Get_Count()); iPoint++)
{
TSG_Point p = pPoints->Get_Shape(iPoint)->Get_Point(0);
if( QT.Select_Nearest_Points(p.x, p.y, 2) && QT.Get_Selected_Point(1, x, y, z) && (x != p.x || y != p.y) )
{
s.Add_Value(SG_Get_Distance(x, y, p.x, p.y));
}
}
//-----------------------------------------------------
if( s.Get_Count() > 0 )
{
CSG_Table_Record *pRecord;
pTable->Destroy();
pTable->Set_Name(CSG_String::Format(SG_T("%s [%s]"), _TL("Minimum Distance Analysis"), pPoints->Get_Name()));
pTable->Add_Field(SG_T("NAME") , SG_DATATYPE_String);
pTable->Add_Field(SG_T("VALUE") , SG_DATATYPE_Double);
SET_VALUE(_TL("Mean Average") , s.Get_Mean());
SET_VALUE(_TL("Minimum") , s.Get_Minimum());
SET_VALUE(_TL("Maximum") , s.Get_Maximum());
SET_VALUE(_TL("Standard Deviation") , s.Get_StdDev());
SET_VALUE(_TL("Duplicates") , pPoints->Get_Count() - s.Get_Count());
DataObject_Update(pTable, SG_UI_DATAOBJECT_SHOW);
return( true );
}
Message_Dlg(_TL("not enough observations"));
return( false );
}
///////////////////////////////////////////////////////////
// //
// //
// //
///////////////////////////////////////////////////////////
//---------------------------------------------------------<|fim▁end|> | ); |
<|file_name|>TestProtocolBuffersDeserializers.java<|end_file_name|><|fim▁begin|>package org.jsense.serialize;
import com.google.common.collect.ImmutableList;
import org.joda.time.Instant;
import org.joda.time.ReadableInstant;
import org.jsense.AccelerometerEvent;
import org.jsense.ModelFactory;
import org.junit.Before;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Iterator;
import static org.junit.Assert.*;
/**
* Tests the {@link org.jsense.serialize.PbAccelerometerEventDeserializer}.
*
* @author Markus Wüstenberg
*/
public class TestProtocolBuffersDeserializers {
private static final int SEED = 88951;
private static final ReadableInstant ABSOLUTE_TIMESTAMP = new Instant(123L);
private static final long RELATIVE_TIMESTAMP = 124L;
private static final float X = 0.1f;
private static final float Y = 0.2f;
private static final float Z = 0.3f;
private AccelerometerEvent event1, event2;
@Before
public void setUp() throws IOException {
ModelFactory.setSeed(SEED);
event1 = ModelFactory.newRandomAccelerometerEvent();
event2 = ModelFactory.newRandomAccelerometerEvent();
}
@Test
public void deserializeSingleAccelerometerEvent() throws IOException {
Deserializer<AccelerometerEvent> deserializer = new PbAccelerometerEventDeserializer(new ByteArrayInputStream(getByteArrayFrom(ImmutableList.of(event1))));
Iterable<AccelerometerEvent> events = deserializer.deserialize();
Iterator<AccelerometerEvent> eventsIterator = events.iterator();
assertTrue(eventsIterator.hasNext());
assertEquals(event1, eventsIterator.next());
}
@Test
public void deserializeMultipleAccelerometerEvents() throws IOException {
Deserializer<AccelerometerEvent> deserializer = new PbAccelerometerEventDeserializer(new ByteArrayInputStream(getByteArrayFrom(ImmutableList.of(event1, event2))));
Iterable<AccelerometerEvent> events = deserializer.deserialize();
Iterator<AccelerometerEvent> eventsIterator = events.iterator();
assertTrue(eventsIterator.hasNext());
assertEquals(event1, eventsIterator.next());
assertTrue(eventsIterator.hasNext());
assertEquals(event2, eventsIterator.next());
}
@Test(expected = NullPointerException.class)
public void sourceCantBeNull() throws IOException {
new PbAccelerometerEventDeserializer(null);
}
@Test
public void deserializeMultipleAccelerometerEventsAndDontKeepState() throws IOException {
AccelerometerEvent eventWithRelativeTimestamp = AccelerometerEvent.newBuilder()
.setAbsoluteTimestamp(ABSOLUTE_TIMESTAMP)
.setRelativeTimestamp(RELATIVE_TIMESTAMP)
.setX(X)
.setY(Y)
.setZ(Z)
.build();
AccelerometerEvent eventNoRelativeTimestamp = AccelerometerEvent.newBuilder()
.setAbsoluteTimestamp(ABSOLUTE_TIMESTAMP)
.setX(X)
.setY(Y)<|fim▁hole|> .setZ(Z)
.build();
ByteArrayInputStream serialized = new ByteArrayInputStream(getByteArrayFrom(ImmutableList.of(eventWithRelativeTimestamp, eventNoRelativeTimestamp)));
Deserializer<AccelerometerEvent> deserializer = new PbAccelerometerEventDeserializer(serialized);
Iterable<AccelerometerEvent> events = deserializer.deserialize();
Iterator<AccelerometerEvent> eventsIterator = events.iterator();
assertTrue(eventsIterator.next().hasRelativeTimestamp());
assertFalse(eventsIterator.next().hasRelativeTimestamp());
}
private byte[] getByteArrayFrom(Iterable<AccelerometerEvent> events) throws IOException {
final ByteArrayOutputStream out = new ByteArrayOutputStream();
new PbAccelerometerEventSerializer(out).serialize(events).flush();
return out.toByteArray();
}
}<|fim▁end|> | |
<|file_name|>ident.rs<|end_file_name|><|fim▁begin|>use super::error::TomlHelper;
use log::error;
use regex::Regex;
use std::fmt;
use toml::Value;
#[derive(Clone, Debug)]
pub enum Ident {
Name(String),
Pattern(Box<Regex>),
}
impl fmt::Display for Ident {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Ident::Name(name) => f.write_str(name),
Ident::Pattern(regex) => write!(f, "Regex {}", regex),
}
}
}
impl PartialEq for Ident {
fn eq(&self, other: &Ident) -> bool {
pub use self::Ident::*;
match (self, other) {
(Name(s1), Name(s2)) => s1 == s2,
(Pattern(r1), Pattern(r2)) => r1.as_str() == r2.as_str(),
_ => false,
}
}
}
impl Eq for Ident {}
impl Ident {
pub fn parse(toml: &Value, object_name: &str, what: &str) -> Option<Ident> {
match toml.lookup("pattern").and_then(Value::as_str) {
Some(s) => Regex::new(&format!("^{}$", s))<|fim▁hole|> .map(Ident::Pattern)
.map_err(|e| {
error!(
"Bad pattern `{}` in {} for `{}`: {}",
s, what, object_name, e
);
e
})
.ok(),
None => match toml.lookup("name").and_then(Value::as_str) {
Some(name) => {
if name.contains(['.', '+', '*'].as_ref()) {
error!(
"Should be `pattern` instead of `name` in {} for `{}`",
what, object_name
);
None
} else {
Some(Ident::Name(name.into()))
}
}
None => None,
},
}
}
pub fn is_match(&self, name: &str) -> bool {
use self::Ident::*;
match self {
Name(n) => name == n,
Pattern(regex) => regex.is_match(name),
}
}
}<|fim▁end|> | .map(Box::new) |
<|file_name|>run_qap_func.py<|end_file_name|><|fim▁begin|>"""
run quality assurance measures on functional data
"""
import sys,glob
sys.path.append('/corral-repl/utexas/poldracklab/software_lonestar/quality-assessment-protocol')
import os
import numpy
from run_shell_cmd import run_shell_cmd
from compute_fd import compute_fd
from qap import load_func,load_image, load_mask, summary_mask, cnr,efc,fber,fwhm,artifacts,ghost_all,calc_mean_func,calc_dvars,mean_outlier_timepoints,mean_quality_timepoints
basedir='/corral-repl/utexas/poldracklab/data/selftracking/shared_dataset'
funcfiles=glob.glob(os.path.join(basedir,'sub*/BOLD/resting_run001/bold.nii.gz'))
funcdata={'subcode':[],'func_efc':[],'func_fber':[],'func_fwhm':[],'func_gsr':[],'func_dvars':[],'func_outlier':[],'func_quality':[],'func_mean_fd':[],'func_num_fd':[],'func_perc_fd':[]}
#for funcfile in funcfiles:
func_file=funcfiles[0]
if 1:
subcode=func_file.split('/')[7]
print 'processing',subcode
funcdata['subcode'].append(subcode)
mask_file=func_file.replace('.nii.gz','_brain_mask.nii.gz')
if not os.path.exists(mask_file):
cmd='bet %s %s -m -F'%(func_file,func_file.replace('.nii.gz','_brain'))
print cmd
run_shell_cmd(cmd)
func_data = load_func(func_file,mask_file)<|fim▁hole|> func_efc = efc(func_data)
#func_fber = fber(func_data, func_mask)
#func_fwhm = fwhm(func_file, mask_file, out_vox=False)
print 'running ghost_all'
_,func_gsr,_=ghost_all(mean_func_data,func_mask)
print 'running calc_dvars'
func_dvars = calc_dvars(func_data, output_all=False)
print 'running mean_outlier_timepoints'
func_outlier = mean_outlier_timepoints(func_file, mask_file, out_fraction=True)
print 'running compute_fd'
motpars=numpy.loadtxt(func_file.replace('.nii.gz','_mcf.par'))
fd=compute_fd(motpars)
sdf
funcdata['mean_gm'].append(mean_gm)
funcdata['mean_wm'].append(mean_wm)
funcdata['std_bg'].append(std_bg)
funcdata['anat_efc'].append(anat_efc)
funcdata['anat_fber'].append(anat_fber)
funcdata['anat_fwhm'].append(anat_fwhm)
funcdata['anat_qi1'].append(anat_qi1)<|fim▁end|> | mean_func_data = calc_mean_func(func_file)
func_mask = load_mask(mask_file)
|
<|file_name|>DurationRangeJsonUnmarshaller.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.inspector.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.inspector.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* DurationRange JSON Unmarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DurationRangeJsonUnmarshaller implements Unmarshaller<DurationRange, JsonUnmarshallerContext> {
public DurationRange unmarshall(JsonUnmarshallerContext context) throws Exception {
DurationRange durationRange = new DurationRange();
int originalDepth = context.getCurrentDepth();
String currentParentElement = context.getCurrentParentElement();
int targetDepth = originalDepth + 1;
JsonToken token = context.getCurrentToken();
if (token == null)
token = context.nextToken();
if (token == VALUE_NULL) {
return null;
}
while (true) {
if (token == null)
break;
if (token == FIELD_NAME || token == START_OBJECT) {
if (context.testExpression("minSeconds", targetDepth)) {
context.nextToken();
durationRange.setMinSeconds(context.getUnmarshaller(Integer.class).unmarshall(context));
}
if (context.testExpression("maxSeconds", targetDepth)) {
context.nextToken();
durationRange.setMaxSeconds(context.getUnmarshaller(Integer.class).unmarshall(context));
}
} else if (token == END_ARRAY || token == END_OBJECT) {
if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
if (context.getCurrentDepth() <= originalDepth)
break;
}
}
token = context.nextToken();
}
return durationRange;
}<|fim▁hole|>
public static DurationRangeJsonUnmarshaller getInstance() {
if (instance == null)
instance = new DurationRangeJsonUnmarshaller();
return instance;
}
}<|fim▁end|> |
private static DurationRangeJsonUnmarshaller instance; |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/*!
* Normalization processes a parse tree until it is in suitable form to
* be converted to the more canonical form. This is done as a series of
* passes, each contained in their own module below.
*/
use grammar::parse_tree as pt;
use grammar::repr as r;
pub type NormResult<T> = Result<T, NormError>;
#[derive(Clone, Debug)]
pub struct NormError {
pub message: String,
pub span: pt::Span,
}
macro_rules! return_err {
($span: expr, $($args:expr),+) => {
return Err(NormError {
message: format!($($args),+),
span: $span
});
}
}
pub fn normalize(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, true)
}
/// for unit tests, it is convenient to skip the validation step
#[cfg(test)]
pub fn normalize_without_validating(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, false)
}
fn normalize_helper(grammar: pt::Grammar, validate: bool) -> NormResult<r::Grammar> {<|fim▁hole|> if validate { try!(prevalidate::validate(&grammar)); }
let grammar = try!(resolve::resolve(grammar));
let grammar = try!(macro_expand::expand_macros(grammar));
if validate { try!(postvalidate::validate(&grammar)); }
let types = try!(tyinfer::infer_types(&grammar));
lower::lower(grammar, types)
}
// These are executed *IN ORDER*:
// Check most safety conditions.
mod prevalidate;
// Resolve identifiers into terminals/nonterminals etc.
mod resolve;
// Expands macros and expressions
//
// X = ...1 Comma<X> (X Y Z) ...2
//
// to
//
// X = ...1 `Comma<X>` `(X Y Z)` ...2
// `Comma_X`: Vec<<X>> = ...;
// `(X Y Z)` = X Y Z;
//
// AFTER THIS POINT: No more macros, macro references, guarded
// alternatives, repeats, or expr symbols, though type indirections
// may occur.
mod macro_expand;
// Check some safety conditions that can only be tested
// after macro expansion.
mod postvalidate;
// Computes types where the user omitted them (or from macro
// byproducts).
//
// AFTER THIS POINT: there is a separate `repr::Types` table
// providing all nonterminals with an explicit type.
mod tyinfer;
// Lowers the parse tree to the repr notation.
mod lower;
///////////////////////////////////////////////////////////////////////////
// Shared routines
mod norm_util;<|fim▁end|> | |
<|file_name|>CoordinateMap.java<|end_file_name|><|fim▁begin|>// Copyright 2012 Google Inc. All Rights Reserved.
//<|fim▁hole|>//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.collide.client.editor;
import com.google.collide.client.util.logging.Log;
import com.google.collide.json.shared.JsonArray;
import com.google.collide.shared.document.Document;
import com.google.collide.shared.document.Line;
import com.google.collide.shared.document.LineInfo;
import com.google.collide.shared.document.anchor.Anchor;
import com.google.collide.shared.document.anchor.Anchor.RemovalStrategy;
import com.google.collide.shared.document.anchor.AnchorManager;
import com.google.collide.shared.document.anchor.AnchorType;
import com.google.collide.shared.util.ListenerRegistrar.Remover;
import com.google.collide.shared.util.SortedList;
import com.google.collide.shared.util.SortedList.OneWayIntComparator;
/**
* This class takes care of mapping between the different coordinates used by
* the editor. The two supported systems are:
* <ul>
* <li>Offset (x,y) - in pixels, relative to the top left of line 0 in the
* current document.
* <li>Line (line, column) - the real line number and column, taking into
* account spacer objects in between lines. Lines and columns are 0-indexed.
* </ul>
*/
class CoordinateMap implements Document.LineListener {
interface DocumentSizeProvider {
float getEditorCharacterWidth();
int getEditorLineHeight();
void handleSpacerHeightChanged(Spacer spacer, int oldHeight);
}
private static class OffsetCache {
private static final SortedList.Comparator<OffsetCache> COMPARATOR =
new SortedList.Comparator<OffsetCache>() {
@Override
public int compare(OffsetCache a, OffsetCache b) {
return a.offset - b.offset;
}
};
private static final SortedList.OneWayIntComparator<OffsetCache> Y_OFFSET_ONE_WAY_COMPARATOR =
new SortedList.OneWayIntComparator<OffsetCache>() {
@Override
public int compareTo(OffsetCache s) {
return value - s.offset;
}
};
private static final SortedList.OneWayIntComparator<OffsetCache> LINE_NUMBER_ONE_WAY_COMPARATOR
= new SortedList.OneWayIntComparator<OffsetCache>() {
@Override
public int compareTo(OffsetCache s) {
return value - s.lineNumber;
}
};
private final int offset;
private final int height;
private final int lineNumber;
private OffsetCache(int offset, int lineNumber, int height) {
this.offset = offset;
this.height = height;
this.lineNumber = lineNumber;
}
}
private static final OffsetCache BEGINNING_EMPTY_OFFSET_CACHE = new OffsetCache(0, 0, 0);
private static final AnchorType SPACER_ANCHOR_TYPE = AnchorType.create(CoordinateMap.class,
"spacerAnchorType");
private static final Spacer.Comparator SPACER_COMPARATOR = new Spacer.Comparator();
private static final Spacer.OneWaySpacerComparator SPACER_ONE_WAY_COMPARATOR =
new Spacer.OneWaySpacerComparator();
/** Used by {@link #getPrecedingOffsetCache(int, int)} */
private static final int IGNORE = Integer.MIN_VALUE;
private Document document;
private DocumentSizeProvider documentSizeProvider;
/** List of offset cache items, sorted by the offset */
private SortedList<OffsetCache> offsetCache;
/**
* True if there is at least one spacer in the editor, false otherwise (false
* means a simple height / line height calculation can be used)
*/
private boolean requiresMapping;
/** Sorted by line number */
private SortedList<Spacer> spacers;
/** Summation of all spacers' heights */
private int totalSpacerHeight;
/** Remover for listener */
private Remover documentLineListenerRemover;
CoordinateMap(DocumentSizeProvider documentSizeProvider) {
this.documentSizeProvider = documentSizeProvider;
requiresMapping = false;
}
int convertYToLineNumber(int y) {
if (y < 0) {
return 0;
}
int lineHeight = documentSizeProvider.getEditorLineHeight();
if (!requiresMapping) {
return y / lineHeight;
}
OffsetCache precedingOffsetCache = getPrecedingOffsetCache(y, IGNORE);
int precedingOffsetCacheBottom = precedingOffsetCache.offset + precedingOffsetCache.height;
int lineNumberRelativeToOffsetCacheLine = (y - precedingOffsetCacheBottom) / lineHeight;
if (y < precedingOffsetCacheBottom) {
// y is inside the spacer
return precedingOffsetCache.lineNumber;
} else {
return precedingOffsetCache.lineNumber + lineNumberRelativeToOffsetCacheLine;
}
}
/**
* Returns the top of the given line.
*/
int convertLineNumberToY(int lineNumber) {
int lineHeight = documentSizeProvider.getEditorLineHeight();
if (!requiresMapping) {
return lineNumber * lineHeight;
}
OffsetCache precedingOffsetCache = getPrecedingOffsetCache(IGNORE, lineNumber);
int precedingOffsetCacheBottom = precedingOffsetCache.offset + precedingOffsetCache.height;
int offsetRelativeToOffsetCacheBottom =
(lineNumber - precedingOffsetCache.lineNumber) * lineHeight;
return precedingOffsetCacheBottom + offsetRelativeToOffsetCacheBottom;
}
/**
* Returns the first {@link OffsetCache} that is positioned less than or equal
* to {@code y} or {@code lineNumber}. This methods fills the
* {@link #offsetCache} if necessary ensuring the returned {@link OffsetCache}
* is up-to-date.
*
* @param y the y, or {@link #IGNORE} if looking up by {@code lineNumber}
* @param lineNumber the line number, or {@link #IGNORE} if looking up by
* {@code y}
*/
private OffsetCache getPrecedingOffsetCache(int y, int lineNumber) {
assert (y != IGNORE && lineNumber == IGNORE) || (lineNumber != IGNORE && y == IGNORE);
final int lineHeight = documentSizeProvider.getEditorLineHeight();
OffsetCache previousOffsetCache;
if (y != IGNORE) {
previousOffsetCache =
getCachedPrecedingOffsetCacheImpl(OffsetCache.Y_OFFSET_ONE_WAY_COMPARATOR, y);
} else {
previousOffsetCache =
getCachedPrecedingOffsetCacheImpl(OffsetCache.LINE_NUMBER_ONE_WAY_COMPARATOR, lineNumber);
}
if (previousOffsetCache == null) {
if (spacers.size() > 0 && spacers.get(0).getLineNumber() == 0) {
previousOffsetCache = createOffsetCache(0, 0, spacers.get(0).getHeight());
} else {
previousOffsetCache = BEGINNING_EMPTY_OFFSET_CACHE;
}
}
/*
* Optimization so the common case that the target has previously been
* computed requires no more computation
*/
int offsetCacheSize = offsetCache.size();
if (offsetCacheSize > 0
&& isTargetEarlierThanOffsetCache(y, lineNumber, offsetCache.get(offsetCacheSize - 1))) {
return previousOffsetCache;
}
// This will return this offset cache's matching spacer
int spacerPos = getPrecedingSpacerIndex(previousOffsetCache.lineNumber);
/*
* We want the spacer following this offset cache's spacer, or the first
* spacer if none were found
*/
spacerPos++;
for (int n = spacers.size(); spacerPos < n; spacerPos++) {
Spacer curSpacer = spacers.get(spacerPos);
int previousOffsetCacheBottom = previousOffsetCache.offset + previousOffsetCache.height;
int simpleLinesHeight =
(curSpacer.getLineNumber() - previousOffsetCache.lineNumber) * lineHeight;
if (simpleLinesHeight == 0) {
Log.warn(Spacer.class, "More than one spacer on line " + previousOffsetCache.lineNumber);
}
// Create an offset cache for this spacer
OffsetCache curOffsetCache =
createOffsetCache(previousOffsetCacheBottom + simpleLinesHeight,
curSpacer.getLineNumber(), curSpacer.getHeight());
if (isTargetEarlierThanOffsetCache(y, lineNumber, curOffsetCache)) {
return previousOffsetCache;
}
previousOffsetCache = curOffsetCache;
}
return previousOffsetCache;
}
/**
* Returns the {@link OffsetCache} instance in list that has the greatest
* value less than or equal to the given {@code value}. Returns null if there
* isn't one.
*
* This should only be used by {@link #getPrecedingOffsetCache(int, int)}.
*/
private OffsetCache getCachedPrecedingOffsetCacheImpl(
OneWayIntComparator<OffsetCache> comparator, int value) {
comparator.setValue(value);
int index = offsetCache.findInsertionIndex(comparator, false);
return index >= 0 ? offsetCache.get(index) : null;
}
private boolean isTargetEarlierThanOffsetCache(int y, int lineNumber, OffsetCache offsetCache) {
return ((y != IGNORE && y < offsetCache.offset) ||
(lineNumber != IGNORE && lineNumber < offsetCache.lineNumber));
}
private OffsetCache createOffsetCache(int offset, int lineNumber, int height) {
OffsetCache createdOffsetCache = new OffsetCache(offset, lineNumber, height);
offsetCache.add(createdOffsetCache);
return createdOffsetCache;
}
private int getPrecedingSpacerIndex(int lineNumber) {
SPACER_ONE_WAY_COMPARATOR.setValue(lineNumber);
return spacers.findInsertionIndex(SPACER_ONE_WAY_COMPARATOR, false);
}
/**
* Adds a spacer above the given lineInfo line with height heightPx and
* returns the created Spacer object.
*
* @param lineInfo the line before which the spacer will be inserted
* @param height the height in pixels of the spacer
*/
Spacer createSpacer(LineInfo lineInfo, int height, Buffer buffer, String cssClass) {
int lineNumber = lineInfo.number();
// create an anchor on the current line
Anchor anchor =
document.getAnchorManager().createAnchor(SPACER_ANCHOR_TYPE, lineInfo.line(), lineNumber,
AnchorManager.IGNORE_COLUMN);
anchor.setRemovalStrategy(RemovalStrategy.SHIFT);
// account for the height of the line the spacer is on
Spacer spacer = new Spacer(anchor, height, this, buffer, cssClass);
spacers.add(spacer);
totalSpacerHeight += height;
invalidateLineNumberAndFollowing(lineNumber);
requiresMapping = true;
return spacer;
}
boolean removeSpacer(Spacer spacer) {
int lineNumber = spacer.getLineNumber();
if (spacers.remove(spacer)) {
document.getAnchorManager().removeAnchor(spacer.getAnchor());
totalSpacerHeight -= spacer.getHeight();
invalidateLineNumberAndFollowing(lineNumber - 1);
updateRequiresMapping();
return true;
}
return false;
}
void handleDocumentChange(Document document) {
if (documentLineListenerRemover != null) {
documentLineListenerRemover.remove();
}
this.document = document;
spacers = new SortedList<Spacer>(SPACER_COMPARATOR);
offsetCache =
new SortedList<OffsetCache>(OffsetCache.COMPARATOR);
documentLineListenerRemover = document.getLineListenerRegistrar().add(this);
requiresMapping = false; // starts with no items in list
totalSpacerHeight = 0;
}
@Override
public void onLineAdded(Document document, int lineNumber, JsonArray<Line> addedLines) {
invalidateLineNumberAndFollowing(lineNumber);
}
@Override
public void onLineRemoved(Document document, int lineNumber, JsonArray<Line> removedLines) {
invalidateLineNumberAndFollowing(lineNumber);
}
/**
* Call this after any line changes (adding/deleting lines, changing line
* heights). Only invalidate (delete) cache items >= lineNumber, don't
* recalculate.
*/
void invalidateLineNumberAndFollowing(int lineNumber) {
OffsetCache.LINE_NUMBER_ONE_WAY_COMPARATOR.setValue(lineNumber);
int insertionIndex = offsetCache.findInsertionIndex(OffsetCache.LINE_NUMBER_ONE_WAY_COMPARATOR);
offsetCache.removeThisAndFollowing(insertionIndex);
}
private void updateRequiresMapping() {
// check to change active status
requiresMapping = spacers.size() > 0;
}
int getTotalSpacerHeight() {
return totalSpacerHeight;
}
void handleSpacerHeightChanged(Spacer spacer, int oldHeight) {
totalSpacerHeight -= oldHeight;
totalSpacerHeight += spacer.getHeight();
invalidateLineNumberAndFollowing(spacer.getLineNumber());
documentSizeProvider.handleSpacerHeightChanged(spacer, oldHeight);
}
}<|fim▁end|> | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at |
<|file_name|>tell.py<|end_file_name|><|fim▁begin|>import re
from .. import irc, var, ini
from ..tools import is_identified
# Require identification with NickServ to send messages.
def ident (f):
def check (user, channel, word):
if is_identified(user):
f(user, channel, word)
else:
irc.msg(channel, "{}: Identify with NickServ first.".format(user))
return check
# Insert a message monitor to look for user activity.
def ins_monitor (line_obj):
if line_obj.event in ["JOIN", "PRIVMSG"]:
send_messages(line_obj.user)
# Fill commands dictionary.
def ins_command ():
var.commands["tell"] = type("command", (object,), {})()
var.commands["tell"].method = leave_message
var.commands["tell"].aliases = [".tell", ".msg"]
var.commands["tell"].usage = ["{} user message - Leave a message to user."]
var.commands["listtell"] = type("command", (object,), {})()
var.commands["listtell"].method = list_messages
var.commands["listtell"].aliases = [".listtell", ".ltell", ".listtells", ".showtells"]
var.commands["listtell"].usage = ["{} - Check if you have any messages and show them."]
# Fill a space for the messages database.
def ins_db ():
var.data["messages"] = ini.fill_dict("messages.ini", "Messages")
# Turning list of strings into a list of tuples.
for user in var.data["messages"]:
msg_list = [(msg.split(" ~ ")[0], msg.split(" ~ ", 1)[1]) for msg in var.data["messages"][user]]
var.data["messages"][user] = msg_list
# Leave a message to someone.
def leave_message (user, channel, word):
# It needs a nickname and a message.<|fim▁hole|>
target = word[1]
message = " ".join(word[2:])
# Check if target is a valid nickname.
match = re.match("[a-zA-Z\[\]\\`_\^\{\|\}][a-zA-Z0-9\[\]\\`_\^\{\|\}]+", target)
if not match or (hasattr(match, "group") and match.group() != target):
irc.msg(channel, "{} is not a valid nickname.".format(target))
return
# Check for "hurr Imma tell myself something".
if target.lower() == user.lower():
irc.msg(channel, "{}: Do it yourself. I'm not .tell'ing you shit!".format(user))
return
# The bot won't tell itself something.
if target.lower() == irc.botnick.lower():
irc.msg(channel, "{}: I'm right here, say it to my face!".format(user))
return
# Check for repeated messages.
if target in var.data["messages"]:
if (user, message) in var.data["messages"][target]:
irc.msg(channel, "{}: You already left this message.".format(user))
return
# Create an empty list for users not in the database.
if target not in var.data["messages"]:
var.data["messages"][target] = []
# Append tuple and add to ini.
var.data["messages"][target].append((user, message))
message_list = ["{} ~ {}".format(pair[0], pair[1]) for pair in var.data["messages"][target]]
ini.add_to_ini("Messages", target, "\n".join(message_list), "messages.ini")
irc.msg(channel, "{}: Message stored.".format(user))
# Send a user stored messages.
def send_messages (user):
# Be case insensitive, please.
for nick in var.data["messages"]:
if user.lower() == nick.lower():
user = nick
# There's no use going on if the user isn't in the messages database.
if user not in var.data["messages"]:
return
if len(var.data["messages"][user]) > 4:
# Send the first 4 messages.
for pair in var.data["messages"][user][0:4]:
irc.msg(user, "{} sent you: {}".format(pair[0], pair[1]))
irc.msg(pair[0], "{} received your message.".format(user))
# Remove the sent messages.
st_messages = var.data["messages"][user][0:4]
for pair in st_messages:
var.data["messages"][user].remove(pair)
new_messages = ["{} ~ {}".format(pair[0], pair[1]) for pair in var.data["messages"][user]]
ini.add_to_ini("Messages", user, "\n".join(new_messages), "messages.ini")
irc.msg(user, "To reply to them, use .tell user message")
irc.msg(user, "You have more messages. Type \x034.showtells\x0f to view them.")
else:
# Send every message.
for pair in var.data["messages"][user]:
irc.msg(user, "{} sent you: {}".format(pair[0], pair[1]))
irc.msg(pair[0], "{} received your message.".format(user))
# Remove them.
del var.data["messages"][user]
ini.remove_from_ini("Messages", user, "messages.ini")
irc.msg(user, "To reply to them, use .tell user message")
# Send the rest of the messages.
def list_messages (user, channel, word):
# There's no use going on if the user isn't in the messages database.
if user not in var.data["messages"]:
irc.msg(channel, "{}: You don't have any messages.".format(user))
return
send_messages(user)
irc.msg(channel, "{}: Sent ;)".format(user))<|fim▁end|> | if len(word) < 3:
irc.msg(channel, "{}: Wrong syntax. Check .help".format(user))
return |
<|file_name|>pagers.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.CommitmentAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.CommitmentAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.CommitmentAggregatedList],
request: compute.AggregatedListRegionCommitmentsRequest,
response: compute.CommitmentAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListRegionCommitmentsRequest):
The initial request object.
response (google.cloud.compute_v1.types.CommitmentAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.<|fim▁hole|> """
self._method = method
self._request = compute.AggregatedListRegionCommitmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.CommitmentAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[Tuple[str, compute.CommitmentsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.CommitmentsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.CommitmentList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.CommitmentList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.CommitmentList],
request: compute.ListRegionCommitmentsRequest,
response: compute.CommitmentList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListRegionCommitmentsRequest):
The initial request object.
response (google.cloud.compute_v1.types.CommitmentList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListRegionCommitmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.CommitmentList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[compute.Commitment]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)<|fim▁end|> | |
<|file_name|>getifaddrs.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License,
// version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which
// licence you accepted on initial access to the Software (the "Licences").
//
// By contributing code to the SAFE Network Software, or to this project generally, you agree to be
// bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the
// Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR.
//
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
//
// Please review the Licences for the specific language governing permissions and limitations
// relating to use of the SAFE Network Software.
use std::net::{IpAddr, Ipv4Addr};
/// Details about an interface on this host
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct IfAddr {
/// The name of the interface
pub name: String,
/// The IP address of the interface
pub addr: IpAddr,
/// The netmask of the interface
pub netmask: IpAddr,
/// How to send a broadcast on the interface
pub broadcast: IpAddr,
}
impl IfAddr {
/// Create a new IfAddr
pub fn new() -> IfAddr {
IfAddr {
name: String::new(),
addr: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
netmask: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
broadcast: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))
}
}
}
#[cfg(not(windows))]
mod getifaddrs_posix {
use super::IfAddr;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::{mem, str};
use std::ffi::CStr;
use libc::consts::os::bsd44::{AF_INET, AF_INET6};
use libc::funcs::bsd43::getifaddrs as posix_getifaddrs;
use libc::funcs::bsd43::freeifaddrs as posix_freeifaddrs;
use libc::types::os::common::bsd44::ifaddrs as posix_ifaddrs;
use libc::types::os::common::bsd44::sockaddr as posix_sockaddr;
use libc::types::os::common::bsd44::sockaddr_in as posix_sockaddr_in;
use libc::types::os::common::bsd44::sockaddr_in6 as posix_sockaddr_in6;
#[allow(unsafe_code)]
fn sockaddr_to_ipaddr(sockaddr : *const posix_sockaddr) -> Option<IpAddr> {
if sockaddr.is_null() { return None }
if unsafe{*sockaddr}.sa_family as u32 == AF_INET as u32 {
let ref sa = unsafe{*(sockaddr as *const posix_sockaddr_in)};
Some(IpAddr::V4(Ipv4Addr::new(
((sa.sin_addr.s_addr>>0) & 255) as u8,
((sa.sin_addr.s_addr>>8) & 255) as u8,
((sa.sin_addr.s_addr>>16) & 255) as u8,
((sa.sin_addr.s_addr>>24) & 255) as u8,
)))
} else if unsafe{*sockaddr}.sa_family as u32 == AF_INET6 as u32 {
let ref sa = unsafe{*(sockaddr as *const posix_sockaddr_in6)};
// Ignore all fe80:: addresses as these are link locals
if sa.sin6_addr.s6_addr[0]==0x80fe { return None }
Some(IpAddr::V6(Ipv6Addr::new(
((sa.sin6_addr.s6_addr[0] & 255)<<8) | ((sa.sin6_addr.s6_addr[0]>>8) & 255),
((sa.sin6_addr.s6_addr[1] & 255)<<8) | ((sa.sin6_addr.s6_addr[1]>>8) & 255),
((sa.sin6_addr.s6_addr[2] & 255)<<8) | ((sa.sin6_addr.s6_addr[2]>>8) & 255),
((sa.sin6_addr.s6_addr[3] & 255)<<8) | ((sa.sin6_addr.s6_addr[3]>>8) & 255),
((sa.sin6_addr.s6_addr[4] & 255)<<8) | ((sa.sin6_addr.s6_addr[4]>>8) & 255),
((sa.sin6_addr.s6_addr[5] & 255)<<8) | ((sa.sin6_addr.s6_addr[5]>>8) & 255),
((sa.sin6_addr.s6_addr[6] & 255)<<8) | ((sa.sin6_addr.s6_addr[6]>>8) & 255),
((sa.sin6_addr.s6_addr[7] & 255)<<8) | ((sa.sin6_addr.s6_addr[7]>>8) & 255),
)))
}
else { None }
}
#[cfg(any(target_os = "linux", target_os = "android", target_os = "nacl"))]
fn do_broadcast(ifaddr : &posix_ifaddrs) -> IpAddr {
sockaddr_to_ipaddr(ifaddr.ifa_ifu)
.unwrap_or(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))
}
#[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "ios"))]
fn do_broadcast(ifaddr : &posix_ifaddrs) -> IpAddr {
sockaddr_to_ipaddr(ifaddr.ifa_dstaddr)
.unwrap_or(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))
}
/// Return a vector of IP details for all the valid interfaces on this host
#[allow(unsafe_code)]
pub fn getifaddrs() -> Vec<IfAddr> {
let mut ret = Vec::<IfAddr>::new();
let mut ifaddrs : *mut posix_ifaddrs;
unsafe {
ifaddrs = mem::uninitialized();
if -1 == posix_getifaddrs(&mut ifaddrs) {
panic!("failed to retrieve interface details from getifaddrs()");
}
}
let mut _ifaddr = ifaddrs;
let mut first = true;
while !_ifaddr.is_null() {
if first { first=false; }
else { _ifaddr = unsafe { (*_ifaddr).ifa_next }; }
if _ifaddr.is_null() { break; }
let ref ifaddr = unsafe { *_ifaddr };
// debug!("ifaddr1={}, next={}", _ifaddr as u64, ifaddr.ifa_next as u64);
if ifaddr.ifa_addr.is_null() {
continue;
}
let mut item = IfAddr::new();
let name = unsafe { CStr::from_ptr(ifaddr.ifa_name) }.to_bytes();
item.name = item.name + str::from_utf8(name).unwrap();
match sockaddr_to_ipaddr(ifaddr.ifa_addr) {
Some(a) => item.addr = a,
None => continue,
};
if let Some(a) = sockaddr_to_ipaddr(ifaddr.ifa_netmask) {
item.netmask = a
};
if (ifaddr.ifa_flags & 2 /*IFF_BROADCAST*/) != 0 {
item.broadcast = do_broadcast(ifaddr);
}
ret.push(item);
}
unsafe { posix_freeifaddrs(ifaddrs); }
ret
}
}
#[cfg(not(windows))]
pub fn getifaddrs() -> Vec<IfAddr> {
getifaddrs_posix::getifaddrs()
}
#[cfg(windows)]
mod getifaddrs_windows {
use super::IfAddr;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::{str, ptr};
use std::ffi::CStr;
use libc::types::common::c95::c_void;
use libc::types::os::arch::c95::{c_char, c_ulong, size_t, c_int };
use libc::types::os::arch::extra::*; // libc source code says this is all the Windows integral types
use libc::consts::os::extra::*; // win32 status code, constants etc
use libc::consts::os::bsd44::*; // the winsock constants
use libc::types::os::common::bsd44::*; // the winsock types
use libc;
#[repr(C)]
#[allow(bad_style)]
struct SOCKET_ADDRESS {
pub lpSockaddr : *const sockaddr,
pub iSockaddrLength : c_int,
}
#[repr(C)]
#[allow(bad_style)]
struct IP_ADAPTER_UNICAST_ADDRESS {
pub Length : c_ulong,
pub Flags : DWORD,
pub Next : *const IP_ADAPTER_UNICAST_ADDRESS,
pub Address : SOCKET_ADDRESS,
// Loads more follows, but I'm not bothering to map these for now
}
#[repr(C)]
#[allow(bad_style)]
struct IP_ADAPTER_PREFIX {
pub Length : c_ulong,
pub Flags : DWORD,
pub Next : *const IP_ADAPTER_PREFIX,
pub Address : SOCKET_ADDRESS,
pub PrefixLength : c_ulong,
}
#[repr(C)]
#[allow(bad_style)]
struct IP_ADAPTER_ADDRESSES {
pub Length : c_ulong,
pub IfIndex : DWORD,
pub Next : *const IP_ADAPTER_ADDRESSES,
pub AdapterName : *const c_char,
pub FirstUnicastAddress : *const IP_ADAPTER_UNICAST_ADDRESS,
FirstAnycastAddress : *const c_void,
FirstMulticastAddress : *const c_void,
FirstDnsServerAddress : *const c_void,
DnsSuffix : *const c_void,
Description : *const c_void,
FriendlyName : *const c_void,
PhysicalAddress : [c_char; 8],
PhysicalAddressLength : DWORD,
Flags : DWORD,
Mtu : DWORD,
IfType : DWORD,
OperStatus : c_int,
Ipv6IfIndex : DWORD,
ZoneIndices : [DWORD; 16],
pub FirstPrefix : *const IP_ADAPTER_PREFIX,
// Loads more follows, but I'm not bothering to map these for now
}
#[link(name="Iphlpapi")]
extern "system" {
pub fn GetAdaptersAddresses(family : c_ulong, flags : c_ulong, reserved : *const c_void, addresses : *const IP_ADAPTER_ADDRESSES, size : *mut c_ulong) -> c_ulong;
}
#[allow(unsafe_code)]
fn sockaddr_to_ipaddr(sockaddr : *const sockaddr) -> Option<IpAddr> {
if sockaddr.is_null() { return None }
if unsafe{*sockaddr}.sa_family as u32 == AF_INET as u32 {
let ref sa = unsafe{*(sockaddr as *const sockaddr_in)};
// Ignore all 169.254.x.x addresses as these are not active interfaces
if sa.sin_addr.s_addr & 65535 == 0xfea9 { return None }
Some(IpAddr::V4(Ipv4Addr::new(<|fim▁hole|> ((sa.sin_addr.s_addr>>0) & 255) as u8,
((sa.sin_addr.s_addr>>8) & 255) as u8,
((sa.sin_addr.s_addr>>16) & 255) as u8,
((sa.sin_addr.s_addr>>24) & 255) as u8,
)))
} else if unsafe{*sockaddr}.sa_family as u32 == AF_INET6 as u32 {
let ref sa = unsafe{*(sockaddr as *const sockaddr_in6)};
// Ignore all fe80:: addresses as these are link locals
if sa.sin6_addr.s6_addr[0]==0x80fe { return None }
Some(IpAddr::V6(Ipv6Addr::new(
((sa.sin6_addr.s6_addr[0] & 255)<<8) | ((sa.sin6_addr.s6_addr[0]>>8) & 255),
((sa.sin6_addr.s6_addr[1] & 255)<<8) | ((sa.sin6_addr.s6_addr[1]>>8) & 255),
((sa.sin6_addr.s6_addr[2] & 255)<<8) | ((sa.sin6_addr.s6_addr[2]>>8) & 255),
((sa.sin6_addr.s6_addr[3] & 255)<<8) | ((sa.sin6_addr.s6_addr[3]>>8) & 255),
((sa.sin6_addr.s6_addr[4] & 255)<<8) | ((sa.sin6_addr.s6_addr[4]>>8) & 255),
((sa.sin6_addr.s6_addr[5] & 255)<<8) | ((sa.sin6_addr.s6_addr[5]>>8) & 255),
((sa.sin6_addr.s6_addr[6] & 255)<<8) | ((sa.sin6_addr.s6_addr[6]>>8) & 255),
((sa.sin6_addr.s6_addr[7] & 255)<<8) | ((sa.sin6_addr.s6_addr[7]>>8) & 255),
)))
}
else { None }
}
// trivial_numeric_casts lint may become allow by default.
// Refer: https://github.com/rust-lang/rfcs/issues/1020
/// Return a vector of IP details for all the valid interfaces on this host
#[allow(unsafe_code, trivial_numeric_casts)]
pub fn getifaddrs() -> Vec<IfAddr> {
let mut ret = Vec::<IfAddr>::new();
let mut ifaddrs : *const IP_ADAPTER_ADDRESSES;
let mut buffersize : c_ulong = 15000;
loop {
unsafe {
ifaddrs = libc::malloc(buffersize as size_t) as *mut IP_ADAPTER_ADDRESSES;
if ifaddrs.is_null() {
panic!("Failed to allocate buffer in getifaddrs()");
}
let retcode = GetAdaptersAddresses(0,
0x3e /* GAA_FLAG_SKIP_ANYCAST|GAA_FLAG_SKIP_MULTICAST|GAA_FLAG_SKIP_DNS_SERVER|GAA_FLAG_INCLUDE_PREFIX|GAA_FLAG_SKIP_FRIENDLY_NAME */,
ptr::null(),
ifaddrs,
&mut buffersize) as c_int;
match retcode {
ERROR_SUCCESS => break,
111 /*ERROR_BUFFER_OVERFLOW*/ => {
libc::free(ifaddrs as *mut c_void);
buffersize = buffersize * 2;
continue
},
_ => panic!("GetAdaptersAddresses() failed with error code {}", retcode)
}
}
}
let mut _ifaddr = ifaddrs;
let mut first = true;
while !_ifaddr.is_null() {
if first { first=false; }
else { _ifaddr = unsafe { (*_ifaddr).Next }; }
if _ifaddr.is_null() { break; }
let ref ifaddr = unsafe { &*_ifaddr };
// debug!("ifaddr1={}, next={}", _ifaddr as u64, ifaddr.ifa_next as u64);
let mut addr = ifaddr.FirstUnicastAddress;
if addr.is_null() { continue; }
let mut firstaddr = true;
while !addr.is_null() {
if firstaddr { firstaddr=false; }
else { addr = unsafe { (*addr).Next }; }
if addr.is_null() { break; }
let mut item = IfAddr::new();
let name = unsafe { CStr::from_ptr(ifaddr.AdapterName) }.to_bytes();
item.name = item.name + str::from_utf8(name).unwrap();
let ipaddr = sockaddr_to_ipaddr(unsafe { (*addr).Address.lpSockaddr });
if !ipaddr.is_some() { continue; }
item.addr = ipaddr.unwrap();
// Search prefixes for a prefix matching addr
let mut prefix = ifaddr.FirstPrefix;
if !prefix.is_null() {
let mut firstprefix = true;
'prefixloop: while !prefix.is_null() {
if firstprefix { firstprefix=false; }
else { prefix = unsafe { (*prefix).Next }; }
if prefix.is_null() { break; }
let ipprefix = sockaddr_to_ipaddr(unsafe { (*prefix).Address.lpSockaddr });
if !ipprefix.is_some() { continue; }
match ipprefix.unwrap() {
IpAddr::V4(ref a) => {
if let IpAddr::V4(b) = item.addr {
let mut netmask : [u8; 4] = [0; 4];
for n in 0..unsafe{ (*prefix).PrefixLength as usize + 7 }/8 {
let x_byte = b.octets()[n];
let y_byte = a.octets()[n];
for m in 0..8 {
if (n * 8) + m > unsafe{ (*prefix).PrefixLength as usize } { break; }
let bit = 1 << m;
if (x_byte & bit) == (y_byte & bit) {
netmask[n] = netmask[n] | bit;
} else {
continue 'prefixloop;
}
}
}
item.netmask = IpAddr::V4(Ipv4Addr::new(
netmask[0],
netmask[1],
netmask[2],
netmask[3],
));
let mut broadcast : [u8; 4] = b.octets();
for n in 0..4 {
broadcast[n] = broadcast[n] | !netmask[n];
}
item.broadcast = IpAddr::V4(Ipv4Addr::new(
broadcast[0],
broadcast[1],
broadcast[2],
broadcast[3],
));
break 'prefixloop;
}
},
IpAddr::V6(ref a) => {
if let IpAddr::V6(b) = item.addr {
// Iterate the bits in the prefix, if they all match this prefix is the
// right one, else try the next prefix
let mut netmask : [u16; 8] = [0; 8];
for n in 0..unsafe{ (*prefix).PrefixLength as usize + 15 }/16 {
let x_word = b.segments()[n];
let y_word = a.segments()[n];
for m in 0..16 {
if (n * 16) + m > unsafe{ (*prefix).PrefixLength as usize } { break; }
let bit = 1 << m;
if (x_word & bit) == (y_word & bit) {
netmask[n] = netmask[n] | bit;
} else {
continue 'prefixloop;
}
}
}
item.netmask = IpAddr::V6(Ipv6Addr::new(
netmask[0],
netmask[1],
netmask[2],
netmask[3],
netmask[4],
netmask[5],
netmask[6],
netmask[7],
));
break 'prefixloop;
}
},
};
}
}
ret.push(item);
}
}
unsafe { libc::free(ifaddrs as *mut c_void); }
ret
}
}
#[cfg(windows)]
pub fn getifaddrs() -> Vec<IfAddr> {
getifaddrs_windows::getifaddrs()
}
fn is_loopback(if_addr: &IfAddr) -> bool {
match if_addr.addr {
IpAddr::V4(v4) => v4.is_loopback(),
IpAddr::V6(v6) => v6.is_loopback(),
}
}
/// Remove loopback address(s)
pub fn filter_loopback(mut ifaddrs: Vec<IfAddr>)->Vec<IfAddr> {
ifaddrs.retain(|x| !is_loopback(&x));
ifaddrs
}
#[cfg(test)]
mod test {
use super::{getifaddrs, filter_loopback};
use std::net::IpAddr;
#[test]
fn test_getifaddrs() {
let mut has_loopback4 = false;
let mut has_loopback6 = false;
for ifaddr in getifaddrs() {
debug!(" Interface {} has IP {} netmask {} broadcast {}", ifaddr.name,
ifaddr.addr, ifaddr.netmask, ifaddr.broadcast);
match ifaddr.addr {
IpAddr::V4(v4) => if v4.is_loopback() { has_loopback4=true; },
IpAddr::V6(v6) => if v6.is_loopback() { has_loopback6=true; },
}
}
// Quick sanity test, can't think of anything better
assert_eq!(has_loopback4 || has_loopback6, true);
}
#[test]
fn test_filter_loopback() {
let ifaddrs = filter_loopback(getifaddrs());
for ifaddr in ifaddrs {
debug!(" Interface {} has IP {} netmask {} broadcast {}", ifaddr.name,
ifaddr.addr, ifaddr.netmask, ifaddr.broadcast);
let is_loopback = match ifaddr.addr {
IpAddr::V4(v4) => v4.is_loopback(),
IpAddr::V6(v6) => v6.is_loopback(),
};
assert!(!is_loopback);
}
}
}<|fim▁end|> | |
<|file_name|>TextArea.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import { TMemo } from '@shared/components/TMemo';
import { Input, Form } from 'antd';
import type { FastFormFieldComponent } from '@shared/components/FastForm/field';
import { getValidateStatus } from '../utils';
export const FastFormTextArea: FastFormFieldComponent = TMemo((props) => {
const { name, label, value, onChange, error, maxLength, placeholder } = props;
return (
<Form.Item
label={label}
validateStatus={getValidateStatus(error)}
help={error}
>
<Input.TextArea
name={name}
rows={4}
maxLength={maxLength}
placeholder={placeholder}
value={value}
onChange={(e) => onChange(e.target.value)}
/>
</Form.Item>
);<|fim▁hole|><|fim▁end|> | });
FastFormTextArea.displayName = 'FastFormTextArea'; |
<|file_name|>CommentDialog.java<|end_file_name|><|fim▁begin|>package com.malalaoshi.android.ui.dialogs;
import android.content.Context;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.text.Editable;
import android.text.TextUtils;
import android.text.TextWatcher;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.RatingBar;
import android.widget.TextView;
import com.malalaoshi.android.R;
import com.malalaoshi.android.core.network.api.ApiExecutor;
import com.malalaoshi.android.core.network.api.BaseApiContext;
import com.malalaoshi.android.core.stat.StatReporter;
import com.malalaoshi.android.core.utils.MiscUtil;
import com.malalaoshi.android.entity.Comment;
import com.malalaoshi.android.network.Constants;
import com.malalaoshi.android.network.api.PostCommentApi;
import com.malalaoshi.android.ui.widgets.DoubleImageView;
import org.json.JSONException;
import org.json.JSONObject;
import butterknife.Bind;
import butterknife.ButterKnife;
import butterknife.OnClick;
/**
* Created by donald on 2017/6/29.
*/
public class CommentDialog extends BaseDialog {
private static String ARGS_DIALOG_COMMENT_TYPE = "comment type";
private static String ARGS_DIALOG_TEACHER_NAME = "teacher name";
private static String ARGS_DIALOG_TEACHER_AVATAR = "teacher avatar";
private static String ARGS_DIALOG_LECTURER_NAME = "lecturer name";
private static String ARGS_DIALOG_LECTURER_AVATAR = "lecturer avatar";
private static String ARGS_DIALOG_ASSIST_NAME = "assist name";
private static String ARGS_DIALOG_ASSIST_AVATAR = "assist avatar";
private static String ARGS_DIALOG_COURSE_NAME = "course name";
private static String ARGS_DIALOG_COMMENT = "comment";
private static String ARGS_DIALOG_TIMESLOT = "timeslot";
@Bind(R.id.div_comment_dialog_avatar)
DoubleImageView mDivCommentDialogAvatar;
@Bind(R.id.tv_comment_dialog_teacher_course)
TextView mTvCommentDialogTeacherCourse;
@Bind(R.id.rb_comment_dialog_score)
RatingBar mRbCommentDialogScore;<|fim▁hole|> EditText mEtCommentDialogInput;
@Bind(R.id.tv_comment_dialog_commit)
TextView mTvCommentDialogCommit;
@Bind(R.id.iv_comment_dialog_close)
ImageView mIvCommentDialogClose;
private int mCommentType;
private String mTeacherName;
private String mTeacherAvatar;
private String mLeactureAvatar;
private String mLeactureName;
private String mAssistantAvatar;
private String mAssistantName;
private String mCourseName;
private Comment mComment;
private long mTimeslot;
private OnCommentResultListener mResultListener;
public CommentDialog(Context context) {
super(context);
}
public CommentDialog(Context context, Bundle bundle) {
super(context);
if (bundle != null) {
mCommentType = bundle.getInt(ARGS_DIALOG_COMMENT_TYPE);
if (mCommentType == 0) {
mTeacherName = bundle.getString(ARGS_DIALOG_TEACHER_NAME, "");
mTeacherAvatar = bundle.getString(ARGS_DIALOG_TEACHER_AVATAR, "");
} else if (mCommentType == 1) {
mLeactureAvatar = bundle.getString(ARGS_DIALOG_LECTURER_AVATAR, "");
mLeactureName = bundle.getString(ARGS_DIALOG_LECTURER_NAME, "");
mAssistantAvatar = bundle.getString(ARGS_DIALOG_ASSIST_AVATAR, "");
mAssistantName = bundle.getString(ARGS_DIALOG_ASSIST_NAME, "");
}
mCourseName = bundle.getString(ARGS_DIALOG_COURSE_NAME, "");
mComment = bundle.getParcelable(ARGS_DIALOG_COMMENT);
mTimeslot = bundle.getLong(ARGS_DIALOG_TIMESLOT, 0L);
}
initView();
}
private void initView() {
setCancelable(false);
if (mCommentType == 0)
mDivCommentDialogAvatar.loadImg(mTeacherAvatar, "", DoubleImageView.LOAD_SIGNLE_BIG);
else if (mCommentType == 1)
mDivCommentDialogAvatar.loadImg(mLeactureAvatar, mAssistantAvatar, DoubleImageView.LOAD_DOUBLE);
if (mComment != null) {
StatReporter.commentPage(true);
updateUI(mComment);
mRbCommentDialogScore.setIsIndicator(true);
mEtCommentDialogInput.setFocusableInTouchMode(false);
mEtCommentDialogInput.setCursorVisible(false);
mTvCommentDialogCommit.setText("查看评价");
} else {
StatReporter.commentPage(false);
mTvCommentDialogCommit.setText("提交");
mRbCommentDialogScore.setIsIndicator(false);
mEtCommentDialogInput.setFocusableInTouchMode(true);
mEtCommentDialogInput.setCursorVisible(true);
}
mTvCommentDialogTeacherCourse.setText(mCourseName);
mRbCommentDialogScore.setOnRatingBarChangeListener(new RatingBar.OnRatingBarChangeListener() {
@Override
public void onRatingChanged(RatingBar ratingBar, float rating, boolean fromUser) {
if (mComment == null){
if (fromUser && rating > 0 && mEtCommentDialogInput.getText().length() > 0){
mTvCommentDialogCommit.setEnabled(true);
}else {
mTvCommentDialogCommit.setEnabled(false);
}
}
}
});
mEtCommentDialogInput.addTextChangedListener(new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence s, int start, int count, int after) {
}
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
}
@Override
public void afterTextChanged(Editable s) {
if (mComment == null){
if (s.length() > 0 && mRbCommentDialogScore.getRating() > 0){
mTvCommentDialogCommit.setEnabled(true);
}else {
mTvCommentDialogCommit.setEnabled(false);
}
}
}
});
}
@Override
protected View getView() {
View view = LayoutInflater.from(mContext).inflate(R.layout.dialog_comment_v2, null);
ButterKnife.bind(this, view);
return view;
}
private void updateUI(Comment comment) {
if (comment != null) {
mRbCommentDialogScore.setRating(comment.getScore());
mEtCommentDialogInput.setText(comment.getContent());
} else {
mRbCommentDialogScore.setRating(0);
mEtCommentDialogInput.setText("");
}
}
@Override
protected int getDialogStyleId() {
return 0;
}
public static CommentDialog newInstance(Context context, String lecturerName, String lecturerAvatarUrl, String assistName, String assistAvatarUrl, String courseName,
Long timeslot, Comment comment) {
Bundle args = new Bundle();
args.putInt(ARGS_DIALOG_COMMENT_TYPE, 1);
args.putString(ARGS_DIALOG_LECTURER_NAME, lecturerName);
args.putString(ARGS_DIALOG_LECTURER_AVATAR, lecturerAvatarUrl);
args.putString(ARGS_DIALOG_ASSIST_NAME, assistName);
args.putString(ARGS_DIALOG_ASSIST_AVATAR, assistAvatarUrl);
args.putString(ARGS_DIALOG_COURSE_NAME, courseName);
args.putParcelable(ARGS_DIALOG_COMMENT, comment);
args.putLong(ARGS_DIALOG_TIMESLOT, timeslot);
// f.setArguments(args);
CommentDialog f = new CommentDialog(context, args);
return f;
}
public void setOnCommentResultListener(OnCommentResultListener listener) {
mResultListener = listener;
}
public static CommentDialog newInstance(Context context, String teacherName, String teacherAvatarUrl, String courseName,
Long timeslot, Comment comment) {
Bundle args = new Bundle();
args.putInt(ARGS_DIALOG_COMMENT_TYPE, 0);
args.putString(ARGS_DIALOG_TEACHER_NAME, teacherName);
args.putString(ARGS_DIALOG_TEACHER_AVATAR, teacherAvatarUrl);
args.putString(ARGS_DIALOG_COURSE_NAME, courseName);
args.putParcelable(ARGS_DIALOG_COMMENT, comment);
args.putLong(ARGS_DIALOG_TIMESLOT, timeslot);
// f.setArguments(args);
CommentDialog f = new CommentDialog(context, args);
return f;
}
@OnClick({R.id.tv_comment_dialog_commit, R.id.iv_comment_dialog_close})
public void onViewClicked(View view) {
switch (view.getId()) {
case R.id.tv_comment_dialog_commit:
commit();
dismiss();
break;
case R.id.iv_comment_dialog_close:
dismiss();
break;
}
}
private void commit() {
StatReporter.commentSubmit();
if (mComment != null) {
dismiss();
return;
}
float score = mRbCommentDialogScore.getRating();
if (score == 0.0) {
MiscUtil.toast(R.string.rate_the_course);
return;
}
String content = mEtCommentDialogInput.getText().toString();
if (TextUtils.isEmpty(content)) {
MiscUtil.toast(R.string.write_few_reviews);
return;
}
JSONObject json = new JSONObject();
try {
json.put(Constants.TIMESLOT, mTimeslot);
json.put(Constants.SCORE, score);
json.put(Constants.CONTENT, content);
} catch (JSONException e) {
e.printStackTrace();
return;
}
ApiExecutor.exec(new PostCommentRequest(this, json.toString()));
}
public interface OnCommentResultListener {
void onSuccess(Comment comment);
}
private static final class PostCommentRequest extends BaseApiContext<CommentDialog, Comment> {
private String body;
public PostCommentRequest(CommentDialog commentDialog, String body) {
super(commentDialog);
this.body = body;
}
@Override
public Comment request() throws Exception {
return new PostCommentApi().post(body);
}
@Override
public void onApiSuccess(@NonNull Comment response) {
get().commentSucceed(response);
}
@Override
public void onApiFailure(Exception exception) {
get().commentFailed();
}
}
private void commentFailed() {
MiscUtil.toast(R.string.comment_failed);
}
private void commentSucceed(Comment response) {
MiscUtil.toast(R.string.comment_succeed);
if (mResultListener != null)
mResultListener.onSuccess(response);
dismiss();
}
}<|fim▁end|> | @Bind(R.id.et_comment_dialog_input) |
<|file_name|>users.py<|end_file_name|><|fim▁begin|>"""
:synopsis: user-centric views for askbot
This module includes all views that are specific to a given user - his or her profile,
and other views showing profile-related information.
Also this module includes the view listing all forum users.
"""
import calendar
import collections
import functools
import datetime
import logging
import operator
import urllib
from django.db.models import Count
from django.conf import settings as django_settings
from django.contrib.auth.decorators import login_required
from django.core import exceptions as django_exceptions
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseForbidden
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import ugettext as _
from django.utils import simplejson
from django.views.decorators import csrf
from askbot.utils.slug import slugify
from askbot.utils.html import sanitize_html
from askbot.mail import send_mail
from askbot.utils.http import get_request_info
from askbot.utils import functions
from askbot import forms
from askbot import const
from askbot.views import context as view_context
from askbot.conf import settings as askbot_settings
from askbot import models
from askbot import exceptions
from askbot.models.badges import award_badges_signal
from askbot.models.tag import format_personal_group_name
from askbot.search.state_manager import SearchState
from askbot.utils import url_utils
from askbot.utils.loading import load_module
def owner_or_moderator_required(f):
@functools.wraps(f)
def wrapped_func(request, profile_owner, context):
if profile_owner == request.user:
pass
elif request.user.is_authenticated() and request.user.can_moderate_user(profile_owner):
pass
else:
next_url = request.path + '?' + urllib.urlencode(request.REQUEST)
params = '?next=%s' % urllib.quote(next_url)
return HttpResponseRedirect(url_utils.get_login_url() + params)
return f(request, profile_owner, context)
return wrapped_func
def show_users(request, by_group=False, group_id=None, group_slug=None):
"""Users view, including listing of users by group"""
if askbot_settings.GROUPS_ENABLED and not by_group:
default_group = models.Group.objects.get_global_group()
group_slug = slugify(default_group.name)
new_url = reverse('users_by_group',
kwargs={'group_id': default_group.id,
'group_slug': group_slug})
return HttpResponseRedirect(new_url)
users = models.User.objects.exclude(status = 'b')
group = None
group_email_moderation_enabled = False
user_acceptance_level = 'closed'
user_membership_level = 'none'
if by_group == True:
if askbot_settings.GROUPS_ENABLED == False:
raise Http404
if group_id:
if all((group_id, group_slug)) == False:
return HttpResponseRedirect('groups')
else:
try:
group = models.Group.objects.get(id = group_id)
group_email_moderation_enabled = \
(
askbot_settings.GROUP_EMAIL_ADDRESSES_ENABLED \
and askbot_settings.ENABLE_CONTENT_MODERATION
)
user_acceptance_level = group.get_openness_level_for_user(
request.user
)
except models.Group.DoesNotExist:
raise Http404
if group_slug == slugify(group.name):
#filter users by full group memberships
#todo: refactor as Group.get_full_members()
full_level = models.GroupMembership.FULL
memberships = models.GroupMembership.objects.filter(
group=group, level=full_level
)
user_ids = memberships.values_list('user__id', flat=True)
users = users.filter(id__in=user_ids)
if request.user.is_authenticated():
membership = request.user.get_group_membership(group)
if membership:
user_membership_level = membership.get_level_display()
else:
group_page_url = reverse(
'users_by_group',
kwargs = {
'group_id': group.id,
'group_slug': slugify(group.name)
}
)
return HttpResponseRedirect(group_page_url)
is_paginated = True
sortby = request.GET.get('sort', 'reputation')
if askbot_settings.KARMA_MODE == 'private' and sortby == 'reputation':
sortby = 'newest'
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
search_query = request.GET.get('query', "")
if search_query == "":
if sortby == "newest":
order_by_parameter = '-date_joined'
elif sortby == "last":
order_by_parameter = 'date_joined'
elif sortby == "user":
order_by_parameter = 'username'
else:
# default
order_by_parameter = '-reputation'
objects_list = Paginator(
users.order_by(order_by_parameter),
const.USERS_PAGE_SIZE
)
base_url = request.path + '?sort=%s&' % sortby
else:
sortby = "reputation"
matching_users = models.get_users_by_text_query(search_query, users)
objects_list = Paginator(
matching_users.order_by('-reputation'),
const.USERS_PAGE_SIZE
)
base_url = request.path + '?name=%s&sort=%s&' % (search_query, sortby)
try:
users_page = objects_list.page(page)
except (EmptyPage, InvalidPage):
users_page = objects_list.page(objects_list.num_pages)
paginator_data = {
'is_paginated' : is_paginated,
'pages': objects_list.num_pages,
'page': page,
'has_previous': users_page.has_previous(),
'has_next': users_page.has_next(),
'previous': users_page.previous_page_number(),
'next': users_page.next_page_number(),
'base_url' : base_url
}
paginator_context = functions.setup_paginator(paginator_data) #
#todo: move to contexts
#extra context for the groups
if askbot_settings.GROUPS_ENABLED:
#todo: cleanup this branched code after groups are migrated to auth_group
user_groups = models.Group.objects.exclude_personal()
if len(user_groups) <= 1:
assert(user_groups[0].name == askbot_settings.GLOBAL_GROUP_NAME)
user_groups = None
group_openness_choices = models.Group().get_openness_choices()
else:
user_groups = None
group_openness_choices = None
data = {
'active_tab': 'users',
'page_class': 'users-page',
'users' : users_page,
'group': group,
'search_query' : search_query,
'tab_id' : sortby,
'paginator_context' : paginator_context,
'group_email_moderation_enabled': group_email_moderation_enabled,
'user_acceptance_level': user_acceptance_level,
'user_membership_level': user_membership_level,
'user_groups': user_groups,
'group_openness_choices': group_openness_choices
}
return render(request, 'users.html', data)
@csrf.csrf_protect
def user_moderate(request, subject, context):
    """user subview for moderation

    ``subject`` is the user being moderated and ``request.user`` is the
    moderator; renders forms to change the subject's status or karma and
    to send the subject an email message, and processes their submissions
    """
    moderator = request.user

    #NOTE(review): only *authenticated* users lacking the permission are
    #rejected here; an anonymous user passes this check - presumably
    #access is restricted upstream, but verify
    if moderator.is_authenticated() and not moderator.can_moderate_user(subject):
        raise Http404

    #result flags reported back to the template
    user_rep_changed = False
    user_status_changed = False
    message_sent = False
    email_error_message = None

    user_rep_form = forms.ChangeUserReputationForm()
    send_message_form = forms.SendMessageForm()
    if request.method == 'POST':
        if 'change_status' in request.POST:
            user_status_form = forms.ChangeUserStatusForm(
                                            request.POST,
                                            moderator = moderator,
                                            subject = subject
                                        )
            if user_status_form.is_valid():
                subject.set_status( user_status_form.cleaned_data['user_status'] )
                user_status_changed = True
        elif 'send_message' in request.POST:
            send_message_form = forms.SendMessageForm(request.POST)
            if send_message_form.is_valid():
                subject_line = send_message_form.cleaned_data['subject_line']
                body_text = send_message_form.cleaned_data['body_text']
                try:
                    #delivery failures are reported on the page, not raised
                    send_mail(
                        subject_line = subject_line,
                        body_text = body_text,
                        recipient_list = [subject.email],
                        headers={'Reply-to':moderator.email},
                        raise_on_failure = True
                    )
                    message_sent = True
                except exceptions.EmailNotSent, e:
                    email_error_message = unicode(e)

                #blank form so the message is not accidentally re-posted
                send_message_form = forms.SendMessageForm()
        else:
            #karma (reputation) adjustment branch
            #NOTE(review): dead assignment - the variable actually set and
            #read below is `rep_change_type`
            reputation_change_type = None
            if 'subtract_reputation' in request.POST:
                rep_change_type = 'subtract'
            elif 'add_reputation' in request.POST:
                rep_change_type = 'add'
            else:
                raise Http404

            user_rep_form = forms.ChangeUserReputationForm(request.POST)
            if user_rep_form.is_valid():
                rep_delta = user_rep_form.cleaned_data['user_reputation_delta']
                comment = user_rep_form.cleaned_data['comment']

                if rep_change_type == 'subtract':
                    rep_delta = -1 * rep_delta

                moderator.moderate_user_reputation(
                                    user = subject,
                                    reputation_change = rep_delta,
                                    comment = comment,
                                    timestamp = datetime.datetime.now(),
                                )
                #reset form to preclude accidentally repeating submission
                user_rep_form = forms.ChangeUserReputationForm()
                user_rep_changed = True

    #need to re-initialize the form even if it was posted, because
    #initial values will most likely be different from the previous
    user_status_form = forms.ChangeUserStatusForm(
                        moderator = moderator,
                        subject = subject
                    )
    data = {
        'active_tab': 'users',
        'page_class': 'user-profile-page',
        'tab_name': 'moderation',
        'tab_description': _('moderate this user'),
        'page_title': _('moderate user'),
        'change_user_status_form': user_status_form,
        'change_user_reputation_form': user_rep_form,
        'send_message_form': send_message_form,
        'message_sent': message_sent,
        'email_error_message': email_error_message,
        'user_rep_changed': user_rep_changed,
        'user_status_changed': user_status_changed
    }
    context.update(data)
    return render(request, 'user_profile/user_moderate.html', context)
#non-view function
def set_new_email(user, new_email, nomessage=False):
    """Store *new_email* on *user* and flag it as not yet validated.

    Does nothing when the address is unchanged. ``nomessage`` is kept
    for interface compatibility with the disabled validation-email code
    below and is currently unused.
    """
    if new_email == user.email:
        return
    user.email = new_email
    user.email_isvalid = False
    user.save()
    #if askbot_settings.EMAIL_VALIDATION == True:
    #    send_new_email_key(user,nomessage=nomessage)
@login_required
@csrf.csrf_protect
def edit_user(request, id):
    """View that allows to edit user profile.
    This view is accessible to profile owners or site administrators
    """
    user = get_object_or_404(models.User, id=id)
    #only the profile owner or a superuser may edit
    if not(request.user == user or request.user.is_superuser):
        raise Http404
    if request.method == "POST":
        form = forms.EditUserForm(user, request.POST)
        if form.is_valid():
            if 'email' in form.cleaned_data and askbot_settings.EDITABLE_EMAIL:
                new_email = sanitize_html(form.cleaned_data['email'])
                set_new_email(user, new_email)

            if askbot_settings.EDITABLE_SCREEN_NAME:
                new_username = sanitize_html(form.cleaned_data['username'])
                if user.username != new_username:
                    #the personal group is named after the user, so it
                    #must be renamed together with the username
                    group = user.get_personal_group()
                    user.username = new_username
                    group.name = format_personal_group_name(user)
                    group.save()

            #free-form profile fields are passed through sanitize_html
            user.real_name = sanitize_html(form.cleaned_data['realname'])
            user.website = sanitize_html(form.cleaned_data['website'])
            user.location = sanitize_html(form.cleaned_data['city'])
            user.date_of_birth = form.cleaned_data.get('birthday', None)
            user.about = sanitize_html(form.cleaned_data['about'])
            user.country = form.cleaned_data['country']
            user.show_country = form.cleaned_data['show_country']
            user.show_marked_tags = form.cleaned_data['show_marked_tags']
            user.save()
            # send user updated signal if full fields have been updated
            award_badges_signal.send(None,
                            event = 'update_user_profile',
                            actor = user,
                            context_object = user
                        )
            return HttpResponseRedirect(user.get_profile_url())
    else:
        form = forms.EditUserForm(user)

    #invalid POSTed forms fall through here so errors are re-displayed
    data = {
        'active_tab': 'users',
        'page_class': 'user-profile-edit-page',
        'form' : form,
        'marked_tags_setting': askbot_settings.MARKED_TAGS_ARE_PUBLIC_WHEN,
        'support_custom_avatars': ('avatar' in django_settings.INSTALLED_APPS),
        'view_user': user,
    }
    return render(request, 'user_profile/user_edit.html', data)
def user_stats(request, user, context):
    """Render the profile "overview" (stats) tab.

    ``user`` is the profile owner; ``request.user`` is the viewer, which
    affects visibility of anonymous and unmoderated questions.
    """
    question_filter = {}
    #other viewers must not see the owner's anonymous questions
    if request.user != user:
        question_filter['is_anonymous'] = False

    if askbot_settings.ENABLE_CONTENT_MODERATION:
        question_filter['approved'] = True

    #
    # Questions
    #
    questions = user.posts.get_questions(
        user=request.user
    ).filter(
        **question_filter
    ).order_by(
        '-points', '-thread__last_activity_at'
    ).select_related(
        'thread', 'thread__last_activity_by'
    )[:100]

    #added this if to avoid another query if questions is less than 100
    if len(questions) < 100:
        question_count = len(questions)
    else:
        question_count = user.posts.get_questions().filter(**question_filter).count()

    #
    # Top answers
    #
    top_answers = user.posts.get_answers(
        request.user
    ).filter(
        deleted=False,
        thread__posts__deleted=False,
        thread__posts__post_type='question',
    ).select_related(
        'thread'
    ).order_by(
        '-points', '-added_at'
    )[:100]

    top_answer_count = len(top_answers)
    #
    # Votes
    #
    up_votes = models.Vote.objects.get_up_vote_count_from_user(user)
    down_votes = models.Vote.objects.get_down_vote_count_from_user(user)
    votes_today = models.Vote.objects.get_votes_count_today_from_user(user)
    votes_total = askbot_settings.MAX_VOTES_PER_USER_PER_DAY

    #
    # Tags
    #
    # INFO: There's bug in Django that makes the following query kind of broken (GROUP BY clause is problematic):
    #       http://stackoverflow.com/questions/7973461/django-aggregation-does-excessive-group-by-clauses
    #       Fortunately it looks like it returns correct results for the test data
    user_tags = models.Tag.objects.filter(threads__posts__author=user).distinct().\
                    annotate(user_tag_usage_count=Count('threads')).\
                    order_by('-user_tag_usage_count')[:const.USER_VIEW_DATA_SIZE]
    user_tags = list(user_tags) # evaluate

    #marked-tag lists are only exposed according to the privacy setting
    when = askbot_settings.MARKED_TAGS_ARE_PUBLIC_WHEN
    if when == 'always' or \
        (when == 'when-user-wants' and user.show_marked_tags == True):
        #refactor into: user.get_marked_tag_names('good'/'bad'/'subscribed')
        interesting_tag_names = user.get_marked_tag_names('good')
        ignored_tag_names = user.get_marked_tag_names('bad')
        subscribed_tag_names = user.get_marked_tag_names('subscribed')
    else:
        interesting_tag_names = None
        ignored_tag_names = None
        subscribed_tag_names = None

#    tags = models.Post.objects.filter(author=user).values('id', 'thread', 'thread__tags')
#    post_ids = set()
#    thread_ids = set()
#    tag_ids = set()
#    for t in tags:
#        post_ids.add(t['id'])
#        thread_ids.add(t['thread'])
#        tag_ids.add(t['thread__tags'])
#        if t['thread__tags'] == 11:
#            print t['thread'], t['id']
#    import ipdb; ipdb.set_trace()

    #
    # Badges/Awards (TODO: refactor into Managers/QuerySets when a pattern emerges; Simplify when we get rid of Question&Answer models)
    #
    post_type = ContentType.objects.get_for_model(models.Post)

    user_awards = models.Award.objects.filter(user=user).select_related('badge')

    #awards pointing at posts; resolved in bulk below
    awarded_post_ids = []
    for award in user_awards:
        if award.content_type_id == post_type.id:
            awarded_post_ids.append(award.object_id)

    awarded_posts = models.Post.objects.filter(id__in=awarded_post_ids)\
                    .select_related('thread') # select related to avoid additional queries in Post.get_absolute_url()

    awarded_posts_map = {}
    for post in awarded_posts:
        awarded_posts_map[post.id] = post

    badges_dict = collections.defaultdict(list)

    for award in user_awards:
        # Fetch content object
        if award.content_type_id == post_type.id:
            #here we go around a possibility of awards
            #losing the content objects when the content
            #objects are deleted for some reason
            awarded_post = awarded_posts_map.get(award.object_id, None)
            if awarded_post is not None:
                #protect from awards that are associated with deleted posts
                award.content_object = awarded_post
                award.content_object_is_post = True
            else:
                award.content_object_is_post = False
        else:
            award.content_object_is_post = False

        # "Assign" to its Badge
        badges_dict[award.badge].append(award)

    #NOTE: Python 2 - .items() returns a list, so in-place sort works
    badges = badges_dict.items()
    badges.sort(key=operator.itemgetter(1), reverse=True)

    user_groups = models.Group.objects.get_for_user(user = user)
    user_groups = user_groups.exclude_personal()
    global_group = models.Group.objects.get_global_group()
    user_groups = user_groups.exclude(name=global_group.name)

    #membership details are private to the profile owner
    if request.user == user:
        groups_membership_info = user.get_groups_membership_info(user_groups)
    else:
        groups_membership_info = collections.defaultdict()

    data = {
        'active_tab':'users',
        'page_class': 'user-profile-page',
        'support_custom_avatars': ('avatar' in django_settings.INSTALLED_APPS),
        'tab_name' : 'stats',
        'tab_description' : _('user profile'),
        'page_title' : _('user profile overview'),
        'user_status_for_display': user.get_status_display(soft = True),
        'questions' : questions,
        'question_count': question_count,
        'top_answers': top_answers,
        'top_answer_count': top_answer_count,
        'up_votes' : up_votes,
        'down_votes' : down_votes,
        'total_votes': up_votes + down_votes,
        'votes_today_left': votes_total - votes_today,
        'votes_total_per_day': votes_total,
        'user_tags' : user_tags,
        'user_groups': user_groups,
        'groups_membership_info': groups_membership_info,
        'interesting_tag_names': interesting_tag_names,
        'ignored_tag_names': ignored_tag_names,
        'subscribed_tag_names': subscribed_tag_names,
        'badges': badges,
        'total_badges' : len(badges),
    }
    context.update(data)
    return render(request, 'user_profile/user_stats.html', context)
def user_recent(request, user, context):
    """Render the "recent activity" tab of the user profile:
    a reverse-chronological list of the user's questions, answers,
    comments, edits, accepted answers and badge awards.
    """
    def get_type_name(type_id):
        #const.TYPE_ACTIVITY holds (id, display name) pairs
        for item in const.TYPE_ACTIVITY:
            if type_id in item:
                return item[1]

    class Event(object):
        #display item for a post-related activity
        is_badge = False

        def __init__(self, time, type, title, summary, answer_id, question_id):
            self.time = time
            self.type = get_type_name(type)
            self.type_id = type
            self.title = title
            self.summary = summary
            slug_title = slugify(title)
            self.title_link = reverse(
                                'question',
                                kwargs={'id':question_id}
                            ) + u'%s' % slug_title
            #answer_id == 0 means the event points at the question itself
            if int(answer_id) > 0:
                self.title_link += '#%s' % answer_id

    class AwardEvent(object):
        #display item for a badge award
        is_badge = True

        def __init__(self, time, type, content_object, badge):
            self.time = time
            self.type = get_type_name(type)
            self.content_object = content_object
            self.badge = badge

    # TODO: Don't process all activities here for the user, only a subset ([:const.USER_VIEW_DATA_SIZE])
    activity_types = (
        const.TYPE_ACTIVITY_ASK_QUESTION,
        const.TYPE_ACTIVITY_ANSWER,
        const.TYPE_ACTIVITY_COMMENT_QUESTION,
        const.TYPE_ACTIVITY_COMMENT_ANSWER,
        const.TYPE_ACTIVITY_UPDATE_QUESTION,
        const.TYPE_ACTIVITY_UPDATE_ANSWER,
        const.TYPE_ACTIVITY_MARK_ANSWER,
        const.TYPE_ACTIVITY_PRIZE
    )

    #source of information about activities
    activity_objects = models.Activity.objects.filter(
                                    user=user,
                                    activity_type__in=activity_types
                                )[:const.USER_VIEW_DATA_SIZE]

    #a list of digest objects, suitable for display
    #the number of activities to show is not guaranteed to be
    #const.USER_VIEW_DATA_TYPE, because we don't show activity
    #for deleted content
    activities = []
    for activity in activity_objects:
        # TODO: multi-if means that we have here a construct for which a design pattern should be used
        # ask questions
        if activity.activity_type == const.TYPE_ACTIVITY_ASK_QUESTION:
            question = activity.content_object
            if not question.deleted:
                activities.append(Event(
                    time=activity.active_at,
                    type=activity.activity_type,
                    title=question.thread.title,
                    summary='', #q.summary, # TODO: was set to '' before, but that was probably wrong
                    answer_id=0,
                    question_id=question.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_ANSWER:
            ans = activity.content_object
            question = ans.thread._question_post()
            if not ans.deleted and not question.deleted:
                activities.append(Event(
                    time=activity.active_at,
                    type=activity.activity_type,
                    title=ans.thread.title,
                    summary=question.summary,
                    answer_id=ans.id,
                    question_id=question.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_COMMENT_QUESTION:
            cm = activity.content_object
            q = cm.parent
            #assert q.is_question(): todo the activity types may be wrong
            if not q.deleted:
                activities.append(Event(
                    time=cm.added_at,
                    type=activity.activity_type,
                    title=q.thread.title,
                    summary='',
                    answer_id=0,
                    question_id=q.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_COMMENT_ANSWER:
            cm = activity.content_object
            ans = cm.parent
            #assert ans.is_answer()
            question = ans.thread._question_post()
            if not ans.deleted and not question.deleted:
                activities.append(Event(
                    time=cm.added_at,
                    type=activity.activity_type,
                    title=ans.thread.title,
                    summary='',
                    answer_id=ans.id,
                    question_id=question.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_UPDATE_QUESTION:
            q = activity.content_object
            if not q.deleted:
                activities.append(Event(
                    time=activity.active_at,
                    type=activity.activity_type,
                    title=q.thread.title,
                    summary=q.summary,
                    answer_id=0,
                    question_id=q.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_UPDATE_ANSWER:
            ans = activity.content_object
            question = ans.thread._question_post()
            if not ans.deleted and not question.deleted:
                activities.append(Event(
                    time=activity.active_at,
                    type=activity.activity_type,
                    title=ans.thread.title,
                    summary=ans.summary,
                    answer_id=ans.id,
                    question_id=question.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_MARK_ANSWER:
            ans = activity.content_object
            question = ans.thread._question_post()
            if not ans.deleted and not question.deleted:
                activities.append(Event(
                    time=activity.active_at,
                    type=activity.activity_type,
                    title=ans.thread.title,
                    summary='',
                    answer_id=0,
                    question_id=question.id
                ))
        elif activity.activity_type == const.TYPE_ACTIVITY_PRIZE:
            award = activity.content_object
            if award is not None:#todo: work around halfa$$ comment deletion
                activities.append(AwardEvent(
                    time=award.awarded_at,
                    type=activity.activity_type,
                    content_object=award.content_object,
                    badge=award.badge,
                ))

    #newest events first
    activities.sort(key=operator.attrgetter('time'), reverse=True)

    data = {
        'active_tab': 'users',
        'page_class': 'user-profile-page',
        'tab_name' : 'recent',
        'tab_description' : _('recent user activity'),
        'page_title' : _('profile - recent activity'),
        'activities' : activities
    }
    context.update(data)
    return render(request, 'user_profile/user_recent.html', context)
#not a view - no direct url route here, called by `user_responses`
@csrf.csrf_protect
def show_group_join_requests(request, user, context):
    """show group join requests to admins who belong to the group"""
    if not request.user.is_administrator_or_moderator():
        raise Http404

    #groups the requesting moderator belongs to, keyed by id so
    #that a request's group is resolved without loading the
    #activity content object
    moderated_groups = request.user.get_groups()
    groups_dict = dict()
    for group in moderated_groups:
        groups_dict[group.id] = group

    #join requests are activity records pointing at those groups
    group_content_type = ContentType.objects.get_for_model(models.Group)
    join_requests = models.Activity.objects.filter(
        activity_type=const.TYPE_ACTIVITY_ASK_TO_JOIN_GROUP,
        content_type=group_content_type,
        object_id__in=groups_dict.keys()
    ).order_by('-active_at')

    context.update({
        'active_tab':'users',
        'page_class': 'user-profile-page',
        'tab_name' : 'join_requests',
        'tab_description' : _('group joining requests'),
        'page_title' : _('profile - moderation'),
        'groups_dict': groups_dict,
        'join_requests': join_requests
    })
    return render(request, 'user_inbox/group_join_requests.html', context)
@owner_or_moderator_required
def user_responses(request, user, context):
    """
    We list answers for question, comments, and
    answer accepted by others for this user.
    as well as mentions of the user

    user - the profile owner

    the view has two sub-views - "forum" - i.e. responses
    and "flags" - moderation items for mods only
    """

    #0) temporary, till urls are fixed: update context
    #   to contain response counts for all sub-sections
    context.update(view_context.get_for_inbox(request.user))

    #1) select activity types according to section
    section = request.GET.get('section', 'forum')
    #"flags" is restricted to moderators/administrators
    if section == 'flags' and not\
        (request.user.is_moderator() or request.user.is_administrator()):
        raise Http404

    if section == 'forum':
        activity_types = const.RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY
        activity_types += (const.TYPE_ACTIVITY_MENTION,)
    elif section == 'flags':
        activity_types = (const.TYPE_ACTIVITY_MARK_OFFENSIVE,)
        if askbot_settings.ENABLE_CONTENT_MODERATION:
            activity_types += (
                const.TYPE_ACTIVITY_MODERATED_NEW_POST,
                const.TYPE_ACTIVITY_MODERATED_POST_EDIT
            )
    elif section == 'join_requests':
        #delegated entirely to the group-join-requests sub-view
        return show_group_join_requests(request, user, context)
    elif section == 'messages':
        #private messages are visible to the profile owner only
        if request.user != user:
            raise Http404
        #local imports keep group_messaging optional
        from group_messaging.views import SendersList, ThreadsList
        context.update(SendersList().get_context(request))
        context.update(ThreadsList().get_context(request))
        data = {
            'inbox_threads_count': context['threads_count'],#a hack for the inbox count
            'active_tab':'users',
            'page_class': 'user-profile-page',
            'tab_name' : 'inbox',
            'inbox_section': section,
            'tab_description' : _('private messages'),
            'page_title' : _('profile - messages')
        }
        context.update(data)
        if 'thread_id' in request.GET:
            from group_messaging.models import Message
            from group_messaging.views import ThreadDetails
            try:
                thread_id = request.GET['thread_id']
                context.update(ThreadDetails().get_context(request, thread_id))
                context['group_messaging_template_name'] = \
                    'group_messaging/home_thread_details.html'
            except Message.DoesNotExist:
                raise Http404
        else:
            context['group_messaging_template_name'] = 'group_messaging/home.html'
            #here we take shortcut, because we don't care about
            #all the extra context loaded below
        return render(request, 'user_inbox/messages.html', context)
    else:
        raise Http404

    #2) load the activity notifications according to activity types
    #todo: insert pagination code here
    memo_set = request.user.get_notifications(activity_types)
    memo_set = memo_set.select_related(
                    'activity',
                    'activity__content_type',
                    'activity__question__thread',
                    'activity__user',
                    'activity__user__gravatar',
                ).order_by(
                    '-activity__active_at'
                )[:const.USER_VIEW_DATA_SIZE]

    #3) "package" data for the output
    response_list = list()
    for memo in memo_set:
        if memo.activity.content_object is None:
            continue#a temp plug due to bug in the comment deletion
        response = {
            'id': memo.id,
            'timestamp': memo.activity.active_at,
            'user': memo.activity.user,
            'is_new': memo.is_new(),
            'response_url': memo.activity.get_absolute_url(),
            'response_snippet': memo.activity.get_snippet(),
            'response_title': memo.activity.question.thread.title,
            'response_type': memo.activity.get_activity_type_display(),
            'response_id': memo.activity.question.id,
            'nested_responses': [],
            'response_content': memo.activity.content_object.html,
        }
        response_list.append(response)

    #4) sort by response id
    response_list.sort(lambda x,y: cmp(y['response_id'], x['response_id']))

    #5) group responses by thread (response_id is really the question post id)
    last_response_id = None #flag to know if the response id is different
    filtered_response_list = list()
    for i, response in enumerate(response_list):
        #todo: group responses by the user as well
        if response['response_id'] == last_response_id:
            #same thread as the previous response - nest it under
            #the first response of that thread
            original_response = dict.copy(filtered_response_list[len(filtered_response_list)-1])
            original_response['nested_responses'].append(response)
            filtered_response_list[len(filtered_response_list)-1] = original_response
        else:
            filtered_response_list.append(response)
        last_response_id = response['response_id']

    #6) sort responses by time
    filtered_response_list.sort(lambda x,y: cmp(y['timestamp'], x['timestamp']))

    reject_reasons = models.PostFlagReason.objects.all().order_by('title')
    data = {
        'active_tab':'users',
        'page_class': 'user-profile-page',
        'tab_name' : 'inbox',
        'inbox_section': section,
        'tab_description' : _('comments and answers to others questions'),
        'page_title' : _('profile - responses'),
        'post_reject_reasons': reject_reasons,
        'responses' : filtered_response_list,
    }
    context.update(data)
    return render(request, 'user_inbox/responses_and_flags.html', context)
def user_network(request, user, context):
    """Render the "network" tab (follows/followers); available only
    when the optional `followit` app is installed."""
    if 'followit' not in django_settings.INSTALLED_APPS:
        raise Http404
    context.update({
        'tab_name': 'network',
        'followed_users': user.get_followed_users(),
        'followers': user.get_followers(),
    })
    return render(request, 'user_profile/user_network.html', context)
@owner_or_moderator_required
def user_votes(request, user, context):
    """Render the "votes" tab: the user's votes on questions and
    answers, most recent vote first."""
    votes = []
    for vote in models.Vote.objects.filter(user=user):
        post = vote.voted_post
        #annotate each vote with the data the template needs
        #to build a link to the voted post
        if post.is_question():
            vote.title = post.thread.title
            vote.question_id = post.id
            vote.answer_id = 0
        elif post.is_answer():
            vote.title = post.thread.title
            vote.question_id = post.thread._question_post().id
            vote.answer_id = post.id
        else:
            continue #votes on other post types are not displayed
        votes.append(vote)

    votes.sort(key=operator.attrgetter('id'), reverse=True)

    context.update({
        'active_tab':'users',
        'page_class': 'user-profile-page',
        'tab_name' : 'votes',
        'tab_description' : _('user vote record'),
        'page_title' : _('profile - votes'),
        'votes' : votes[:const.USER_VIEW_DATA_SIZE]
    })
    return render(request, 'user_profile/user_votes.html', context)
def user_reputation(request, user, context):
    """Render the karma ("reputation") tab, including the data series
    for the karma history graph."""
    reputes = models.Repute.objects.filter(user=user).select_related(
        'question', 'question__thread', 'user'
    ).order_by('-reputed_at')

    def as_graph_point(moment, karma):
        #"[milliseconds_since_epoch,karma]" pair for the js plotting code
        return '[%s,%s]' % (calendar.timegm(moment.timetuple()) * 1000, karma)

    # prepare data for the graph - last values go in first
    rep_list = [as_graph_point(datetime.datetime.now(), user.reputation)]
    for rep in reputes:
        rep_list.append(as_graph_point(rep.reputed_at, rep.reputation))
    reps = '[%s]' % ','.join(rep_list)

    context.update({
        'active_tab':'users',
        'page_class': 'user-profile-page',
        'tab_name': 'reputation',
        'tab_description': _('user karma'),
        'page_title': _("Profile - User's Karma"),
        'reputation': reputes,
        'reps': reps
    })
    return render(request, 'user_profile/user_reputation.html', context)
def user_favorites(request, user, context):
    """Render the "favorites" tab: questions the user has marked
    as favorite, best first."""
    #ids of threads the user marked as favorite
    favorite_thread_ids = user.user_favorite_questions.values_list('thread', flat=True)
    questions = models.Post.objects.filter(
        post_type='question', thread__in=favorite_thread_ids
    ).select_related(
        'thread', 'thread__last_activity_by'
    ).order_by(
        '-points', '-thread__last_activity_at'
    )[:const.USER_VIEW_DATA_SIZE]

    context.update({
        'active_tab':'users',
        'page_class': 'user-profile-page',
        'tab_name' : 'favorites',
        'tab_description' : _('users favorite questions'),
        'page_title' : _('profile - favorite questions'),
        'questions' : questions,
    })
    return render(request, 'user_profile/user_favorites.html', context)
@csrf.csrf_protect
def user_select_languages(request, id=None, slug=None):
    """Save the POSTed list of languages for the user, then redirect to
    the subscriptions page.

    POST only; allowed for the profile owner or an administrator.
    """
    if request.method != 'POST':
        raise django_exceptions.PermissionDenied
    user = get_object_or_404(models.User, id=id)

    is_owner = request.user.id == user.id
    if not (is_owner or request.user.is_administrator()):
        raise django_exceptions.PermissionDenied

    #languages are stored as a single space-separated string
    user.languages = ' '.join(request.POST.getlist('languages'))
    user.save()

    return HttpResponseRedirect(
        reverse(
            'user_subscriptions',
            kwargs={'id': user.id, 'slug': slugify(user.username)}
        )
    )
@owner_or_moderator_required
@csrf.csrf_protect
def user_email_subscriptions(request, user, context):
    """Profile tab for the email subscription and tag filter settings.

    GET renders the current settings; POST saves the submitted
    subscription frequencies and tag filter, or cancels all email
    updates when the 'stop_email' button was used.
    """
    logging.debug(get_request_info(request))
    #bugfix: initialize up front - previously a POST whose forms failed
    #validation reached the template context below with `action_status`
    #unbound and raised NameError
    action_status = None
    if request.method == 'POST':
        email_feeds_form = forms.EditUserEmailFeedsForm(request.POST)
        tag_filter_form = forms.TagFilterSelectionForm(request.POST, instance=user)
        if email_feeds_form.is_valid() and tag_filter_form.is_valid():
            tag_filter_saved = tag_filter_form.save()
            if tag_filter_saved:
                action_status = _('changes saved')

            if 'save' in request.POST:
                feeds_saved = email_feeds_form.save(user)
                if feeds_saved:
                    action_status = _('changes saved')
            elif 'stop_email' in request.POST:
                #reset all feeds to "no email" and re-display the form
                #with the corresponding initial values
                email_stopped = email_feeds_form.reset().save(user)
                initial_values = forms.EditUserEmailFeedsForm.NO_EMAIL_INITIAL
                email_feeds_form = forms.EditUserEmailFeedsForm(initial=initial_values)
                if email_stopped:
                    action_status = _('email updates canceled')
    else:
        #user may have been created by some app that does not know
        #about the email subscriptions, in that case the call below
        #will add any subscription settings that are missing
        #using the default frequencies
        user.add_missing_askbot_subscriptions()

        #initialize the form
        email_feeds_form = forms.EditUserEmailFeedsForm()
        email_feeds_form.set_initial_values(user)
        tag_filter_form = forms.TagFilterSelectionForm(instance=user)

    data = {
        'active_tab': 'users',
        'subscribed_tag_names': user.get_marked_tag_names('subscribed'),
        'page_class': 'user-profile-page',
        'tab_name': 'email_subscriptions',
        'tab_description': _('email subscription settings'),
        'page_title': _('profile - email subscriptions'),
        'email_feeds_form': email_feeds_form,
        'tag_filter_selection_form': tag_filter_form,
        'action_status': action_status,
        'user_languages': user.languages.split()
    }
    context.update(data)
    return render(
        request,
        'user_profile/user_email_subscriptions.html',
        context
    )
@csrf.csrf_protect
def user_custom_tab(request, user, context):
    """works only if `ASKBOT_CUSTOM_USER_PROFILE_TAB`
    setting in the ``settings.py`` is properly configured"""
    tab_settings = django_settings.ASKBOT_CUSTOM_USER_PROFILE_TAB
    #the tab body is produced by a configurable callable
    content_generator = load_module(tab_settings['CONTENT_GENERATOR'])
    context.update({
        'custom_tab_content': content_generator(request, user),
        'tab_name': tab_settings['SLUG'],
        'page_title': _('profile - %(section)s') % \
            {'section': tab_settings['NAME']}
    })
    return render(request, 'user_profile/custom_tab.html', context)
#maps the value of the `sort` GET parameter (or the `tab_name` url kwarg)
#to the sub-view function rendering the corresponding profile tab;
#used by the `user` switchboard view below
USER_VIEW_CALL_TABLE = {
    'stats': user_stats,
    'recent': user_recent,
    'inbox': user_responses,
    'network': user_network,
    'reputation': user_reputation,
    'favorites': user_favorites,
    'votes': user_votes,
    'email_subscriptions': user_email_subscriptions,
    'moderation': user_moderate,
}

#optional extra profile tab, rendered by `user_custom_tab`
CUSTOM_TAB = getattr(django_settings, 'ASKBOT_CUSTOM_USER_PROFILE_TAB', None)
if CUSTOM_TAB:
    CUSTOM_SLUG = CUSTOM_TAB['SLUG']
    USER_VIEW_CALL_TABLE[CUSTOM_SLUG] = user_custom_tab
#todo: rename this function - variable named user is everywhere
def user(request, id, slug=None, tab_name=None):
    """Main user view function that works as a switchboard

    id - id of the profile owner

    todo: decide what to do with slug - it is not used
    in the code in any way
    """
    profile_owner = get_object_or_404(models.User, id = id)

    if not tab_name:
        tab_name = request.GET.get('sort', 'stats')

    #decide whether the viewer may see the karma data
    karma_mode = askbot_settings.KARMA_MODE
    if karma_mode == 'public':
        can_show_karma = True
    elif karma_mode == 'hidden':
        can_show_karma = False
    elif request.user.is_anonymous():
        can_show_karma = False
    else:
        #private mode: admins/mods and the owner may see it
        can_show_karma = (
            request.user.is_administrator_or_moderator()
            or request.user == profile_owner
        )

    if tab_name == 'reputation' and not can_show_karma:
        raise Http404

    user_view_func = USER_VIEW_CALL_TABLE.get(tab_name, user_stats)

    search_state = SearchState( # Non-default SearchState with user data set
        scope=None,
        sort=None,
        query=None,
        tags=None,
        author=profile_owner.id,
        page=None,
        user_logged_in=profile_owner.is_authenticated(),
    )

    context = {
        'view_user': profile_owner,
        'can_show_karma': can_show_karma,
        'search_state': search_state,
        'user_follow_feature_on': ('followit' in django_settings.INSTALLED_APPS),
    }
    if CUSTOM_TAB:
        context['custom_tab_name'] = CUSTOM_TAB['NAME']
        context['custom_tab_slug'] = CUSTOM_TAB['SLUG']
    return user_view_func(request, profile_owner, context)
@csrf.csrf_exempt
def update_has_custom_avatar(request):
    """updates current avatar type data for the user
    """
    #ajax-only endpoint, for logged-in users
    if not (request.is_ajax() and request.user.is_authenticated()):
        return HttpResponseForbidden()
    if request.user.avatar_type in ('n', 'g'):
        request.user.update_avatar_type()
    request.session['avatar_data_updated_at'] = datetime.datetime.now()
    return HttpResponse(simplejson.dumps({'status':'ok'}), mimetype='application/json')
def groups(request, id = None, slug = None):
    """output groups page
    """
    if askbot_settings.GROUPS_ENABLED == False:
        raise Http404

    #sanitize the `sort` parameter; 'my-groups' is only
    #meaningful for logged-in users
    scope = 'all-groups'
    if request.user.is_authenticated():
        requested_scope = request.GET.get('sort', 'all-groups')
        if requested_scope in ('all-groups', 'my-groups'):
            scope = requested_scope

    if scope == 'my-groups':
        groups = models.Group.objects.get_for_user(user=request.user)
    else:
        groups = models.Group.objects.all()

    groups = groups.exclude_personal()
    groups = groups.annotate(users_count=Count('user'))

    user_can_add_groups = request.user.is_authenticated() and \
        request.user.is_administrator_or_moderator()

    groups_membership_info = collections.defaultdict()
    if request.user.is_authenticated():
        #collect group membership information
        groups_membership_info = request.user.get_groups_membership_info(groups)

    data = {
        'groups': groups,
        'groups_membership_info': groups_membership_info,
        'user_can_add_groups': user_can_add_groups,
        'active_tab': 'groups',#todo vars active_tab and tab_name are too similar
        'tab_name': scope,
        'page_class': 'groups-page'
    }
    return render(request, 'groups.html', data)
<|file_name|>grid.locale-da.js<|end_file_name|><|fim▁begin|>;(function($){
/**
* jqGrid Danish Translation
* Aesiras A/S
* http://www.aesiras.dk
* Dual licensed under the MIT and GPL licenses:
* http://www.opensource.org/licenses/mit-license.php
* http://www.gnu.org/licenses/gpl.html
**/
$.jgrid = $.jgrid || {};
$.extend($.jgrid,{
defaults : {
recordtext: "Vis {0} - {1} of {2}",
emptyrecords: "Ingen linjer fundet",
loadtext: "Henter...",
pgtext : "Side {0} af {1}"
},
search : {
caption: "Søg...",
Find: "Find",
Reset: "Nulstil",
odata: [{ oper:'eq', text:"lig"},{ oper:'ne', text:"forskellige fra"},{ oper:'lt', text:"mindre"},{ oper:'le', text:"mindre eller lig"},{ oper:'gt', text:"større"},{ oper:'ge', text:"større eller lig"},{ oper:'bw', text:"begynder med"},{ oper:'bn', text:"begynder ikke med"},{ oper:'in', text:"findes i"},{ oper:'ni', text:"findes ikke i"},{ oper:'ew', text:"ender med"},{ oper:'en', text:"ender ikke med"},{ oper:'cn', text:"indeholder"},{ oper:'nc', text:"indeholder ikke"},{ oper:'nu', text:'is null'},{ oper:'nn', text:'is not null'}],
groupOps: [ { op: "AND", text: "all" }, { op: "OR", text: "any" } ],
operandTitle : "Click to select search operation.",
resetTitle : "Reset Search Value"
},
edit : {
addCaption: "Tilføj",
editCaption: "Ret",
bSubmit: "Send",
bCancel: "Annuller",
bClose: "Luk",
saveData: "Data er ændret. Gem data?",
bYes : "Ja",
bNo : "Nej",
bExit : "Fortryd",
msg: {
required:"Felt er nødvendigt",
number:"Indtast venligst et validt tal",
minValue:"værdi skal være større end eller lig med",
maxValue:"værdi skal være mindre end eller lig med",
email: "er ikke en gyldig email",
integer: "Indtast venligst et gyldigt heltal",
date: "Indtast venligst en gyldig datoværdi",
url: "er ugyldig URL. Prefix mangler ('http://' or 'https://')",
nodefined : " er ikke defineret!",
novalue : " returværdi kræves!",
customarray : "Custom function should return array!",
customfcheck : "Custom function should be present in case of custom checking!"
}
},
view : {
caption: "Vis linje",
bClose: "Luk"
},
del : {
caption: "Slet",
msg: "Slet valgte linje(r)?",
bSubmit: "Slet",
bCancel: "Fortryd"
},
nav : {
edittext: " ",
edittitle: "Rediger valgte linje",
addtext:" ",
addtitle: "Tilføj ny linje",
deltext: " ",
deltitle: "Slet valgte linje",
searchtext: " ",
searchtitle: "Find linjer",
refreshtext: "",
refreshtitle: "Indlæs igen",
alertcap: "Advarsel",
alerttext: "Vælg venligst linje",
viewtext: "",
viewtitle: "Vis valgte linje"
},
col : {<|fim▁hole|> bSubmit: "Opdatere",
bCancel: "Fortryd"
},
errors : {
errcap : "Fejl",
nourl : "Ingen url valgt",
norecords: "Ingen linjer at behandle",
model : "colNames og colModel har ikke samme længde!"
},
formatter : {
integer : {thousandsSeparator: " ", defaultValue: '0'},
number : {decimalSeparator:",", thousandsSeparator: " ", decimalPlaces: 2, defaultValue: '0,00'},
currency : {decimalSeparator:",", thousandsSeparator: " ", decimalPlaces: 2, prefix: "", suffix:"", defaultValue: '0,00'},
date : {
dayNames: [
"Søn", "Man", "Tir", "Ons", "Tor", "Fre", "Lør",
"Søndag", "Mandag", "Tirsdag", "Onsdag", "Torsdag", "Fredag", "Lørdag"
],
monthNames: [
"Jan", "Feb", "Mar", "Apr", "Maj", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dec",
"Januar", "Februar", "Marts", "April", "Maj", "Juni", "Juli", "August", "September", "Oktober", "November", "December"
],
AmPm : ["","","",""],
S: function (j) {return '.'},
srcformat: 'Y-m-d',
newformat: 'd/m/Y',
parseRe : /[#%\\\/:_;.,\t\s-]/,
masks : {
ISO8601Long:"Y-m-d H:i:s",
ISO8601Short:"Y-m-d",
ShortDate: "j/n/Y",
LongDate: "l d. F Y",
FullDateTime: "l d F Y G:i:s",
MonthDay: "d. F",
ShortTime: "G:i",
LongTime: "G:i:s",
SortableDateTime: "Y-m-d\\TH:i:s",
UniversalSortableDateTime: "Y-m-d H:i:sO",
YearMonth: "F Y"
},
reformatAfterEdit : false
},
baseLinkUrl: '',
showAction: '',
target: '',
checkbox : {disabled:true},
idName : 'id'
}
});
// DA
})(jQuery);<|fim▁end|> | caption: "Vis/skjul kolonner", |
<|file_name|>ru_bcv_parser.js<|end_file_name|><|fim▁begin|>// Generated by CoffeeScript 1.9.1
(function() {
var bcv_parser, bcv_passage, bcv_utils, root,
hasProp = {}.hasOwnProperty;
root = this;
bcv_parser = (function() {
bcv_parser.prototype.s = "";
bcv_parser.prototype.entities = [];
bcv_parser.prototype.passage = null;
bcv_parser.prototype.regexps = {};
bcv_parser.prototype.options = {
consecutive_combination_strategy: "combine",
osis_compaction_strategy: "b",
book_sequence_strategy: "ignore",
invalid_sequence_strategy: "ignore",
sequence_combination_strategy: "combine",
invalid_passage_strategy: "ignore",
non_latin_digits_strategy: "ignore",
passage_existence_strategy: "bcv",
zero_chapter_strategy: "error",
zero_verse_strategy: "error",
book_alone_strategy: "ignore",
book_range_strategy: "ignore",
captive_end_digits_strategy: "delete",
end_range_digits_strategy: "verse",
include_apocrypha: false,
ps151_strategy: "c",
versification_system: "default",
case_sensitive: "none"
};
function bcv_parser() {
var key, ref, val;
this.options = {};
ref = bcv_parser.prototype.options;
for (key in ref) {
if (!hasProp.call(ref, key)) continue;
val = ref[key];
this.options[key] = val;
}
this.versification_system(this.options.versification_system);
}
bcv_parser.prototype.parse = function(s) {
var ref;
this.reset();
this.s = s;
s = this.replace_control_characters(s);
ref = this.match_books(s), s = ref[0], this.passage.books = ref[1];
this.entities = this.match_passages(s)[0];
return this;
};
bcv_parser.prototype.parse_with_context = function(s, context) {
var entities, ref, ref1, ref2;
this.reset();
ref = this.match_books(this.replace_control_characters(context)), context = ref[0], this.passage.books = ref[1];
ref1 = this.match_passages(context), entities = ref1[0], context = ref1[1];
this.reset();
this.s = s;
s = this.replace_control_characters(s);
ref2 = this.match_books(s), s = ref2[0], this.passage.books = ref2[1];
this.passage.books.push({
value: "",
parsed: [],
start_index: 0,
type: "context",
context: context
});
s = "\x1f" + (this.passage.books.length - 1) + "/9\x1f" + s;
this.entities = this.match_passages(s)[0];
return this;
};
bcv_parser.prototype.reset = function() {
this.s = "";
this.entities = [];
if (this.passage) {
this.passage.books = [];
return this.passage.indices = {};
} else {
this.passage = new bcv_passage;
this.passage.options = this.options;
return this.passage.translations = this.translations;
}
};
bcv_parser.prototype.set_options = function(options) {
var key, val;
for (key in options) {
if (!hasProp.call(options, key)) continue;
val = options[key];
if (key === "include_apocrypha" || key === "versification_system" || key === "case_sensitive") {
this[key](val);
} else {
this.options[key] = val;
}
}
return this;
};
bcv_parser.prototype.include_apocrypha = function(arg) {
var base, base1, ref, translation, verse_count;
if (!((arg != null) && (arg === true || arg === false))) {
return this;
}
this.options.include_apocrypha = arg;
this.regexps.books = this.regexps.get_books(arg, this.options.case_sensitive);
ref = this.translations;
for (translation in ref) {
if (!hasProp.call(ref, translation)) continue;
if (translation === "aliases" || translation === "alternates") {
continue;
}
if ((base = this.translations[translation]).chapters == null) {
base.chapters = {};
}
if ((base1 = this.translations[translation].chapters)["Ps"] == null) {
base1["Ps"] = bcv_utils.shallow_clone_array(this.translations["default"].chapters["Ps"]);
}
if (arg === true) {
if (this.translations[translation].chapters["Ps151"] != null) {
verse_count = this.translations[translation].chapters["Ps151"][0];
} else {
verse_count = this.translations["default"].chapters["Ps151"][0];
}
this.translations[translation].chapters["Ps"][150] = verse_count;
} else {
if (this.translations[translation].chapters["Ps"].length === 151) {
this.translations[translation].chapters["Ps"].pop();
}
}
}
return this;
};
bcv_parser.prototype.versification_system = function(system) {
var base, base1, base2, book, chapter_list, ref, ref1;
if (!((system != null) && (this.translations[system] != null))) {
return this;
}
if (this.translations.alternates["default"] != null) {
if (system === "default") {
if (this.translations.alternates["default"].order != null) {
this.translations["default"].order = bcv_utils.shallow_clone(this.translations.alternates["default"].order);
}
ref = this.translations.alternates["default"].chapters;
for (book in ref) {
if (!hasProp.call(ref, book)) continue;
chapter_list = ref[book];
this.translations["default"].chapters[book] = bcv_utils.shallow_clone_array(chapter_list);
}
} else {
this.versification_system("default");
}
}
if ((base = this.translations.alternates)["default"] == null) {
base["default"] = {
order: null,
chapters: {}
};
}
if (system !== "default" && (this.translations[system].order != null)) {
if ((base1 = this.translations.alternates["default"]).order == null) {
base1.order = bcv_utils.shallow_clone(this.translations["default"].order);
}
this.translations["default"].order = bcv_utils.shallow_clone(this.translations[system].order);
}
if (system !== "default" && (this.translations[system].chapters != null)) {
ref1 = this.translations[system].chapters;
for (book in ref1) {
if (!hasProp.call(ref1, book)) continue;
chapter_list = ref1[book];
if ((base2 = this.translations.alternates["default"].chapters)[book] == null) {
base2[book] = bcv_utils.shallow_clone_array(this.translations["default"].chapters[book]);
}
this.translations["default"].chapters[book] = bcv_utils.shallow_clone_array(chapter_list);
}
}
this.options.versification_system = system;
this.include_apocrypha(this.options.include_apocrypha);
return this;
};
bcv_parser.prototype.case_sensitive = function(arg) {
if (!((arg != null) && (arg === "none" || arg === "books"))) {
return this;
}
if (arg === this.options.case_sensitive) {
return this;
}
this.options.case_sensitive = arg;
this.regexps.books = this.regexps.get_books(this.options.include_apocrypha, arg);
return this;
};
bcv_parser.prototype.translation_info = function(new_translation) {
var book, chapter_list, id, old_translation, out, ref, ref1, ref2;
if (new_translation == null) {
new_translation = "default";
}
if ((new_translation != null) && (((ref = this.translations.aliases[new_translation]) != null ? ref.alias : void 0) != null)) {
new_translation = this.translations.aliases[new_translation].alias;
}
if (!((new_translation != null) && (this.translations[new_translation] != null))) {
new_translation = "default";
}
old_translation = this.options.versification_system;
if (new_translation !== old_translation) {
this.versification_system(new_translation);
}
out = {
order: bcv_utils.shallow_clone(this.translations["default"].order),
books: [],
chapters: {}
};
ref1 = this.translations["default"].chapters;
for (book in ref1) {
if (!hasProp.call(ref1, book)) continue;
chapter_list = ref1[book];
out.chapters[book] = bcv_utils.shallow_clone_array(chapter_list);
}
ref2 = out.order;
for (book in ref2) {
if (!hasProp.call(ref2, book)) continue;
id = ref2[book];
out.books[id - 1] = book;
}
if (new_translation !== old_translation) {
this.versification_system(old_translation);
}
return out;
};
bcv_parser.prototype.replace_control_characters = function(s) {
s = s.replace(this.regexps.control, " ");
if (this.options.non_latin_digits_strategy === "replace") {
s = s.replace(/[٠۰߀०০੦૦୦0౦೦൦๐໐༠၀႐០᠐᥆᧐᪀᪐᭐᮰᱀᱐꘠꣐꤀꧐꩐꯰0]/g, "0");
s = s.replace(/[١۱߁१১੧૧୧௧౧೧൧๑໑༡၁႑១᠑᥇᧑᪁᪑᭑᮱᱁᱑꘡꣑꤁꧑꩑꯱1]/g, "1");
s = s.replace(/[٢۲߂२২੨૨୨௨౨೨൨๒໒༢၂႒២᠒᥈᧒᪂᪒᭒᮲᱂᱒꘢꣒꤂꧒꩒꯲2]/g, "2");
s = s.replace(/[٣۳߃३৩੩૩୩௩౩೩൩๓໓༣၃႓៣᠓᥉᧓᪃᪓᭓᮳᱃᱓꘣꣓꤃꧓꩓꯳3]/g, "3");
s = s.replace(/[٤۴߄४৪੪૪୪௪౪೪൪๔໔༤၄႔៤᠔᥊᧔᪄᪔᭔᮴᱄᱔꘤꣔꤄꧔꩔꯴4]/g, "4");
s = s.replace(/[٥۵߅५৫੫૫୫௫౫೫൫๕໕༥၅႕៥᠕᥋᧕᪅᪕᭕᮵᱅᱕꘥꣕꤅꧕꩕꯵5]/g, "5");
s = s.replace(/[٦۶߆६৬੬૬୬௬౬೬൬๖໖༦၆႖៦᠖᥌᧖᪆᪖᭖᮶᱆᱖꘦꣖꤆꧖꩖꯶6]/g, "6");
s = s.replace(/[٧۷߇७৭੭૭୭௭౭೭൭๗໗༧၇႗៧᠗᥍᧗᪇᪗᭗᮷᱇᱗꘧꣗꤇꧗꩗꯷7]/g, "7");
s = s.replace(/[٨۸߈८৮੮૮୮௮౮೮൮๘໘༨၈႘៨᠘᥎᧘᪈᪘᭘᮸᱈᱘꘨꣘꤈꧘꩘꯸8]/g, "8");
s = s.replace(/[٩۹߉९৯੯૯୯௯౯೯൯๙໙༩၉႙៩᠙᥏᧙᪉᪙᭙᮹᱉᱙꘩꣙꤉꧙꩙꯹9]/g, "9");
}
return s;
};
bcv_parser.prototype.match_books = function(s) {
var book, books, has_replacement, k, len, ref;
books = [];
ref = this.regexps.books;
for (k = 0, len = ref.length; k < len; k++) {
book = ref[k];
has_replacement = false;
s = s.replace(book.regexp, function(full, prev, bk) {
var extra;
has_replacement = true;
books.push({
value: bk,
parsed: book.osis,
type: "book"
});
extra = book.extra != null ? "/" + book.extra : "";
return prev + "\x1f" + (books.length - 1) + extra + "\x1f";
});
if (has_replacement === true && /^[\s\x1f\d:.,;\-\u2013\u2014]+$/.test(s)) {
break;
}
}
s = s.replace(this.regexps.translations, function(match) {
books.push({
value: match,
parsed: match.toLowerCase(),
type: "translation"
});
return "\x1e" + (books.length - 1) + "\x1e";
});
return [s, this.get_book_indices(books, s)];
};
bcv_parser.prototype.get_book_indices = function(books, s) {
var add_index, match, re;
add_index = 0;
re = /([\x1f\x1e])(\d+)(?:\/\d+)?\1/g;
while (match = re.exec(s)) {
books[match[2]].start_index = match.index + add_index;
add_index += books[match[2]].value.length - match[0].length;
}
return books;
};
bcv_parser.prototype.match_passages = function(s) {
var accum, book_id, entities, full, match, next_char, original_part_length, part, passage, post_context, re, ref, regexp_index_adjust, start_index_adjust;
entities = [];
post_context = {};
while (match = this.regexps.escaped_passage.exec(s)) {
full = match[0], part = match[1], book_id = match[2];
original_part_length = part.length;
match.index += full.length - original_part_length;
if (/\s[2-9]\d\d\s*$|\s\d{4,}\s*$/.test(part)) {
re = /\s+\d+\s*$/;
part = part.replace(re, "");
}
if (!/[\d\x1f\x1e)]$/.test(part)) {
part = this.replace_match_end(part);
}
if (this.options.captive_end_digits_strategy === "delete") {
next_char = match.index + part.length;
if (s.length > next_char && /^\w/.test(s.substr(next_char, 1))) {
part = part.replace(/[\s*]+\d+$/, "");
}
part = part.replace(/(\x1e[)\]]?)[\s*]*\d+$/, "$1");
}
part = part.replace(/[A-Z]+/g, function(capitals) {
return capitals.toLowerCase();
});
start_index_adjust = part.substr(0, 1) === "\x1f" ? 0 : part.split("\x1f")[0].length;
passage = {
value: grammar.parse(part),
type: "base",
start_index: this.passage.books[book_id].start_index - start_index_adjust,
match: part
};
if (this.options.book_alone_strategy === "full" && this.options.book_range_strategy === "include" && passage.value[0].type === "b" && (passage.value.length === 1 || (passage.value.length > 1 && passage.value[1].type === "translation_sequence")) && start_index_adjust === 0 && (this.passage.books[book_id].parsed.length === 1 || (this.passage.books[book_id].parsed.length > 1 && this.passage.books[book_id].parsed[1].type === "translation")) && /^[234]/.test(this.passage.books[book_id].parsed[0])) {
this.create_book_range(s, passage, book_id);
}
ref = this.passage.handle_obj(passage), accum = ref[0], post_context = ref[1];
entities = entities.concat(accum);
regexp_index_adjust = this.adjust_regexp_end(accum, original_part_length, part.length);
if (regexp_index_adjust > 0) {
this.regexps.escaped_passage.lastIndex -= regexp_index_adjust;
}
}
return [entities, post_context];
};
bcv_parser.prototype.adjust_regexp_end = function(accum, old_length, new_length) {
var regexp_index_adjust;
regexp_index_adjust = 0;
if (accum.length > 0) {
regexp_index_adjust = old_length - accum[accum.length - 1].indices[1] - 1;
} else if (old_length !== new_length) {
regexp_index_adjust = old_length - new_length;
}
return regexp_index_adjust;
};
bcv_parser.prototype.replace_match_end = function(part) {
var match, remove;
remove = part.length;
while (match = this.regexps.match_end_split.exec(part)) {
remove = match.index + match[0].length;
}
if (remove < part.length) {
part = part.substr(0, remove);
}
return part;
};
bcv_parser.prototype.create_book_range = function(s, passage, book_id) {
var cases, i, k, limit, prev, range_regexp, ref;
cases = [bcv_parser.prototype.regexps.first, bcv_parser.prototype.regexps.second, bcv_parser.prototype.regexps.third];
limit = parseInt(this.passage.books[book_id].parsed[0].substr(0, 1), 10);
for (i = k = 1, ref = limit; 1 <= ref ? k < ref : k > ref; i = 1 <= ref ? ++k : --k) {
range_regexp = i === limit - 1 ? bcv_parser.prototype.regexps.range_and : bcv_parser.prototype.regexps.range_only;
prev = s.match(RegExp("(?:^|\\W)(" + cases[i - 1] + "\\s*" + range_regexp + "\\s*)\\x1f" + book_id + "\\x1f", "i"));
if (prev != null) {
return this.add_book_range_object(passage, prev, i);
}
}
return false;
};
bcv_parser.prototype.add_book_range_object = function(passage, prev, start_book_number) {
var i, k, length, ref, ref1, results;
length = prev[1].length;
passage.value[0] = {
type: "b_range_pre",
value: [
{
type: "b_pre",
value: start_book_number.toString(),
indices: [prev.index, prev.index + length]
}, passage.value[0]
],
indices: [0, passage.value[0].indices[1] + length]
};
passage.value[0].value[1].indices[0] += length;
passage.value[0].value[1].indices[1] += length;
passage.start_index -= length;
passage.match = prev[1] + passage.match;
if (passage.value.length === 1) {
return;
}
results = [];
for (i = k = 1, ref = passage.value.length; 1 <= ref ? k < ref : k > ref; i = 1 <= ref ? ++k : --k) {
if (passage.value[i].value == null) {
continue;
}
if (((ref1 = passage.value[i].value[0]) != null ? ref1.indices : void 0) != null) {
passage.value[i].value[0].indices[0] += length;
passage.value[i].value[0].indices[1] += length;
}
passage.value[i].indices[0] += length;
results.push(passage.value[i].indices[1] += length);
}
return results;
};
bcv_parser.prototype.osis = function() {
var k, len, osis, out, ref;
out = [];
ref = this.parsed_entities();
for (k = 0, len = ref.length; k < len; k++) {
osis = ref[k];
if (osis.osis.length > 0) {
out.push(osis.osis);
}
}
return out.join(",");
};
bcv_parser.prototype.osis_and_translations = function() {
var k, len, osis, out, ref;
out = [];
ref = this.parsed_entities();
for (k = 0, len = ref.length; k < len; k++) {
osis = ref[k];
if (osis.osis.length > 0) {
out.push([osis.osis, osis.translations.join(",")]);
}
}
return out;
};
bcv_parser.prototype.osis_and_indices = function() {
var k, len, osis, out, ref;
out = [];
ref = this.parsed_entities();
for (k = 0, len = ref.length; k < len; k++) {
osis = ref[k];
if (osis.osis.length > 0) {
out.push({
osis: osis.osis,
translations: osis.translations,
indices: osis.indices
});
}
}
return out;
};
bcv_parser.prototype.parsed_entities = function() {
var entity, entity_id, i, k, l, last_i, len, len1, length, m, n, osis, osises, out, passage, ref, ref1, ref2, ref3, strings, translation, translation_alias, translation_osis, translations;
out = [];
for (entity_id = k = 0, ref = this.entities.length; 0 <= ref ? k < ref : k > ref; entity_id = 0 <= ref ? ++k : --k) {
entity = this.entities[entity_id];
if (entity.type && entity.type === "translation_sequence" && out.length > 0 && entity_id === out[out.length - 1].entity_id + 1) {
out[out.length - 1].indices[1] = entity.absolute_indices[1];
}
if (entity.passages == null) {
continue;
}
if ((entity.type === "b" && this.options.book_alone_strategy === "ignore") || (entity.type === "b_range" && this.options.book_range_strategy === "ignore") || entity.type === "context") {
continue;
}
translations = [];
translation_alias = null;
if (entity.passages[0].translations != null) {
ref1 = entity.passages[0].translations;
for (l = 0, len = ref1.length; l < len; l++) {
translation = ref1[l];
translation_osis = ((ref2 = translation.osis) != null ? ref2.length : void 0) > 0 ? translation.osis : "";
if (translation_alias == null) {
translation_alias = translation.alias;
}
translations.push(translation_osis);
}
} else {
translations = [""];
translation_alias = "default";
}
osises = [];
length = entity.passages.length;
for (i = m = 0, ref3 = length; 0 <= ref3 ? m < ref3 : m > ref3; i = 0 <= ref3 ? ++m : --m) {
passage = entity.passages[i];
if (passage.type == null) {
passage.type = entity.type;
}
if (passage.valid.valid === false) {
if (this.options.invalid_sequence_strategy === "ignore" && entity.type === "sequence") {
this.snap_sequence("ignore", entity, osises, i, length);
}
if (this.options.invalid_passage_strategy === "ignore") {
continue;
}
}
if ((passage.type === "b" || passage.type === "b_range") && this.options.book_sequence_strategy === "ignore" && entity.type === "sequence") {
this.snap_sequence("book", entity, osises, i, length);
continue;
}
if ((passage.type === "b_range_start" || passage.type === "range_end_b") && this.options.book_range_strategy === "ignore") {
this.snap_range(entity, i);
}
if (passage.absolute_indices == null) {
passage.absolute_indices = entity.absolute_indices;
}
osises.push({
osis: passage.valid.valid ? this.to_osis(passage.start, passage.end, translation_alias) : "",
type: passage.type,
indices: passage.absolute_indices,
translations: translations,
start: passage.start,
end: passage.end,
enclosed_indices: passage.enclosed_absolute_indices,
entity_id: entity_id,
entities: [passage]
});
}
if (osises.length === 0) {
continue;
}
if (osises.length > 1 && this.options.consecutive_combination_strategy === "combine") {
osises = this.combine_consecutive_passages(osises, translation_alias);
}
if (this.options.sequence_combination_strategy === "separate") {
out = out.concat(osises);
} else {
strings = [];
last_i = osises.length - 1;
if ((osises[last_i].enclosed_indices != null) && osises[last_i].enclosed_indices[1] >= 0) {
entity.absolute_indices[1] = osises[last_i].enclosed_indices[1];
}
for (n = 0, len1 = osises.length; n < len1; n++) {
osis = osises[n];
if (osis.osis.length > 0) {
strings.push(osis.osis);
}
}
out.push({
osis: strings.join(","),
indices: entity.absolute_indices,
translations: translations,
entity_id: entity_id,
entities: osises
});
}
}
return out;
};
bcv_parser.prototype.to_osis = function(start, end, translation) {
var osis, out;
if ((end.c == null) && (end.v == null) && start.b === end.b && (start.c == null) && (start.v == null) && this.options.book_alone_strategy === "first_chapter") {
end.c = 1;
}
osis = {
start: "",
end: ""
};
if (start.c == null) {
start.c = 1;
}
if (start.v == null) {
start.v = 1;
}
if (end.c == null) {
if (this.options.passage_existence_strategy.indexOf("c") >= 0 || ((this.passage.translations[translation].chapters[end.b] != null) && this.passage.translations[translation].chapters[end.b].length === 1)) {
end.c = this.passage.translations[translation].chapters[end.b].length;
} else {
end.c = 999;
}
}
if (end.v == null) {
if ((this.passage.translations[translation].chapters[end.b][end.c - 1] != null) && this.options.passage_existence_strategy.indexOf("v") >= 0) {
end.v = this.passage.translations[translation].chapters[end.b][end.c - 1];
} else {
end.v = 999;
}
}
if (this.options.include_apocrypha && this.options.ps151_strategy === "b" && ((start.c === 151 && start.b === "Ps") || (end.c === 151 && end.b === "Ps"))) {
this.fix_ps151(start, end, translation);
}
if (this.options.osis_compaction_strategy === "b" && start.c === 1 && start.v === 1 && end.c === this.passage.translations[translation].chapters[end.b].length && end.v === this.passage.translations[translation].chapters[end.b][end.c - 1]) {
osis.start = start.b;
osis.end = end.b;
} else if (this.options.osis_compaction_strategy.length <= 2 && start.v === 1 && (end.v === 999 || end.v === this.passage.translations[translation].chapters[end.b][end.c - 1])) {
osis.start = start.b + "." + start.c.toString();
osis.end = end.b + "." + end.c.toString();
} else {
osis.start = start.b + "." + start.c.toString() + "." + start.v.toString();
osis.end = end.b + "." + end.c.toString() + "." + end.v.toString();
}
if (osis.start === osis.end) {
out = osis.start;
} else {
out = osis.start + "-" + osis.end;
}
if (start.extra != null) {
out = start.extra + "," + out;
}
if (end.extra != null) {
out += "," + end.extra;
}
return out;
};
bcv_parser.prototype.fix_ps151 = function(start, end, translation) {
var ref;
if (translation !== "default" && (((ref = this.translations[translation]) != null ? ref.chapters["Ps151"] : void 0) == null)) {
this.passage.promote_book_to_translation("Ps151", translation);
}
if (start.c === 151 && start.b === "Ps") {
if (end.c === 151 && end.b === "Ps") {
start.b = "Ps151";
start.c = 1;
end.b = "Ps151";
return end.c = 1;
} else {
start.extra = this.to_osis({
b: "Ps151",
c: 1,
v: start.v
}, {
b: "Ps151",
c: 1,
v: this.passage.translations[translation].chapters["Ps151"][0]
}, translation);
start.b = "Prov";
start.c = 1;
return start.v = 1;
}
} else {
end.extra = this.to_osis({
b: "Ps151",
c: 1,
v: 1
}, {
b: "Ps151",
c: 1,
v: end.v
}, translation);
end.c = 150;
return end.v = this.passage.translations[translation].chapters["Ps"][149];
}
};
bcv_parser.prototype.combine_consecutive_passages = function(osises, translation) {
var enclosed_sequence_start, has_enclosed, i, is_enclosed_last, k, last_i, osis, out, prev, prev_i, ref;
out = [];
prev = {};
last_i = osises.length - 1;
enclosed_sequence_start = -1;
has_enclosed = false;
for (i = k = 0, ref = last_i; 0 <= ref ? k <= ref : k >= ref; i = 0 <= ref ? ++k : --k) {
osis = osises[i];
if (osis.osis.length > 0) {
prev_i = out.length - 1;
is_enclosed_last = false;
if (osis.enclosed_indices[0] !== enclosed_sequence_start) {
enclosed_sequence_start = osis.enclosed_indices[0];
}
if (enclosed_sequence_start >= 0 && (i === last_i || osises[i + 1].enclosed_indices[0] !== osis.enclosed_indices[0])) {
is_enclosed_last = true;
has_enclosed = true;
}
if (this.is_verse_consecutive(prev, osis.start, translation)) {
out[prev_i].end = osis.end;
out[prev_i].is_enclosed_last = is_enclosed_last;
out[prev_i].indices[1] = osis.indices[1];
out[prev_i].enclosed_indices[1] = osis.enclosed_indices[1];
out[prev_i].osis = this.to_osis(out[prev_i].start, osis.end, translation);
} else {
out.push(osis);
}
prev = {
b: osis.end.b,
c: osis.end.c,
v: osis.end.v
};
} else {
out.push(osis);
prev = {};
}
}
if (has_enclosed) {
this.snap_enclosed_indices(out);
}
return out;
};
bcv_parser.prototype.snap_enclosed_indices = function(osises) {
var k, len, osis;
for (k = 0, len = osises.length; k < len; k++) {
osis = osises[k];
if (osis.is_enclosed_last != null) {
if (osis.enclosed_indices[0] < 0 && osis.is_enclosed_last) {
osis.indices[1] = osis.enclosed_indices[1];
}
delete osis.is_enclosed_last;
}
}
return osises;
};
bcv_parser.prototype.is_verse_consecutive = function(prev, check, translation) {
var translation_order;
if (prev.b == null) {
return false;
}
translation_order = this.passage.translations[translation].order != null ? this.passage.translations[translation].order : this.passage.translations["default"].order;
if (prev.b === check.b) {
if (prev.c === check.c) {
if (prev.v === check.v - 1) {
return true;
}
} else if (check.v === 1 && prev.c === check.c - 1) {
if (prev.v === this.passage.translations[translation].chapters[prev.b][prev.c - 1]) {
return true;
}
}
} else if (check.c === 1 && check.v === 1 && translation_order[prev.b] === translation_order[check.b] - 1) {
if (prev.c === this.passage.translations[translation].chapters[prev.b].length && prev.v === this.passage.translations[translation].chapters[prev.b][prev.c - 1]) {
return true;
}
}
return false;
};
bcv_parser.prototype.snap_range = function(entity, passage_i) {
var entity_i, key, pluck, ref, source_entity, target_entity, temp, type;
if (entity.type === "b_range_start" || (entity.type === "sequence" && entity.passages[passage_i].type === "b_range_start")) {
entity_i = 1;
source_entity = "end";
type = "b_range_start";
} else {
entity_i = 0;
source_entity = "start";
type = "range_end_b";
}
target_entity = source_entity === "end" ? "start" : "end";
ref = entity.passages[passage_i][target_entity];
for (key in ref) {
if (!hasProp.call(ref, key)) continue;
entity.passages[passage_i][target_entity][key] = entity.passages[passage_i][source_entity][key];
}
if (entity.type === "sequence") {
if (passage_i >= entity.value.length) {
passage_i = entity.value.length - 1;
}
pluck = this.passage.pluck(type, entity.value[passage_i]);
if (pluck != null) {
temp = this.snap_range(pluck, 0);
if (passage_i === 0) {
entity.absolute_indices[0] = temp.absolute_indices[0];
} else {
entity.absolute_indices[1] = temp.absolute_indices[1];
}
}
} else {
entity.original_type = entity.type;
entity.type = entity.value[entity_i].type;
entity.absolute_indices = [entity.value[entity_i].absolute_indices[0], entity.value[entity_i].absolute_indices[1]];
}
return entity;
};
bcv_parser.prototype.snap_sequence = function(type, entity, osises, i, length) {
var passage;
passage = entity.passages[i];
if (passage.absolute_indices[0] === entity.absolute_indices[0] && i < length - 1 && this.get_snap_sequence_i(entity.passages, i, length) !== i) {
entity.absolute_indices[0] = entity.passages[i + 1].absolute_indices[0];
this.remove_absolute_indices(entity.passages, i + 1);
} else if (passage.absolute_indices[1] === entity.absolute_indices[1] && i > 0) {
entity.absolute_indices[1] = osises.length > 0 ? osises[osises.length - 1].indices[1] : entity.passages[i - 1].absolute_indices[1];
} else if (type === "book" && i < length - 1 && !this.starts_with_book(entity.passages[i + 1])) {
entity.passages[i + 1].absolute_indices[0] = passage.absolute_indices[0];
}
return entity;
};
bcv_parser.prototype.get_snap_sequence_i = function(passages, i, length) {
var j, k, ref, ref1;
for (j = k = ref = i + 1, ref1 = length; ref <= ref1 ? k < ref1 : k > ref1; j = ref <= ref1 ? ++k : --k) {
if (this.starts_with_book(passages[j])) {
return j;
}
if (passages[j].valid.valid) {
return i;
}
}
return i;
};
bcv_parser.prototype.starts_with_book = function(passage) {
if (passage.type.substr(0, 1) === "b") {
return true;
}
if ((passage.type === "range" || passage.type === "ff") && passage.start.type.substr(0, 1) === "b") {
return true;
}
return false;
};
bcv_parser.prototype.remove_absolute_indices = function(passages, i) {
var end, k, len, passage, ref, ref1, start;
if (passages[i].enclosed_absolute_indices[0] < 0) {
return false;
}
ref = passages[i].enclosed_absolute_indices, start = ref[0], end = ref[1];
ref1 = passages.slice(i);
for (k = 0, len = ref1.length; k < len; k++) {
passage = ref1[k];
if (passage.enclosed_absolute_indices[0] === start && passage.enclosed_absolute_indices[1] === end) {
passage.enclosed_absolute_indices = [-1, -1];
} else {
break;
}
}
return true;
};
return bcv_parser;
})();
root.bcv_parser = bcv_parser;
bcv_passage = (function() {
function bcv_passage() {}
bcv_passage.prototype.books = [];
bcv_passage.prototype.indices = {};
bcv_passage.prototype.options = {};
bcv_passage.prototype.translations = {};
bcv_passage.prototype.handle_array = function(passages, accum, context) {
var k, len, passage, ref;
if (accum == null) {
accum = [];
}
if (context == null) {
context = {};
}
for (k = 0, len = passages.length; k < len; k++) {
passage = passages[k];
if (passage == null) {
continue;
}
if (passage.type === "stop") {
break;
}
ref = this.handle_obj(passage, accum, context), accum = ref[0], context = ref[1];
}
return [accum, context];
};
bcv_passage.prototype.handle_obj = function(passage, accum, context) {
if ((passage.type != null) && (this[passage.type] != null)) {
return this[passage.type](passage, accum, context);
} else {
return [accum, context];
}
};
bcv_passage.prototype.b = function(passage, accum, context) {
  // Handle a standalone book. Each candidate book ID from the grammar is
  // validated; the first valid one becomes the passage and the rest become
  // `alternates`. Replaces `context` with just the chosen book
  // (plus any active translations).
  var alternates, b, k, len, obj, ref, valid;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.passages = [];
  alternates = [];
  ref = this.books[passage.value].parsed;
  for (k = 0, len = ref.length; k < len; k++) {
    b = ref[k];
    valid = this.validate_ref(passage.start_context.translations, {
      b: b
    });
    obj = {
      start: {
        b: b
      },
      end: {
        b: b
      },
      valid: valid
    };
    if (passage.passages.length === 0 && valid.valid) {
      passage.passages.push(obj);
    } else {
      alternates.push(obj);
    }
  }
  if (passage.passages.length === 0) {
    // No candidate validated; fall back to the first (invalid) alternate.
    passage.passages.push(alternates.shift());
  }
  if (alternates.length > 0) {
    passage.passages[0].alternates = alternates;
  }
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  accum.push(passage);
  context = {
    b: passage.passages[0].start.b
  };
  if (passage.start_context.translations != null) {
    context.translations = passage.start_context.translations;
  }
  return [accum, context];
};
// A book-to-book range ("Genesis-Exodus") is handled by the generic range logic.
bcv_passage.prototype.b_range = function(passage, accum, context) {
  return this.range(passage, accum, context);
};
bcv_passage.prototype.b_range_pre = function(passage, accum, context) {
  // Handle a numbered-book range written with the number first ("1-2 Kings"):
  // resolve the *end* book normally, then synthesize the start book by
  // swapping in the leading number (e.g. "2Kgs" -> "1Kgs").
  // Fix: dropped the `alternates` local, which was initialized but never used.
  var book, end, ref, ref1, start_obj;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.passages = [];
  book = this.pluck("b", passage.value);
  ref = this.b(book, [], context), (ref1 = ref[0], end = ref1[0]), context = ref[1];
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  start_obj = {
    // Leading digit + the resolved end book's ID minus its own digit.
    b: passage.value[0].value + end.passages[0].start.b.substr(1),
    type: "b"
  };
  passage.passages = [
    {
      start: start_obj,
      end: end.passages[0].end,
      valid: end.passages[0].valid
    }
  ];
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  accum.push(passage);
  return [accum, context];
};
// A range whose start token is a book ("Matt-Mark 2") uses the generic range logic.
bcv_passage.prototype.b_range_start = function(passage, accum, context) {
  return this.range(passage, accum, context);
};
bcv_passage.prototype.base = function(passage, accum, context) {
  // Entry point for one grammar match: build the offset table used to map
  // match-relative indices to absolute string positions, then process the
  // parsed sequence.
  this.indices = this.calculate_indices(passage.match, passage.start_index);
  return this.handle_array(passage.value, accum, context);
};
bcv_passage.prototype.bc = function(passage, accum, context) {
  // Handle a book-chapter reference ("Gen 1"). Validates the chapter against
  // each candidate book; in a single-chapter book a nonexistent chapter is
  // reinterpreted as a verse of chapter 1. First valid candidate wins; the
  // rest become `alternates`.
  var alternates, b, c, context_key, k, len, obj, ref, ref1, valid;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.passages = [];
  this.reset_context(context, ["b", "c", "v"]);
  c = this.pluck("c", passage.value).value;
  alternates = [];
  ref = this.books[this.pluck("b", passage.value).value].parsed;
  for (k = 0, len = ref.length; k < len; k++) {
    b = ref[k];
    context_key = "c";
    valid = this.validate_ref(passage.start_context.translations, {
      b: b,
      c: c
    });
    obj = {
      start: {
        b: b
      },
      end: {
        b: b
      },
      valid: valid
    };
    if (valid.messages.start_chapter_not_exist_in_single_chapter_book) {
      // Single-chapter book: revalidate the number as a verse of chapter 1.
      obj.valid = this.validate_ref(passage.start_context.translations, {
        b: b,
        v: c
      });
      obj.valid.messages.start_chapter_not_exist_in_single_chapter_book = 1;
      obj.start.c = 1;
      obj.end.c = 1;
      context_key = "v";
    }
    obj.start[context_key] = c;
    // Upgrade a zero chapter/verse if the configured strategy allows it.
    ref1 = this.fix_start_zeroes(obj.valid, obj.start.c, obj.start.v), obj.start.c = ref1[0], obj.start.v = ref1[1];
    if (obj.start.v == null) {
      delete obj.start.v;
    }
    obj.end[context_key] = obj.start[context_key];
    if (passage.passages.length === 0 && obj.valid.valid) {
      passage.passages.push(obj);
    } else {
      alternates.push(obj);
    }
  }
  if (passage.passages.length === 0) {
    // No candidate validated; fall back to the first (invalid) alternate.
    passage.passages.push(alternates.shift());
  }
  if (alternates.length > 0) {
    passage.passages[0].alternates = alternates;
  }
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  this.set_context_from_object(context, ["b", "c", "v"], passage.passages[0].start);
  accum.push(passage);
  return [accum, context];
};
bcv_passage.prototype.bc_title = function(passage, accum, context) {
  // Handle "book chapter title" (e.g. "Ps 3 title"). Titles only make sense
  // in Psalms: for another book, promote a Psalms alternate when one exists;
  // otherwise treat the match as a plain `bc`. A real title becomes verse 1
  // and the passage is reprocessed as `bcv`.
  var bc, i, k, ref, ref1, ref2, title;
  passage.start_context = bcv_utils.shallow_clone(context);
  ref = this.bc(this.pluck("bc", passage.value), [], context), (ref1 = ref[0], bc = ref1[0]), context = ref[1];
  if (bc.passages[0].start.b.substr(0, 2) !== "Ps" && (bc.passages[0].alternates != null)) {
    for (i = k = 0, ref2 = bc.passages[0].alternates.length; 0 <= ref2 ? k < ref2 : k > ref2; i = 0 <= ref2 ? ++k : --k) {
      if (bc.passages[0].alternates[i].start.b.substr(0, 2) !== "Ps") {
        continue;
      }
      // Promote the Psalms alternate so the title can apply.
      bc.passages[0] = bc.passages[0].alternates[i];
      break;
    }
  }
  if (bc.passages[0].start.b.substr(0, 2) !== "Ps") {
    accum.push(bc);
    return [accum, context];
  }
  this.books[this.pluck("b", bc.value).value].parsed = ["Ps"];
  title = this.pluck("title", passage.value);
  if (title == null) {
    // The grammar may have parsed the title word as a verse token instead.
    title = this.pluck("v", passage.value);
  }
  // Rewrite the title as verse 1 so it resolves like "Ps 3:1".
  passage.value[1] = {
    type: "v",
    value: [
      {
        type: "integer",
        value: 1,
        indices: title.indices
      }
    ],
    indices: title.indices
  };
  passage.type = "bcv";
  return this.bcv(passage, accum, passage.start_context);
};
bcv_passage.prototype.bcv = function(passage, accum, context) {
  // Handle a fully specified book-chapter-verse reference ("Gen 1:1").
  // Validates against each candidate book; the first valid candidate wins
  // and the rest become `alternates`.
  var alternates, b, bc, c, k, len, obj, ref, ref1, v, valid;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.passages = [];
  this.reset_context(context, ["b", "c", "v"]);
  bc = this.pluck("bc", passage.value);
  c = this.pluck("c", bc.value).value;
  v = this.pluck("v", passage.value).value;
  alternates = [];
  ref = this.books[this.pluck("b", bc.value).value].parsed;
  for (k = 0, len = ref.length; k < len; k++) {
    b = ref[k];
    valid = this.validate_ref(passage.start_context.translations, {
      b: b,
      c: c,
      v: v
    });
    // Upgrade a zero chapter/verse if the configured strategy allows it.
    ref1 = this.fix_start_zeroes(valid, c, v), c = ref1[0], v = ref1[1];
    obj = {
      start: {
        b: b,
        c: c,
        v: v
      },
      end: {
        b: b,
        c: c,
        v: v
      },
      valid: valid
    };
    if (passage.passages.length === 0 && valid.valid) {
      passage.passages.push(obj);
    } else {
      alternates.push(obj);
    }
  }
  if (passage.passages.length === 0) {
    // No candidate validated; fall back to the first (invalid) alternate.
    passage.passages.push(alternates.shift());
  }
  if (alternates.length > 0) {
    passage.passages[0].alternates = alternates;
  }
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  this.set_context_from_object(context, ["b", "c", "v"], passage.passages[0].start);
  accum.push(passage);
  return [accum, context];
};
bcv_passage.prototype.bv = function(passage, accum, context) {
  // Handle a book-verse reference (e.g. "Phlm 2"): synthesize chapter 1,
  // resolve it as a `bcv`, and copy the result back onto this passage.
  var b, bcv, ref, ref1, ref2, v;
  passage.start_context = bcv_utils.shallow_clone(context);
  ref = passage.value, b = ref[0], v = ref[1];
  bcv = {
    indices: passage.indices,
    value: [
      {
        type: "bc",
        value: [
          b, {
            type: "c",
            value: [
              {
                type: "integer",
                value: 1
              }
            ]
          }
        ]
      }, v
    ]
  };
  ref1 = this.bcv(bcv, [], context), (ref2 = ref1[0], bcv = ref2[0]), context = ref1[1];
  passage.passages = bcv.passages;
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  accum.push(passage);
  return [accum, context];
};
bcv_passage.prototype.c = function(passage, accum, context) {
  // Handle a chapter number within the current book context. If the book
  // turns out to be single-chapter, reinterpret the number as a verse.
  // Sets `context.c` and clears any stale verse context.
  var c, valid;
  passage.start_context = bcv_utils.shallow_clone(context);
  c = passage.type === "integer" ? passage.value : this.pluck("integer", passage.value).value;
  valid = this.validate_ref(passage.start_context.translations, {
    b: context.b,
    c: c
  });
  if (!valid.valid && valid.messages.start_chapter_not_exist_in_single_chapter_book) {
    // Single-chapter book: the number must be a verse, not a chapter.
    return this.v(passage, accum, context);
  }
  c = this.fix_start_zeroes(valid, c)[0];
  passage.passages = [
    {
      start: {
        b: context.b,
        c: c
      },
      end: {
        b: context.b,
        c: c
      },
      valid: valid
    }
  ];
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  accum.push(passage);
  context.c = c;
  this.reset_context(context, ["v"]);
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  return [accum, context];
};
bcv_passage.prototype.c_psalm = function(passage, accum, context) {
  // Handle a "chapter-as-psalm" token ("23rd Psalm"-style), where the grammar
  // stored the chapter digits inside the book token. Extract the leading
  // number and rebuild the passage as a normal `bc`.
  var c;
  passage.type = "bc";
  c = parseInt(this.books[passage.value].value.match(/^\d+/)[0], 10);
  passage.value = [
    {
      type: "b",
      value: passage.value,
      indices: passage.indices
    }, {
      type: "c",
      value: [
        {
          type: "integer",
          value: c,
          indices: passage.indices
        }
      ],
      indices: passage.indices
    }
  ];
  return this.bc(passage, accum, context);
};
bcv_passage.prototype.c_title = function(passage, accum, context) {
  // Handle "chapter title" ("3 title"). Outside a Psalms context the title
  // word is ignored and the number is a plain chapter; inside Psalms the
  // title becomes verse 1 and the passage is reprocessed as `cv`.
  var title;
  passage.start_context = bcv_utils.shallow_clone(context);
  if (context.b !== "Ps") {
    return this.c(passage.value[0], accum, context);
  }
  title = this.pluck("title", passage.value);
  passage.value[1] = {
    type: "v",
    value: [
      {
        type: "integer",
        value: 1,
        indices: title.indices
      }
    ],
    indices: title.indices
  };
  passage.type = "cv";
  return this.cv(passage, accum, passage.start_context);
};
bcv_passage.prototype.cv = function(passage, accum, context) {
  // Handle a chapter:verse reference within the current book context.
  // Updates both `context.c` and `context.v`.
  var c, ref, v, valid;
  passage.start_context = bcv_utils.shallow_clone(context);
  c = this.pluck("c", passage.value).value;
  v = this.pluck("v", passage.value).value;
  valid = this.validate_ref(passage.start_context.translations, {
    b: context.b,
    c: c,
    v: v
  });
  // Upgrade a zero chapter/verse if the configured strategy allows it.
  ref = this.fix_start_zeroes(valid, c, v), c = ref[0], v = ref[1];
  passage.passages = [
    {
      start: {
        b: context.b,
        c: c,
        v: v
      },
      end: {
        b: context.b,
        c: c,
        v: v
      },
      valid: valid
    }
  ];
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  accum.push(passage);
  context.c = c;
  context.v = v;
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  return [accum, context];
};
bcv_passage.prototype.cb_range = function(passage, accum, context) {
  // Handle "chapters X-Y of Book" order: rebuild the value as a range from
  // a `bc` (book + start chapter) to the end chapter, then delegate to the
  // generic range logic.
  var b, end_c, ref, start_c;
  passage.type = "range";
  ref = passage.value, b = ref[0], start_c = ref[1], end_c = ref[2];
  passage.value = [
    {
      type: "bc",
      value: [b, start_c],
      indices: passage.indices
    }, end_c
  ];
  // Stretch the end token to cover the whole match.
  end_c.indices[1] = passage.indices[1];
  return this.range(passage, accum, context);
};
bcv_passage.prototype.context = function(passage, accum, context) {
  // Merge the context stored on the matched book token (e.g. carried-over
  // chapter/verse state) into the running context.
  var key, ref;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.passages = [];
  ref = this.books[passage.value].context;
  for (key in ref) {
    if (!hasProp.call(ref, key)) continue;
    context[key] = this.books[passage.value].context[key];
  }
  accum.push(passage);
  return [accum, context];
};
bcv_passage.prototype.cv_psalm = function(passage, accum, context) {
  // Handle "23rd Psalm verse 1"-style input: resolve the psalm token into a
  // `bc` via `c_psalm`, then pair it with the verse as a full `bcv`.
  var bc, c_psalm, ref, v;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.type = "bcv";
  ref = passage.value, c_psalm = ref[0], v = ref[1];
  bc = this.c_psalm(c_psalm, [], passage.start_context)[0][0];
  passage.value = [bc, v];
  return this.bcv(passage, accum, context);
};
bcv_passage.prototype.ff = function(passage, accum, context) {
  // Handle "and following" ("Gen 1:1ff"): extend the passage to a range
  // ending at the sentinel 999, let `range` clamp it to the real end, then
  // strip the artifacts the sentinel produced.
  var ref, ref1;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.value.push({
    type: "integer",
    indices: passage.indices,
    value: 999
  });
  ref = this.range(passage, [], passage.start_context), (ref1 = ref[0], passage = ref1[0]), context = ref[1];
  passage.value[0].indices = passage.value[1].indices;
  passage.value[0].absolute_indices = passage.value[1].absolute_indices;
  // Remove the synthetic end token and the validation noise it caused.
  passage.value.pop();
  if (passage.passages[0].valid.messages.end_verse_not_exist != null) {
    delete passage.passages[0].valid.messages.end_verse_not_exist;
  }
  if (passage.passages[0].valid.messages.end_chapter_not_exist != null) {
    delete passage.passages[0].valid.messages.end_chapter_not_exist;
  }
  if (passage.passages[0].end.original_c != null) {
    delete passage.passages[0].end.original_c;
  }
  accum.push(passage);
  return [accum, context];
};
bcv_passage.prototype.integer_title = function(passage, accum, context) {
  // Handle a bare "N title" token. Outside Psalms the title word is ignored
  // and N is a plain integer; inside Psalms, N becomes the chapter and the
  // title becomes verse 1, reprocessed as `cv`.
  var v_indices;
  passage.start_context = bcv_utils.shallow_clone(context);
  if (context.b !== "Ps") {
    return this.integer(passage.value[0], accum, context);
  }
  passage.value[0] = {
    type: "c",
    value: [passage.value[0]],
    indices: [passage.value[0].indices[0], passage.value[0].indices[1]]
  };
  // Last five characters of the match — assumes the trailing "title" word;
  // TODO confirm against the grammar's token length.
  v_indices = [passage.indices[1] - 5, passage.indices[1]];
  passage.value[1] = {
    type: "v",
    value: [
      {
        type: "integer",
        value: 1,
        indices: v_indices
      }
    ],
    indices: v_indices
  };
  passage.type = "cv";
  return this.cv(passage, accum, passage.start_context);
};
bcv_passage.prototype.integer = function(passage, accum, context) {
  // A bare number continues the verse context when one exists ("Gen 1:1, 2");
  // otherwise it names a chapter ("Gen 1, 2").
  return context.v != null ? this.v(passage, accum, context) : this.c(passage, accum, context);
};
bcv_passage.prototype.sequence = function(passage, accum, context) {
  // Handle a comma/semicolon sequence of passages: process each part,
  // flatten all resulting sub-passages onto this passage, and propagate
  // type, indices, and translations down to them.
  var k, l, len, len1, obj, psg, ref, ref1, ref2, ref3, sub_psg;
  passage.start_context = bcv_utils.shallow_clone(context);
  passage.passages = [];
  ref = passage.value;
  for (k = 0, len = ref.length; k < len; k++) {
    obj = ref[k];
    ref1 = this.handle_array(obj, [], context), (ref2 = ref1[0], psg = ref2[0]), context = ref1[1];
    ref3 = psg.passages;
    for (l = 0, len1 = ref3.length; l < len1; l++) {
      sub_psg = ref3[l];
      if (sub_psg.type == null) {
        sub_psg.type = psg.type;
      }
      if (sub_psg.absolute_indices == null) {
        sub_psg.absolute_indices = psg.absolute_indices;
      }
      if (psg.start_context.translations != null) {
        sub_psg.translations = psg.start_context.translations;
      }
      // Only parenthesized sequences record their enclosure bounds.
      sub_psg.enclosed_absolute_indices = psg.type === "sequence_post_enclosed" ? psg.absolute_indices : [-1, -1];
      passage.passages.push(sub_psg);
    }
  }
  if (passage.absolute_indices == null) {
    if (passage.passages.length > 0 && passage.type === "sequence") {
      // Span from the first sub-passage's start to the last one's end.
      passage.absolute_indices = [passage.passages[0].absolute_indices[0], passage.passages[passage.passages.length - 1].absolute_indices[1]];
    } else {
      passage.absolute_indices = this.get_absolute_indices(passage.indices);
    }
  }
  accum.push(passage);
  return [accum, context];
};
// A parenthesized sequence ("Ps 1 (2, 3)") shares the plain-sequence logic;
// `sequence` checks this type to set enclosure bounds.
bcv_passage.prototype.sequence_post_enclosed = function(passage, accum, context) {
  return this.sequence(passage, accum, context);
};
bcv_passage.prototype.v = function(passage, accum, context) {
  // Handle a verse number within the current book/chapter context; a missing
  // chapter context defaults to chapter 1. Updates `context.v`.
  var c, no_c, ref, v, valid;
  v = passage.type === "integer" ? passage.value : this.pluck("integer", passage.value).value;
  passage.start_context = bcv_utils.shallow_clone(context);
  c = context.c != null ? context.c : 1;
  valid = this.validate_ref(passage.start_context.translations, {
    b: context.b,
    c: c,
    v: v
  });
  // Only the verse needs zero-fixing here; the chapter slot is a dummy 0.
  ref = this.fix_start_zeroes(valid, 0, v), no_c = ref[0], v = ref[1];
  passage.passages = [
    {
      start: {
        b: context.b,
        c: c,
        v: v
      },
      end: {
        b: context.b,
        c: c,
        v: v
      },
      valid: valid
    }
  ];
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  accum.push(passage);
  context.v = v;
  return [accum, context];
};
bcv_passage.prototype.range = function(passage, accum, context) {
  // Handle a range ("Gen 1-2"). Resolves both endpoints, validates the pair,
  // and retypes the passage when an endpoint is a whole book. Invalid or
  // ambiguous ranges are repaired by the range_handle_* helpers.
  var end, end_obj, ref, ref1, ref2, ref3, ref4, ref5, ref6, ref7, ref8, ref9, return_now, return_value, start, start_obj, valid;
  passage.start_context = bcv_utils.shallow_clone(context);
  ref = passage.value, start = ref[0], end = ref[1];
  ref1 = this.handle_obj(start, [], context), (ref2 = ref1[0], start = ref2[0]), context = ref1[1];
  // "Ps 3-4" with the "verse" strategy: a bare end number after a chapter
  // start is a verse, so reinterpret before resolving the end.
  if (end.type === "v" && ((start.type === "bc" && !((ref3 = start.passages) != null ? (ref4 = ref3[0]) != null ? (ref5 = ref4.valid) != null ? (ref6 = ref5.messages) != null ? ref6.start_chapter_not_exist_in_single_chapter_book : void 0 : void 0 : void 0 : void 0)) || start.type === "c") && this.options.end_range_digits_strategy === "verse") {
    passage.value[0] = start;
    return this.range_change_integer_end(passage, accum);
  }
  ref7 = this.handle_obj(end, [], context), (ref8 = ref7[0], end = ref8[0]), context = ref7[1];
  passage.value = [start, end];
  passage.indices = [start.indices[0], end.indices[1]];
  delete passage.absolute_indices;
  start_obj = {
    b: start.passages[0].start.b,
    c: start.passages[0].start.c,
    v: start.passages[0].start.v,
    type: start.type
  };
  end_obj = {
    b: end.passages[0].end.b,
    c: end.passages[0].end.c,
    v: end.passages[0].end.v,
    type: end.type
  };
  // Preserve explicit zeros on the end point for validation.
  if (end.passages[0].valid.messages.start_chapter_is_zero) {
    end_obj.c = 0;
  }
  if (end.passages[0].valid.messages.start_verse_is_zero) {
    end_obj.v = 0;
  }
  valid = this.validate_ref(passage.start_context.translations, start_obj, end_obj);
  if (valid.valid) {
    ref9 = this.range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum), return_now = ref9[0], return_value = ref9[1];
    if (return_now) {
      return return_value;
    }
  } else {
    return this.range_handle_invalid(valid, passage, start, start_obj, end, end_obj, accum);
  }
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  passage.passages = [
    {
      start: start_obj,
      end: end_obj,
      valid: valid
    }
  ];
  if (passage.start_context.translations != null) {
    passage.passages[0].translations = passage.start_context.translations;
  }
  // Book-valued endpoints change how the range is rendered downstream.
  if (start_obj.type === "b") {
    if (end_obj.type === "b") {
      passage.type = "b_range";
    } else {
      passage.type = "b_range_start";
    }
  } else if (end_obj.type === "b") {
    passage.type = "range_end_b";
  }
  accum.push(passage);
  return [accum, context];
};
bcv_passage.prototype.range_change_end = function(passage, accum, new_end) {
  // Replace the range's end value with `new_end` (keeping the original in
  // `original_value`) and reprocess the passage from its start context.
  var end, new_obj, ref, start;
  ref = passage.value, start = ref[0], end = ref[1];
  if (end.type === "integer") {
    end.original_value = end.value;
    end.value = new_end;
  } else if (end.type === "v") {
    new_obj = this.pluck("integer", end.value);
    new_obj.original_value = new_obj.value;
    new_obj.value = new_end;
  } else if (end.type === "cv") {
    // Only the chapter part of a cv end is corrected here.
    new_obj = this.pluck("c", end.value);
    new_obj.original_value = new_obj.value;
    new_obj.value = new_end;
  }
  return this.handle_obj(passage, accum, passage.start_context);
};
bcv_passage.prototype.range_change_integer_end = function(passage, accum) {
  // Reinterpret a chapter-to-integer range as chapter:verse ("Ps 3-4" ->
  // "Ps 3:4"): wrap the integer endpoints as c/v tokens, retype the passage,
  // and reprocess it. The original type/value are preserved for recovery.
  var end, ref, start;
  ref = passage.value, start = ref[0], end = ref[1];
  if (passage.original_type == null) {
    passage.original_type = passage.type;
  }
  if (passage.original_value == null) {
    passage.original_value = [start, end];
  }
  passage.type = start.type === "integer" ? "cv" : start.type + "v";
  if (start.type === "integer") {
    passage.value[0] = {
      type: "c",
      value: [start],
      indices: start.indices
    };
  }
  if (end.type === "integer") {
    passage.value[1] = {
      type: "v",
      value: [end],
      indices: end.indices
    };
  }
  return this.handle_obj(passage, accum, passage.start_context);
};
bcv_passage.prototype.range_check_new_end = function(translations, start_obj, end_obj, valid) {
  // For a backwards range ("Ps 121-23"), propose a corrected end value by
  // re-adding the start's leading digits, and keep it only if it validates.
  // Returns 0 when no usable correction exists.
  var new_end, new_valid, obj_to_validate, type;
  new_end = 0;
  type = null;
  if (valid.messages.end_chapter_before_start) {
    type = "c";
  } else if (valid.messages.end_verse_before_start) {
    type = "v";
  }
  if (type != null) {
    new_end = this.range_get_new_end_value(start_obj, end_obj, valid, type);
  }
  if (new_end > 0) {
    obj_to_validate = {
      b: end_obj.b,
      c: end_obj.c,
      v: end_obj.v
    };
    obj_to_validate[type] = new_end;
    new_valid = this.validate_ref(translations, obj_to_validate);
    if (!new_valid.valid) {
      new_end = 0;
    }
  }
  return new_end;
};
// A range ending in a bare book ("Matt 28-Mark") uses the generic range logic.
bcv_passage.prototype.range_end_b = function(passage, accum, context) {
  return this.range(passage, accum, context);
};
bcv_passage.prototype.range_get_new_end_value = function(start_obj, end_obj, valid, key) {
  // Infer the intended end of an abbreviated range ("Ps 121-23" means 123)
  // by restoring the start value's tens or hundreds digits. Returns 0 when
  // no inference applies (including explicit-zero endpoints).
  var messages = valid.messages;
  if ((key === "c" && messages.end_chapter_is_zero) || (key === "v" && messages.end_verse_is_zero)) {
    return 0;
  }
  var begin = start_obj[key];
  var finish = end_obj[key];
  // Two-digit start, one-digit end: borrow the tens digit.
  if (begin >= 10 && finish < 10 && begin - 10 * Math.floor(begin / 10) < finish) {
    return finish + 10 * Math.floor(begin / 10);
  }
  // Three-digit start, two-digit end: borrow the hundreds digit.
  if (begin >= 100 && finish < 100 && begin - 100 < finish) {
    return finish + 100;
  }
  return 0;
};
bcv_passage.prototype.range_handle_invalid = function(valid, passage, start, start_obj, end, end_obj, accum) {
  // Repair an invalid range: first try an abbreviated-end correction
  // ("Ps 121-23"), then try reading the end integer as a verse; if neither
  // works, downgrade the range to a two-element sequence.
  var new_end, ref, temp_valid, temp_value;
  if (valid.valid === false && (valid.messages.end_chapter_before_start || valid.messages.end_verse_before_start) && (end.type === "integer" || end.type === "v") || (valid.valid === false && valid.messages.end_chapter_before_start && end.type === "cv")) {
    new_end = this.range_check_new_end(passage.start_context.translations, start_obj, end_obj, valid);
    if (new_end > 0) {
      return this.range_change_end(passage, accum, new_end);
    }
  }
  if (this.options.end_range_digits_strategy === "verse" && (start_obj.v == null) && (end.type === "integer" || end.type === "v")) {
    temp_value = end.type === "v" ? this.pluck("integer", end.value) : end.value;
    temp_valid = this.validate_ref(passage.start_context.translations, {
      b: start_obj.b,
      c: start_obj.c,
      v: temp_value
    });
    if (temp_valid.valid) {
      return this.range_change_integer_end(passage, accum);
    }
  }
  // Give up on the range: keep the endpoints as two separate passages.
  if (passage.original_type == null) {
    passage.original_type = passage.type;
  }
  passage.type = "sequence";
  ref = [[start, end], [[start], [end]]], passage.original_value = ref[0], passage.value = ref[1];
  return this.sequence(passage, accum, passage.start_context);
};
bcv_passage.prototype.range_handle_valid = function(valid, passage, start, start_obj, end, end_obj, accum) {
  // Post-process a valid range. If the end chapter doesn't exist but the
  // number works as a verse (and the strategy allows it), reinterpret and
  // return [true, result]; otherwise clamp via range_validate and return
  // [false, null] so the caller finishes normally.
  var temp_valid, temp_value;
  if (valid.messages.end_chapter_not_exist && this.options.end_range_digits_strategy === "verse" && (start_obj.v == null) && (end.type === "integer" || end.type === "v") && this.options.passage_existence_strategy.indexOf("v") >= 0) {
    temp_value = end.type === "v" ? this.pluck("integer", end.value) : end.value;
    temp_valid = this.validate_ref(passage.start_context.translations, {
      b: start_obj.b,
      c: start_obj.c,
      v: temp_value
    });
    if (temp_valid.valid) {
      return [true, this.range_change_integer_end(passage, accum)];
    }
  }
  this.range_validate(valid, start_obj, end_obj, passage);
  return [false, null];
};
bcv_passage.prototype.range_validate = function(valid, start_obj, end_obj, passage) {
  // Clamp the range's end to real book limits: a nonexistent end chapter or
  // verse is snapped to the last one the validator reported, zeros are
  // replaced, and the start gets its zero-fix. Mutates start_obj/end_obj.
  var ref;
  if (valid.messages.end_chapter_not_exist || valid.messages.end_chapter_not_exist_in_single_chapter_book) {
    end_obj.original_c = end_obj.c;
    // The message value carries the book's last chapter number.
    end_obj.c = valid.messages.end_chapter_not_exist ? valid.messages.end_chapter_not_exist : valid.messages.end_chapter_not_exist_in_single_chapter_book;
    if (end_obj.v != null) {
      // Probe with verse 999 to learn the clamped chapter's last verse.
      end_obj.v = this.validate_ref(passage.start_context.translations, {
        b: end_obj.b,
        c: end_obj.c,
        v: 999
      }).messages.end_verse_not_exist;
      delete valid.messages.end_verse_is_zero;
    }
  } else if (valid.messages.end_verse_not_exist) {
    end_obj.original_v = end_obj.v;
    end_obj.v = valid.messages.end_verse_not_exist;
  }
  if (valid.messages.end_verse_is_zero && this.options.zero_verse_strategy !== "allow") {
    end_obj.v = valid.messages.end_verse_is_zero;
  }
  if (valid.messages.end_chapter_is_zero) {
    end_obj.c = valid.messages.end_chapter_is_zero;
  }
  ref = this.fix_start_zeroes(valid, start_obj.c, start_obj.v), start_obj.c = ref[0], start_obj.v = ref[1];
  return true;
};
bcv_passage.prototype.translation_sequence = function(passage, accum, context) {
  // Handle a trailing translation list ("... NIV, KJV"): collect and resolve
  // the translation tokens, retroactively apply them to the passages already
  // in `accum`, then clear translations from the running context.
  var k, l, len, len1, ref, translation, translations, val;
  passage.start_context = bcv_utils.shallow_clone(context);
  translations = [];
  translations.push({
    translation: this.books[passage.value[0].value].parsed
  });
  ref = passage.value[1];
  for (k = 0, len = ref.length; k < len; k++) {
    val = ref[k];
    val = this.books[this.pluck("translation", val).value].parsed;
    if (val != null) {
      translations.push({
        translation: val
      });
    }
  }
  for (l = 0, len1 = translations.length; l < len1; l++) {
    translation = translations[l];
    // Resolve each token against the known aliases; unknown ones fall back
    // to the default system with an upper-cased OSIS label.
    if (this.translations.aliases[translation.translation] != null) {
      translation.alias = this.translations.aliases[translation.translation].alias;
      translation.osis = this.translations.aliases[translation.translation].osis || "";
    } else {
      translation.alias = "default";
      translation.osis = translation.translation.toUpperCase();
    }
  }
  if (accum.length > 0) {
    context = this.translation_sequence_apply(accum, translations);
  }
  if (passage.absolute_indices == null) {
    passage.absolute_indices = this.get_absolute_indices(passage.indices);
  }
  accum.push(passage);
  this.reset_context(context, ["translations"]);
  return [accum, context];
};
bcv_passage.prototype.translation_sequence_apply = function(accum, translations) {
  // Reapply `translations` to the passages since the previous translation
  // sequence: walk `accum` backwards restoring any saved original
  // type/value, find where the last translation_sequence ended, and re-run
  // everything after it with the new translations in context.
  var context, i, k, new_accum, ref, ref1, use_i;
  use_i = 0;
  for (i = k = ref = accum.length - 1; ref <= 0 ? k <= 0 : k >= 0; i = ref <= 0 ? ++k : --k) {
    if (accum[i].original_type != null) {
      accum[i].type = accum[i].original_type;
    }
    if (accum[i].original_value != null) {
      accum[i].value = accum[i].original_value;
    }
    if (accum[i].type !== "translation_sequence") {
      continue;
    }
    use_i = i + 1;
    break;
  }
  if (use_i < accum.length) {
    accum[use_i].start_context.translations = translations;
    ref1 = this.handle_array(accum.slice(use_i), [], accum[use_i].start_context), new_accum = ref1[0], context = ref1[1];
  } else {
    context = bcv_utils.shallow_clone(accum[accum.length - 1].start_context);
  }
  return context;
};
bcv_passage.prototype.pluck = function(type, passages) {
  // Return the first element of `passages` whose `.type` equals `type`, or
  // null if none matches. For the "c" and "v" container types, dig down to
  // the inner "integer" token instead of returning the wrapper.
  for (var idx = 0, count = passages.length; idx < count; idx++) {
    var candidate = passages[idx];
    if (candidate == null || candidate.type == null || candidate.type !== type) {
      continue;
    }
    if (type === "c" || type === "v") {
      return this.pluck("integer", candidate.value);
    }
    return candidate;
  }
  return null;
};
bcv_passage.prototype.set_context_from_object = function(context, keys, obj) {
  // Copy each listed key that is present (non-null) on `obj` into `context`.
  // Returns the assigned values (a CoffeeScript comprehension artifact).
  var assigned = [];
  for (var n = 0, total = keys.length; n < total; n++) {
    var key = keys[n];
    if (obj[key] != null) {
      assigned.push(context[key] = obj[key]);
    }
  }
  return assigned;
};
bcv_passage.prototype.reset_context = function(context, keys) {
  // Remove the listed keys from `context`. Returns the per-key results of
  // `delete` (a CoffeeScript comprehension artifact).
  var outcomes = [];
  for (var n = 0, total = keys.length; n < total; n++) {
    outcomes.push(delete context[keys[n]]);
  }
  return outcomes;
};
bcv_passage.prototype.fix_start_zeroes = function(valid, c, v) {
  // When validation flagged a zero start chapter/verse and the matching
  // "upgrade" strategy is active, substitute the replacement value the
  // validator recorded in its message. Returns the (possibly updated) pair.
  var messages = valid.messages;
  if (messages.start_chapter_is_zero && this.options.zero_chapter_strategy === "upgrade") {
    c = messages.start_chapter_is_zero;
  }
  if (messages.start_verse_is_zero && this.options.zero_verse_strategy === "upgrade") {
    v = messages.start_verse_is_zero;
  }
  return [c, v];
};
bcv_passage.prototype.calculate_indices = function(match, adjust) {
  // Build a piecewise offset table mapping match-relative character offsets
  // to absolute positions in the original string. The match text alternates
  // non-book text with book placeholders delimited by \x1e/\x1f control
  // characters; book segments may carry a trailing "/<digits>" marker that
  // is stripped before lookup.
  var character, end_index, indices, k, l, len, len1, len2, m, match_index, part, part_length, parts, ref, switch_type, temp;
  switch_type = "book";
  indices = [];
  match_index = 0;
  adjust = parseInt(adjust, 10);
  parts = [match];
  // Split on both delimiters; the result alternates text / book segments.
  ref = ["\x1e", "\x1f"];
  for (k = 0, len = ref.length; k < len; k++) {
    character = ref[k];
    temp = [];
    for (l = 0, len1 = parts.length; l < len1; l++) {
      part = parts[l];
      temp = temp.concat(part.split(character));
    }
    parts = temp;
  }
  for (m = 0, len2 = parts.length; m < len2; m++) {
    part = parts[m];
    // Flips before use, so the first part is treated as "rest" (plain text).
    switch_type = switch_type === "book" ? "rest" : "book";
    part_length = part.length;
    if (part_length === 0) {
      continue;
    }
    if (switch_type === "book") {
      // Drop the "/<digits>" disambiguation suffix from the book token.
      part = part.replace(/\/\d+$/, "");
      end_index = match_index + part_length;
      if (indices.length > 0 && indices[indices.length - 1].index === adjust) {
        indices[indices.length - 1].end = end_index;
      } else {
        indices.push({
          start: match_index,
          end: end_index,
          index: adjust
        });
      }
      // +2 accounts for the two control characters around the book token.
      match_index += part_length + 2;
      // Recompute the offset from the book's real position and length.
      adjust = this.books[part].start_index + this.books[part].value.length - match_index;
      indices.push({
        start: end_index + 1,
        end: end_index + 1,
        index: adjust
      });
    } else {
      end_index = match_index + part_length - 1;
      if (indices.length > 0 && indices[indices.length - 1].index === adjust) {
        indices[indices.length - 1].end = end_index;
      } else {
        indices.push({
          start: match_index,
          end: end_index,
          index: adjust
        });
      }
      match_index += part_length;
    }
  }
  return indices;
};
bcv_passage.prototype.get_absolute_indices = function(arg1) {
  // Translate a [start, end] pair of match-relative offsets into absolute
  // string indices using the offset table from `calculate_indices`. The end
  // is made exclusive (+1). Either slot stays null if no table entry covers it.
  var abs_start = null;
  var abs_end = null;
  var entries = this.indices;
  for (var n = 0; n < entries.length; n++) {
    var entry = entries[n];
    if (abs_start === null && entry.start <= arg1[0] && arg1[0] <= entry.end) {
      abs_start = arg1[0] + entry.index;
    }
    if (entry.start <= arg1[1] && arg1[1] <= entry.end) {
      abs_end = arg1[1] + entry.index + 1;
      break;
    }
  }
  return [abs_start, abs_end];
};
bcv_passage.prototype.validate_ref = function(translations, start, end) {
  // Validate a start (and optional end) reference against each requested
  // translation, defaulting to the "default" system when none is given.
  // Returns { valid, messages }: valid if ANY translation accepts it;
  // `messages` accumulates diagnostics across all translations.
  var k, len, messages, temp_valid, translation, valid;
  if (!((translations != null) && translations.length > 0)) {
    translations = [
      {
        translation: "default",
        osis: "",
        alias: "default"
      }
    ];
  }
  valid = false;
  messages = {};
  for (k = 0, len = translations.length; k < len; k++) {
    translation = translations[k];
    if (translation.alias == null) {
      translation.alias = "default";
    }
    // NOTE(review): this branch is unreachable — `alias` was just defaulted
    // above, so it can no longer be null here. Possibly a mistranslated
    // check from the original source; confirm before removing.
    if (translation.alias == null) {
      if (messages.translation_invalid == null) {
        messages.translation_invalid = [];
      }
      messages.translation_invalid.push(translation);
      continue;
    }
    if (this.translations.aliases[translation.alias] == null) {
      translation.alias = "default";
      if (messages.translation_unknown == null) {
        messages.translation_unknown = [];
      }
      messages.translation_unknown.push(translation);
    }
    temp_valid = this.validate_start_ref(translation.alias, start, messages)[0];
    if (end) {
      temp_valid = this.validate_end_ref(translation.alias, start, end, temp_valid, messages)[0];
    }
    if (temp_valid === true) {
      valid = true;
    }
  }
  return {
    valid: valid,
    messages: messages
  };
};
bcv_passage.prototype.validate_start_ref = function(translation, start, messages) {
  // Validate (and normalize in place) the start of a reference against one
  // translation: coerces c/v to integers, defaults a missing chapter to 1,
  // applies zero-chapter/verse strategies, and records diagnostics in
  // `messages`. Returns [valid, messages].
  var ref, ref1, translation_order, valid;
  valid = true;
  // Lazily copy the book's chapter data into a non-default translation.
  if (translation !== "default" && (((ref = this.translations[translation]) != null ? ref.chapters[start.b] : void 0) == null)) {
    this.promote_book_to_translation(start.b, translation);
  }
  // Fall back to the default book ordering when this translation has none.
  translation_order = ((ref1 = this.translations[translation]) != null ? ref1.order : void 0) != null ? translation : "default";
  if (start.v != null) {
    start.v = parseInt(start.v, 10);
  }
  if (this.translations[translation_order].order[start.b] != null) {
    if (start.c == null) {
      start.c = 1;
    }
    start.c = parseInt(start.c, 10);
    if (isNaN(start.c)) {
      valid = false;
      messages.start_chapter_not_numeric = true;
      return [valid, messages];
    }
    if (start.c === 0) {
      messages.start_chapter_is_zero = 1;
      if (this.options.zero_chapter_strategy === "error") {
        valid = false;
      } else {
        start.c = 1;
      }
    }
    if ((start.v != null) && start.v === 0) {
      messages.start_verse_is_zero = 1;
      if (this.options.zero_verse_strategy === "error") {
        valid = false;
      } else if (this.options.zero_verse_strategy === "upgrade") {
        start.v = 1;
      }
    }
    if (start.c > 0 && (this.translations[translation].chapters[start.b][start.c - 1] != null)) {
      if (start.v != null) {
        if (isNaN(start.v)) {
          valid = false;
          messages.start_verse_not_numeric = true;
        } else if (start.v > this.translations[translation].chapters[start.b][start.c - 1]) {
          if (this.options.passage_existence_strategy.indexOf("v") >= 0) {
            valid = false;
            // Message carries the chapter's actual last verse for clamping.
            messages.start_verse_not_exist = this.translations[translation].chapters[start.b][start.c - 1];
          }
        }
      }
    } else {
      if (start.c !== 1 && this.translations[translation].chapters[start.b].length === 1) {
        valid = false;
        messages.start_chapter_not_exist_in_single_chapter_book = 1;
      } else if (start.c > 0 && this.options.passage_existence_strategy.indexOf("c") >= 0) {
        valid = false;
        // Message carries the book's actual chapter count for clamping.
        messages.start_chapter_not_exist = this.translations[translation].chapters[start.b].length;
      }
    }
  } else {
    if (this.options.passage_existence_strategy.indexOf("b") >= 0) {
      valid = false;
    }
    messages.start_book_not_exist = true;
  }
  return [valid, messages];
};
// Validate the end reference of a passage range against `translation`.
// Mutates `end` (and occasionally `start`) in place: chapter/verse strings
// are coerced with parseInt, and zero chapters/verses are either upgraded
// to 1 or flagged invalid depending on the configured zero_* strategy.
// Accumulates problems in `messages` and returns [valid, messages].
bcv_passage.prototype.validate_end_ref = function(translation, start, end, valid, messages) {
var ref, translation_order;
// Use the translation's own book order if it defines one; otherwise fall
// back to the "default" order.
translation_order = ((ref = this.translations[translation]) != null ? ref.order : void 0) != null ? translation : "default";
// Coerce and sanity-check the end chapter number.
if (end.c != null) {
end.c = parseInt(end.c, 10);
if (isNaN(end.c)) {
valid = false;
messages.end_chapter_not_numeric = true;
} else if (end.c === 0) {
messages.end_chapter_is_zero = 1;
if (this.options.zero_chapter_strategy === "error") {
valid = false;
} else {
end.c = 1;
}
}
}
// Coerce and sanity-check the end verse number.
if (end.v != null) {
end.v = parseInt(end.v, 10);
if (isNaN(end.v)) {
valid = false;
messages.end_verse_not_numeric = true;
} else if (end.v === 0) {
messages.end_verse_is_zero = 1;
if (this.options.zero_verse_strategy === "error") {
valid = false;
} else if (this.options.zero_verse_strategy === "upgrade") {
end.v = 1;
}
}
}
// The end book must exist in the effective book order.
if (this.translations[translation_order].order[end.b] != null) {
// Single-chapter book with no explicit chapter: treat as chapter 1.
if ((end.c == null) && this.translations[translation].chapters[end.b].length === 1) {
end.c = 1;
}
// A range whose end book precedes its start book is out of order.
if ((this.translations[translation_order].order[start.b] != null) && this.translations[translation_order].order[start.b] > this.translations[translation_order].order[end.b]) {
if (this.options.passage_existence_strategy.indexOf("b") >= 0) {
valid = false;
}
messages.end_book_before_start = true;
}
// Within the same book, the end chapter/verse must not precede the start.
if (start.b === end.b && (end.c != null) && !isNaN(end.c)) {
if (start.c == null) {
start.c = 1;
}
if (!isNaN(parseInt(start.c, 10)) && start.c > end.c) {
valid = false;
messages.end_chapter_before_start = true;
} else if (start.c === end.c && (end.v != null) && !isNaN(end.v)) {
if (start.v == null) {
start.v = 1;
}
if (!isNaN(parseInt(start.v, 10)) && start.v > end.v) {
valid = false;
messages.end_verse_before_start = true;
}
}
}
// Existence check for the end chapter; note this only records a message
// and does not flip `valid` here.
if ((end.c != null) && !isNaN(end.c)) {
if (this.translations[translation].chapters[end.b][end.c - 1] == null) {
if (this.translations[translation].chapters[end.b].length === 1) {
messages.end_chapter_not_exist_in_single_chapter_book = 1;
} else if (end.c > 0 && this.options.passage_existence_strategy.indexOf("c") >= 0) {
messages.end_chapter_not_exist = this.translations[translation].chapters[end.b].length;
}
}
}
// Existence check for the end verse; with no chapter given, assume the
// last chapter of the book. Records a message only.
if ((end.v != null) && !isNaN(end.v)) {
if (end.c == null) {
end.c = this.translations[translation].chapters[end.b].length;
}
if (end.v > this.translations[translation].chapters[end.b][end.c - 1] && this.options.passage_existence_strategy.indexOf("v") >= 0) {
messages.end_verse_not_exist = this.translations[translation].chapters[end.b][end.c - 1];
}
}
} else {
// Unknown end book: always invalid.
valid = false;
messages.end_book_not_exist = true;
}
return [valid, messages];
};
// Ensure `translation` carries its own chapter data for `book`, lazily
// cloning the chapter-length array from the "default" translation.
// Returns the cloned array when a clone happens, otherwise undefined.
bcv_passage.prototype.promote_book_to_translation = function(book, translation) {
var all_translations, target;
all_translations = this.translations;
if (all_translations[translation] == null) {
all_translations[translation] = {};
}
target = all_translations[translation];
if (target.chapters == null) {
target.chapters = {};
}
if (target.chapters[book] != null) {
return;
}
return target.chapters[book] = bcv_utils.shallow_clone_array(all_translations["default"].chapters[book]);
};
return bcv_passage;
})();
bcv_utils = {
shallow_clone: function(obj) {
var key, out, val;
if (obj == null) {
return obj;
}
out = {};
for (key in obj) {
if (!hasProp.call(obj, key)) continue;
val = obj[key];
out[key] = val;
}
return out;
},
shallow_clone_array: function(arr) {
var i, k, out, ref;
if (arr == null) {
return arr;
}
out = [];
for (i = k = 0, ref = arr.length; 0 <= ref ? k <= ref : k >= ref; i = 0 <= ref ? ++k : --k) {
if (typeof arr[i] !== "undefined") {
out[i] = arr[i];
}
}
return out;
}
};
// Matches explicit translation abbreviations (RUSV, SZ) case-insensitively.
bcv_parser.prototype.regexps.translations = /(?:(?:RUSV|SZ))\b/gi;
// Translation metadata. `order` maps OSIS book IDs to their canonical
// position; `chapters` maps each book to an array of per-chapter verse
// counts. The "default" entry is the baseline; the other translations
// override only the books whose ordering or verse counts differ.
bcv_parser.prototype.translations = {
aliases: {
"default": {
osis: "",
alias: "default"
}
},
alternates: {},
"default": {
// Baseline book order (Protestant canon followed by apocrypha).
order: {
"Gen": 1,
"Exod": 2,
"Lev": 3,
"Num": 4,
"Deut": 5,
"Josh": 6,
"Judg": 7,
"Ruth": 8,
"1Sam": 9,
"2Sam": 10,
"1Kgs": 11,
"2Kgs": 12,
"1Chr": 13,
"2Chr": 14,
"Ezra": 15,
"Neh": 16,
"Esth": 17,
"Job": 18,
"Ps": 19,
"Prov": 20,
"Eccl": 21,
"Song": 22,
"Isa": 23,
"Jer": 24,
"Lam": 25,
"Ezek": 26,
"Dan": 27,
"Hos": 28,
"Joel": 29,
"Amos": 30,
"Obad": 31,
"Jonah": 32,
"Mic": 33,
"Nah": 34,
"Hab": 35,
"Zeph": 36,
"Hag": 37,
"Zech": 38,
"Mal": 39,
"Matt": 40,
"Mark": 41,
"Luke": 42,
"John": 43,
"Acts": 44,
"Rom": 45,
"1Cor": 46,
"2Cor": 47,
"Gal": 48,
"Eph": 49,
"Phil": 50,
"Col": 51,
"1Thess": 52,
"2Thess": 53,
"1Tim": 54,
"2Tim": 55,
"Titus": 56,
"Phlm": 57,
"Heb": 58,
"Jas": 59,
"1Pet": 60,
"2Pet": 61,
"1John": 62,
"2John": 63,
"3John": 64,
"Jude": 65,
"Rev": 66,
"Tob": 67,
"Jdt": 68,
"GkEsth": 69,
"Wis": 70,
"Sir": 71,
"Bar": 72,
"PrAzar": 73,
"Sus": 74,
"Bel": 75,
"SgThree": 76,
"EpJer": 77,
"1Macc": 78,
"2Macc": 79,
"3Macc": 80,
"4Macc": 81,
"1Esd": 82,
"2Esd": 83,
"PrMan": 84
},
// Verse count of each chapter, indexed by chapter - 1.
chapters: {
"Gen": [31, 25, 24, 26, 32, 22, 24, 22, 29, 32, 32, 20, 18, 24, 21, 16, 27, 33, 38, 18, 34, 24, 20, 67, 34, 35, 46, 22, 35, 43, 55, 32, 20, 31, 29, 43, 36, 30, 23, 23, 57, 38, 34, 34, 28, 34, 31, 22, 33, 26],
"Exod": [22, 25, 22, 31, 23, 30, 25, 32, 35, 29, 10, 51, 22, 31, 27, 36, 16, 27, 25, 26, 36, 31, 33, 18, 40, 37, 21, 43, 46, 38, 18, 35, 23, 35, 35, 38, 29, 31, 43, 38],
"Lev": [17, 16, 17, 35, 19, 30, 38, 36, 24, 20, 47, 8, 59, 57, 33, 34, 16, 30, 37, 27, 24, 33, 44, 23, 55, 46, 34],
"Num": [54, 34, 51, 49, 31, 27, 89, 26, 23, 36, 35, 16, 33, 45, 41, 50, 13, 32, 22, 29, 35, 41, 30, 25, 18, 65, 23, 31, 40, 16, 54, 42, 56, 29, 34, 13],
"Deut": [46, 37, 29, 49, 33, 25, 26, 20, 29, 22, 32, 32, 18, 29, 23, 22, 20, 22, 21, 20, 23, 30, 25, 22, 19, 19, 26, 68, 29, 20, 30, 52, 29, 12],
"Josh": [18, 24, 17, 24, 15, 27, 26, 35, 27, 43, 23, 24, 33, 15, 63, 10, 18, 28, 51, 9, 45, 34, 16, 33],
"Judg": [36, 23, 31, 24, 31, 40, 25, 35, 57, 18, 40, 15, 25, 20, 20, 31, 13, 31, 30, 48, 25],
"Ruth": [22, 23, 18, 22],
"1Sam": [28, 36, 21, 22, 12, 21, 17, 22, 27, 27, 15, 25, 23, 52, 35, 23, 58, 30, 24, 42, 15, 23, 29, 22, 44, 25, 12, 25, 11, 31, 13],
"2Sam": [27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 33, 43, 26, 22, 51, 39, 25],
"1Kgs": [53, 46, 28, 34, 18, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46, 21, 43, 29, 53],
"2Kgs": [18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 21, 21, 25, 29, 38, 20, 41, 37, 37, 21, 26, 20, 37, 20, 30],
"1Chr": [54, 55, 24, 43, 26, 81, 40, 40, 44, 14, 47, 40, 14, 17, 29, 43, 27, 17, 19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30],
"2Chr": [17, 18, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 22, 15, 19, 14, 19, 34, 11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23],
"Ezra": [11, 70, 13, 24, 17, 22, 28, 36, 15, 44],
"Neh": [11, 20, 32, 23, 19, 19, 73, 18, 38, 39, 36, 47, 31],
"Esth": [22, 23, 15, 17, 14, 14, 10, 17, 32, 3],
"Job": [22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21, 29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33, 24, 41, 30, 24, 34, 17],
"Ps": [6, 12, 8, 8, 12, 10, 17, 9, 20, 18, 7, 8, 6, 7, 5, 11, 15, 50, 14, 9, 13, 31, 6, 10, 22, 12, 14, 9, 11, 12, 24, 11, 22, 22, 28, 12, 40, 22, 13, 17, 13, 11, 5, 26, 17, 11, 9, 14, 20, 23, 19, 9, 6, 7, 23, 13, 11, 11, 17, 12, 8, 12, 11, 10, 13, 20, 7, 35, 36, 5, 24, 20, 28, 23, 10, 12, 20, 72, 13, 19, 16, 8, 18, 12, 13, 17, 7, 18, 52, 17, 16, 15, 5, 23, 11, 13, 12, 9, 9, 5, 8, 28, 22, 35, 45, 48, 43, 13, 31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176, 7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 13, 10, 7, 12, 15, 21, 10, 20, 14, 9, 6],
"Prov": [33, 22, 35, 27, 23, 35, 27, 36, 18, 32, 31, 28, 25, 35, 33, 33, 28, 24, 29, 30, 31, 29, 35, 34, 28, 28, 27, 28, 27, 33, 31],
"Eccl": [18, 26, 22, 16, 20, 12, 29, 17, 18, 20, 10, 14],
"Song": [17, 17, 11, 16, 16, 13, 13, 14],
"Isa": [31, 22, 26, 6, 30, 13, 25, 22, 21, 34, 16, 6, 22, 32, 9, 14, 14, 7, 25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22, 38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17, 13, 12, 21, 14, 21, 22, 11, 12, 19, 12, 25, 24],
"Jer": [19, 37, 25, 31, 31, 30, 34, 22, 26, 25, 23, 17, 27, 22, 21, 21, 27, 23, 15, 18, 14, 30, 40, 10, 38, 24, 22, 17, 32, 24, 40, 44, 26, 22, 19, 32, 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34],
"Lam": [22, 22, 66, 22, 22],
"Ezek": [28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32, 14, 49, 32, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38, 28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35],
"Dan": [21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13],
"Hos": [11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9],
"Joel": [20, 32, 21],
"Amos": [15, 16, 15, 13, 27, 14, 17, 14, 15],
"Obad": [21],
"Jonah": [17, 10, 10, 11],
"Mic": [16, 13, 12, 13, 15, 16, 20],
"Nah": [15, 13, 19],
"Hab": [17, 20, 19],
"Zeph": [18, 15, 20],
"Hag": [15, 23],
"Zech": [21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21],
"Mal": [14, 17, 18, 6],
"Matt": [25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35, 30, 34, 46, 46, 39, 51, 46, 75, 66, 20],
"Mark": [45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20],
"Luke": [80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43, 48, 47, 38, 71, 56, 53],
"John": [51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40, 42, 31, 25],
"Acts": [26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28, 41, 38, 40, 30, 35, 27, 27, 32, 44, 31],
"Rom": [32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27],
"1Cor": [31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24],
"2Cor": [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14],
"Gal": [24, 21, 29, 31, 26, 18],
"Eph": [23, 22, 21, 32, 33, 24],
"Phil": [30, 30, 21, 23],
"Col": [29, 23, 25, 18],
"1Thess": [10, 20, 13, 18, 28],
"2Thess": [12, 17, 18],
"1Tim": [20, 15, 16, 16, 25, 21],
"2Tim": [18, 26, 17, 22],
"Titus": [16, 15, 15],
"Phlm": [25],
"Heb": [14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25],
"Jas": [27, 26, 18, 17, 20],
"1Pet": [25, 25, 22, 19, 14],
"2Pet": [21, 22, 18],
"1John": [10, 29, 24, 21, 21],
"2John": [13],
"3John": [15],
"Jude": [25],
"Rev": [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21],
"Tob": [22, 14, 17, 21, 22, 18, 16, 21, 6, 13, 18, 22, 17, 15],
"Jdt": [16, 28, 10, 15, 24, 21, 32, 36, 14, 23, 23, 20, 20, 19, 14, 25],
"GkEsth": [22, 23, 15, 17, 14, 14, 10, 17, 32, 13, 12, 6, 18, 19, 16, 24],
"Wis": [16, 24, 19, 20, 23, 25, 30, 21, 18, 21, 26, 27, 19, 31, 19, 29, 21, 25, 22],
"Sir": [30, 18, 31, 31, 15, 37, 36, 19, 18, 31, 34, 18, 26, 27, 20, 30, 32, 33, 30, 31, 28, 27, 27, 34, 26, 29, 30, 26, 28, 25, 31, 24, 33, 31, 26, 31, 31, 34, 35, 30, 22, 25, 33, 23, 26, 20, 25, 25, 16, 29, 30],
"Bar": [22, 35, 37, 37, 9],
"PrAzar": [68],
"Sus": [64],
"Bel": [42],
"SgThree": [39],
"EpJer": [73],
"1Macc": [64, 70, 60, 61, 68, 63, 50, 32, 73, 89, 74, 53, 53, 49, 41, 24],
"2Macc": [36, 32, 40, 50, 27, 31, 42, 36, 29, 38, 38, 45, 26, 46, 39],
"3Macc": [29, 33, 30, 21, 51, 41, 23],
"4Macc": [35, 24, 21, 26, 38, 35, 23, 29, 32, 21, 27, 19, 27, 20, 32, 25, 24, 24],
"1Esd": [58, 30, 24, 63, 73, 34, 15, 96, 55],
"2Esd": [40, 48, 36, 52, 56, 59, 70, 63, 47, 59, 46, 51, 58, 48, 63, 78],
"PrMan": [15],
"Ps151": [7]
}
},
// Per-translation overrides: only books that differ from "default".
vulgate: {
chapters: {
"Ps": [6, 13, 9, 10, 13, 11, 18, 10, 39, 8, 9, 6, 7, 5, 10, 15, 51, 15, 10, 14, 32, 6, 10, 22, 12, 14, 9, 11, 13, 25, 11, 22, 23, 28, 13, 40, 23, 14, 18, 14, 12, 5, 26, 18, 12, 10, 15, 21, 23, 21, 11, 7, 9, 24, 13, 12, 12, 18, 14, 9, 13, 12, 11, 14, 20, 8, 36, 37, 6, 24, 20, 28, 23, 11, 13, 21, 72, 13, 20, 17, 8, 19, 13, 14, 17, 7, 19, 53, 17, 16, 16, 5, 23, 11, 13, 12, 9, 9, 5, 8, 29, 22, 35, 45, 48, 43, 14, 31, 7, 10, 10, 9, 26, 9, 19, 2, 29, 176, 7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 14, 10, 8, 12, 15, 21, 10, 11, 20, 14, 9, 7]
}
},
ceb: {
chapters: {
"2Cor": [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 13],
"Rev": [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 18, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21],
"Tob": [22, 14, 17, 21, 22, 18, 16, 21, 6, 13, 18, 22, 18, 15],
"PrAzar": [67],
"EpJer": [72],
"1Esd": [55, 26, 24, 63, 71, 33, 15, 92, 55]
}
},
kjv: {
chapters: {
"3John": [14]
}
},
// NAB uses a different canonical book order as well as chapter overrides.
nab: {
order: {
"Gen": 1,
"Exod": 2,
"Lev": 3,
"Num": 4,
"Deut": 5,
"Josh": 6,
"Judg": 7,
"Ruth": 8,
"1Sam": 9,
"2Sam": 10,
"1Kgs": 11,
"2Kgs": 12,
"1Chr": 13,
"2Chr": 14,
"PrMan": 15,
"Ezra": 16,
"Neh": 17,
"1Esd": 18,
"2Esd": 19,
"Tob": 20,
"Jdt": 21,
"Esth": 22,
"GkEsth": 23,
"1Macc": 24,
"2Macc": 25,
"3Macc": 26,
"4Macc": 27,
"Job": 28,
"Ps": 29,
"Prov": 30,
"Eccl": 31,
"Song": 32,
"Wis": 33,
"Sir": 34,
"Isa": 35,
"Jer": 36,
"Lam": 37,
"Bar": 38,
"EpJer": 39,
"Ezek": 40,
"Dan": 41,
"PrAzar": 42,
"Sus": 43,
"Bel": 44,
"SgThree": 45,
"Hos": 46,
"Joel": 47,
"Amos": 48,
"Obad": 49,
"Jonah": 50,
"Mic": 51,
"Nah": 52,
"Hab": 53,
"Zeph": 54,
"Hag": 55,
"Zech": 56,
"Mal": 57,
"Matt": 58,
"Mark": 59,
"Luke": 60,
"John": 61,
"Acts": 62,
"Rom": 63,
"1Cor": 64,
"2Cor": 65,
"Gal": 66,
"Eph": 67,
"Phil": 68,
"Col": 69,
"1Thess": 70,
"2Thess": 71,
"1Tim": 72,
"2Tim": 73,
"Titus": 74,
"Phlm": 75,
"Heb": 76,
"Jas": 77,
"1Pet": 78,
"2Pet": 79,
"1John": 80,
"2John": 81,
"3John": 82,
"Jude": 83,
"Rev": 84
},
chapters: {
"Gen": [31, 25, 24, 26, 32, 22, 24, 22, 29, 32, 32, 20, 18, 24, 21, 16, 27, 33, 38, 18, 34, 24, 20, 67, 34, 35, 46, 22, 35, 43, 54, 33, 20, 31, 29, 43, 36, 30, 23, 23, 57, 38, 34, 34, 28, 34, 31, 22, 33, 26],
"Exod": [22, 25, 22, 31, 23, 30, 29, 28, 35, 29, 10, 51, 22, 31, 27, 36, 16, 27, 25, 26, 37, 30, 33, 18, 40, 37, 21, 43, 46, 38, 18, 35, 23, 35, 35, 38, 29, 31, 43, 38],
"Lev": [17, 16, 17, 35, 26, 23, 38, 36, 24, 20, 47, 8, 59, 57, 33, 34, 16, 30, 37, 27, 24, 33, 44, 23, 55, 46, 34],
"Num": [54, 34, 51, 49, 31, 27, 89, 26, 23, 36, 35, 16, 33, 45, 41, 35, 28, 32, 22, 29, 35, 41, 30, 25, 19, 65, 23, 31, 39, 17, 54, 42, 56, 29, 34, 13],
"Deut": [46, 37, 29, 49, 33, 25, 26, 20, 29, 22, 32, 31, 19, 29, 23, 22, 20, 22, 21, 20, 23, 29, 26, 22, 19, 19, 26, 69, 28, 20, 30, 52, 29, 12],
"1Sam": [28, 36, 21, 22, 12, 21, 17, 22, 27, 27, 15, 25, 23, 52, 35, 23, 58, 30, 24, 42, 16, 23, 28, 23, 44, 25, 12, 25, 11, 31, 13],
"2Sam": [27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 32, 44, 26, 22, 51, 39, 25],
"1Kgs": [53, 46, 28, 20, 32, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46, 21, 43, 29, 54],
"2Kgs": [18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 20, 22, 25, 29, 38, 20, 41, 37, 37, 21, 26, 20, 37, 20, 30],
"1Chr": [54, 55, 24, 43, 41, 66, 40, 40, 44, 14, 47, 41, 14, 17, 29, 43, 27, 17, 19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30],
"2Chr": [18, 17, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 23, 14, 19, 14, 19, 34, 11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23],
"Neh": [11, 20, 38, 17, 19, 19, 72, 18, 37, 40, 36, 47, 31],
"Job": [22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21, 29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33, 24, 41, 30, 32, 26, 17],
"Ps": [6, 11, 9, 9, 13, 11, 18, 10, 21, 18, 7, 9, 6, 7, 5, 11, 15, 51, 15, 10, 14, 32, 6, 10, 22, 12, 14, 9, 11, 13, 25, 11, 22, 23, 28, 13, 40, 23, 14, 18, 14, 12, 5, 27, 18, 12, 10, 15, 21, 23, 21, 11, 7, 9, 24, 14, 12, 12, 18, 14, 9, 13, 12, 11, 14, 20, 8, 36, 37, 6, 24, 20, 28, 23, 11, 13, 21, 72, 13, 20, 17, 8, 19, 13, 14, 17, 7, 19, 53, 17, 16, 16, 5, 23, 11, 13, 12, 9, 9, 5, 8, 29, 22, 35, 45, 48, 43, 14, 31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176, 7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 14, 10, 8, 12, 15, 21, 10, 20, 14, 9, 6],
"Eccl": [18, 26, 22, 17, 19, 12, 29, 17, 18, 20, 10, 14],
"Song": [17, 17, 11, 16, 16, 12, 14, 14],
"Isa": [31, 22, 26, 6, 30, 13, 25, 23, 20, 34, 16, 6, 22, 32, 9, 14, 14, 7, 25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22, 38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17, 13, 12, 21, 14, 21, 22, 11, 12, 19, 11, 25, 24],
"Jer": [19, 37, 25, 31, 31, 30, 34, 23, 25, 25, 23, 17, 27, 22, 21, 21, 27, 23, 15, 18, 14, 30, 40, 10, 38, 24, 22, 17, 32, 24, 40, 44, 26, 22, 19, 32, 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34],
"Ezek": [28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32, 14, 44, 37, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38, 28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35],
"Dan": [21, 49, 100, 34, 30, 29, 28, 27, 27, 21, 45, 13, 64, 42],
"Hos": [9, 25, 5, 19, 15, 11, 16, 14, 17, 15, 11, 15, 15, 10],
"Joel": [20, 27, 5, 21],
"Jonah": [16, 11, 10, 11],
"Mic": [16, 13, 12, 14, 14, 16, 20],
"Nah": [14, 14, 19],
"Zech": [17, 17, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21],
"Mal": [14, 17, 24],
"Acts": [26, 47, 26, 37, 42, 15, 60, 40, 43, 49, 30, 25, 52, 28, 41, 40, 34, 28, 40, 38, 40, 30, 35, 27, 27, 32, 44, 31],
"2Cor": [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 13],
"Rev": [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 18, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21],
"Tob": [22, 14, 17, 21, 22, 18, 17, 21, 6, 13, 18, 22, 18, 15],
"Sir": [30, 18, 31, 31, 15, 37, 36, 19, 18, 31, 34, 18, 26, 27, 20, 30, 32, 33, 30, 31, 28, 27, 27, 33, 26, 29, 30, 26, 28, 25, 31, 24, 33, 31, 26, 31, 31, 34, 35, 30, 22, 25, 33, 23, 26, 20, 25, 25, 16, 29, 30],
"Bar": [22, 35, 38, 37, 9, 72],
"2Macc": [36, 32, 40, 50, 27, 31, 42, 36, 29, 38, 38, 46, 26, 46, 39]
}
},
nlt: {
chapters: {
"Rev": [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 18, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21]
}
},
nrsv: {
chapters: {
"2Cor": [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 13],
"Rev": [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 18, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21]
}
}
};
// Whitespace class: regular space characters plus non-breaking space.
bcv_parser.prototype.regexps.space = "[\\s\\xa0]";
// Matches a candidate passage string anchored on a \x1f-escaped book
// placeholder, including optional leading "chapter N of ..." phrasing and
// trailing chapter/verse punctuation and Russian keywords.
bcv_parser.prototype.regexps.escaped_passage = RegExp("(?:^|[^\\x1f\\x1e\\dA-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:(?:ch(?:apters?|a?pts?\\.?|a?p?s?\\.?)?\\s*\\d+\\s*(?:[\\u2013\\u2014\\-]|through|thru|to)\\s*\\d+\\s*(?:from|of|in)(?:\\s+the\\s+book\\s+of)?\\s*)|(?:ch(?:apters?|a?pts?\\.?|a?p?s?\\.?)?\\s*\\d+\\s*(?:from|of|in)(?:\\s+the\\s+book\\s+of)?\\s*)|(?:\\d+(?:th|nd|st)\\s*ch(?:apter|a?pt\\.?|a?p?\\.?)?\\s*(?:from|of|in)(?:\\s+the\\s+book\\s+of)?\\s*))?\\x1f(\\d+)(?:/\\d+)?\\x1f(?:/\\d+\\x1f|[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014]|надписаниях(?![a-z])|и" + bcv_parser.prototype.regexps.space + "+далее|главы|стихи|глав|стих|гл|—|и|[аб](?!\\w)|$)+)", "gi");
// Trailing material at the end of a match that should be split off.
bcv_parser.prototype.regexps.match_end_split = RegExp("\\d\\W*надписаниях|\\d\\W*и" + bcv_parser.prototype.regexps.space + "+далее(?:[\\s\\xa0*]*\\.)?|\\d[\\s\\xa0*]*[аб](?!\\w)|\\x1e(?:[\\s\\xa0*]*[)\\]\\uff09])?|[\\d\\x1f]", "gi");
// Control characters used internally as escape markers.
bcv_parser.prototype.regexps.control = /[\x1e\x1f]/g;
// Character class that may NOT immediately precede a book name
// (Latin and Cyrillic letters are excluded).
bcv_parser.prototype.regexps.pre_book = "[^A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ]";
// Russian ordinal prefixes for numbered books: "1-я"/"1-е"/"1", etc.
bcv_parser.prototype.regexps.first = "(?:1-?я|1-?е|1)\\.?" + bcv_parser.prototype.regexps.space + "*";
bcv_parser.prototype.regexps.second = "(?:2-?я|2-?е|2)\\.?" + bcv_parser.prototype.regexps.space + "*";
bcv_parser.prototype.regexps.third = "(?:3-?я|3-?е|3)\\.?" + bcv_parser.prototype.regexps.space + "*";
// Range separators: dashes/ampersand plus Russian "и" ("and") and em dash.
bcv_parser.prototype.regexps.range_and = "(?:[&\u2013\u2014-]|и|—)";
// Pure range separators (dashes only, no "and").
bcv_parser.prototype.regexps.range_only = "(?:[\u2013\u2014-]|—)";
bcv_parser.prototype.regexps.get_books = function(include_apocrypha, case_sensitive) {
var book, books, k, len, out;
books = [
{
osis: ["Ps"],
apocrypha: true,
extra: "2",
regexp: /(\b)(Ps151)(?=\.1)/g
}, {
osis: ["Gen"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Бытия|Gen|Быт(?:ие)?|Нач(?:ало)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Exod"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Исход|Exod|Исх(?:од)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Bel"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Виле[\\s\\xa0]*и[\\s\\xa0]*драконе|Bel|Бел(?:[\\s\\xa0]*и[\\s\\xa0]*Дракон|е)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Lev"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Левит|Lev|Лев(?:ит)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Num"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Чисел|Num|Чис(?:ла)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Sir"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Премудрост(?:и[\\s\\xa0]*Иисуса,[\\s\\xa0]*сына[\\s\\xa0]*Сирахова|ь[\\s\\xa0]*Сираха)|Ekkleziastik|Sir|Сир(?:ахова)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Wis"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Прем(?:удрости[\\s\\xa0]*Соломона)?|Wis))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Lam"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Плач(?:[\\s\\xa0]*Иеремии)?|Lam))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["EpJer"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*Иеремии|EpJer))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Rev"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Rev|Отк(?:р(?:овение)?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["PrMan"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Молитва[\\s\\xa0]*Манассии|PrMan))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Deut"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Deut|Втор(?:озаконие)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Josh"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Иисуса[\\s\\xa0]*Навина|Josh|И(?:исус(?:а[\\s\\xa0]*Навина|[\\s\\xa0]*Навин)|еш(?:уа)?)|Нав))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Judg"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Суде(?:[ий](?:[\\s\\xa0]*Израилевых)?)|Judg|Суд(?:е[ий]|ьи)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Ruth"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Руфи|Ruth|Ру(?:т|фь?)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["1Esd"],
apocrypha: true,
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:2(?:-?(?:[ея](?:\.[\s\xa0]*Ездры|[\s\xa0]*Ездры))|\.[\s\xa0]*Ездры|(?:[ея](?:\.[\s\xa0]*Ездры|[\s\xa0]*Ездры))|[\s\xa0]*Езд(?:ры)?)|1Esd))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["2Esd"],
apocrypha: true,
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:3(?:-?(?:[ея](?:\.[\s\xa0]*Ездры|[\s\xa0]*Ездры))|\.[\s\xa0]*Ездры|(?:[ея](?:\.[\s\xa0]*Ездры|[\s\xa0]*Ездры))|[\s\xa0]*Езд(?:ры)?)|2Esd))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["Isa"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Исаии|Isa|Ис(?:аи[ия]?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["2Sam"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)))|\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)|(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)))|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Цар(?:ств)?)|Sam))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Sam"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)))|\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)|(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Царств)))|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Самуила|Цар(?:ств)?)|Sam))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["2Kgs"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:4(?:-?(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)))|\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)|(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)))|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Цар(?:ств)?))|2(?:-?(?:[ея](?:\.[\s\xa0]*Царе[ий]|[\s\xa0]*Царе[ий]))|\.[\s\xa0]*Царе[ий]|(?:[ея](?:\.[\s\xa0]*Царе[ий]|[\s\xa0]*Царе[ий]))|[\s\xa0]*Царе[ий]|Kgs)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Kgs"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:3(?:-?(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)))|\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)|(?:[ея](?:\.[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Царств)))|[\s\xa0]*(?:Книга[\s\xa0]*Царств|Цар(?:ств)?))|1(?:-?(?:[ея](?:\.[\s\xa0]*Царе[ий]|[\s\xa0]*Царе[ий]))|\.[\s\xa0]*Царе[ий]|(?:[ея](?:\.[\s\xa0]*Царе[ий]|[\s\xa0]*Царе[ий]))|[\s\xa0]*Царе[ий]|Kgs)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["2Chr"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?(?:[ея](?:\.[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)|[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)))|\.[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)|(?:[ея](?:\.[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)|[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)))|[\s\xa0]*(?:Хроник|Лет(?:опись)?|Пар(?:алипоменон)?)|Chr))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Chr"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?(?:[ея](?:\.[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)|[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)))|\.[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)|(?:[ея](?:\.[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)|[\s\xa0]*(?:Паралипоменон|Летопись|Хроник)))|[\s\xa0]*(?:Хроник|Лет(?:опись)?|Пар(?:алипоменон)?)|Chr))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["Ezra"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:Первая[\s\xa0]*Ездры|Книга[\s\xa0]*Ездры|1[\s\xa0]*Езд|Уза[ий]р|Ezra|Езд(?:р[аы])?))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["Neh"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Неемии|Неем(?:и[ия])?|Neh))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["GkEsth"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Дополнения[\\s\\xa0]*к[\\s\\xa0]*Есфири|GkEsth))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Esth"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Есфири|Esth|Есф(?:ирь)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Job"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Иова|Job|Аюб|Иова?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Ps"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Заб(?:ур)?|Ps|Пс(?:ал(?:тирь|мы|ом)?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["PrAzar"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Молитва[\\s\\xa0]*Азария|PrAzar))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Prov"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*притче[ий][\\s\\xa0]*Соломоновых|Prov|Мудр(?:ые[\\s\\xa0]*изречения)?|Пр(?:ит(?:чи)?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Eccl"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*Екклесиаста|Eccl|Разм(?:ышления)?|Екк(?:лесиаст)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["SgThree"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Благодарственная[\\s\\xa0]*песнь[\\s\\xa0]*отроков|Молитва[\\s\\xa0]*святых[\\s\\xa0]*трех[\\s\\xa0]*отроков|Песнь[\\s\\xa0]*тр[её]х[\\s\\xa0]*отроков|SgThree))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Song"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Song|Песн(?:и[\\s\\xa0]*Песне[ий]|ь(?:[\\s\\xa0]*(?:песне[ий][\\s\\xa0]*Соломона|Суле[ий]мана))?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Jer"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Иеремии|Jer|Иер(?:еми[ия])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Ezek"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Иезекииля|Ezek|Езек(?:иил)?|Иез(?:екиил[ья])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Dan"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Даниила|Dan|Д(?:ан(?:и(?:ила?|ял))?|он)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Hos"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Осии|Hos|Ос(?:и[ия])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Joel"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Иоиля|Joel|Иоил[ья]?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Amos"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Амоса|Amos|Ам(?:оса?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Obad"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Авдия|Obad|Авд(?:и[ийя])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Jonah"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Ионы|Jonah|Ион[аы]|Юнус))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Mic"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Михея|Mic|Мих(?:е[ийя])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Nah"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Наума|Наума?|Nah))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Hab"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Аввакума|Hab|Авв(?:акума?)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Zeph"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Софонии|Zeph|Соф(?:они[ия])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Hag"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Аггея|Hag|Агг(?:е[ийя])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Zech"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Захарии|Zech|За(?:к(?:ария)?|х(?:ари[ия])?)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Mal"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*пророка[\\s\\xa0]*Малахии|Mal|Мал(?:ахи[ия])?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Matt"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Евангелие[\\s\\xa0]*от[\\s\\xa0]*Матфея|От[\\s\\xa0]*Матфея|Matt|М(?:ат(?:а[ий])?|[тф])))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Mark"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Евангелие[\\s\\xa0]*от[\\s\\xa0]*Марка|От[\\s\\xa0]*Марка|Mark|М(?:арк|[кр])))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Luke"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Евангелие[\\s\\xa0]*от[\\s\\xa0]*Луки|От[\\s\\xa0]*Луки|Luke|Л(?:ука|к)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["1John"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))))|\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))))|John|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|И(?:о(?:анна|хана)|н))))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["2John"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))))|\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))))|John|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|И(?:о(?:анна|хана)|н))))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["3John"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(3(?:-?(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))))|\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|Ио(?:анна|хана))))|John|[\s\xa0]*(?:послание[\s\xa0]*Иоанна|И(?:о(?:анна|хана)|н))))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["John"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Евангелие[\\s\\xa0]*от[\\s\\xa0]*Иоанна|От[\\s\\xa0]*Иоанна|John|И(?:охан|н)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Acts"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Acts|Деян(?:ия)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Rom"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Римлянам|К[\\s\\xa0]*Римлянам|Rom|Рим(?:лянам)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["2Cor"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)|[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)))|\.[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)|(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)|[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)))|[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Кор(?:инфянам)?)|Cor))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Cor"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)|[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)))|\.[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)|(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)|[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Коринфянам)))|[\s\xa0]*(?:к[\s\xa0]*Коринфянам|Кор(?:инфянам)?)|Cor))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["Gal"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Галатам|К[\\s\\xa0]*Галатам|Gal|Гал(?:атам)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Eph"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Ефесянам|К[\\s\\xa0]*Ефесянам|Eph|(?:[ЕЭ]ф(?:есянам)?)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Phil"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Филиппи[ий]цам|К[\\s\\xa0]*Филиппи[ий]цам|Phil|Ф(?:ил(?:иппи[ий]цам)?|лп)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Col"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Колоссянам|Col|К(?:[\\s\\xa0]*Колоссянам|ол(?:оссянам)?)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["2Thess"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам)|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)))|\.[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)|[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам)|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)))|Thess|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фес(?:салоники[ий]цам)?))|2(?:-?[ея](?:\.[\s\xa0]*Фессалоники(?:[ий]цам|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)))|[ея](?:\.[\s\xa0]*Фессалоники(?:[ий]цам|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам))))|2(?:-?[ея][\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам))|[ея][\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам)))|2(?:-?[ея][\s\xa0]*Фессалоники(?:[ий]цам)|[ея][\s\xa0]*Фессалоники(?:[ий]цам)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Thess"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам)|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)))|\.[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)|[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам)|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)))|Thess|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фес(?:салоники[ий]цам)?))|1(?:-?[ея](?:\.[\s\xa0]*Фессалоники(?:[ий]цам|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам)))|[ея](?:\.[\s\xa0]*Фессалоники(?:[ий]цам|[\s\xa0]*(?:к[\s\xa0]*Фессалоники[ий]цам|Фессалоники[ий]цам))))|1(?:-?[ея][\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам))|[ея][\s\xa0]*(?:к[\s\xa0]*Фессалоники(?:[ий]цам|Фессалоники[ий]цам)))|1(?:-?[ея][\s\xa0]*Фессалоники(?:[ий]цам)|[ея][\s\xa0]*Фессалоники(?:[ий]цам)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["2Tim"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))|[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))))|\.[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))|(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))|[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))))|[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею)?)|Tim))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Tim"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))|[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))))|\.[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))|(?:[ея](?:\.[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))|[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею))))|[\s\xa0]*(?:к[\s\xa0]*Тимофею|Тим(?:етею|офею)?)|Tim))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["Titus"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Титу|К[\\s\\xa0]*Титу|Titus|Титу?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Phlm"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Филимону|К[\\s\\xa0]*Филимону|Phlm|Ф(?:илимону|лм)))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Heb"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*к[\\s\\xa0]*Евреям|К[\\s\\xa0]*Евреям|Heb|Евр(?:еям)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Jas"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*Иакова|Якуб|Jas|Иак(?:ова)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["2Pet"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(2(?:-?(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))|[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))))|\.[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))|(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))|[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))))|[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра)?)|Pet))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Pet"],
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(1(?:-?(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))|[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))))|\.[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))|(?:[ея](?:\.[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))|[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра))))|[\s\xa0]*(?:послание[\s\xa0]*Петра|Пет(?:ира|ра)?)|Pet))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["Jude"],
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Послание[\\s\\xa0]*Иуды|Jude|Иуд[аы]?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Tob"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Tob|Тов(?:ита)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Jdt"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Jdt|Юди(?:фь)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Bar"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:Книга[\\s\\xa0]*(?:пророка[\\s\\xa0]*Вару́ха|Варуха)|Бару́ха|Bar|Вар(?:уха)?))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["Sus"],
apocrypha: true,
regexp: RegExp("(^|" + bcv_parser.prototype.regexps.pre_book + ")((?:С(?:казанию[\\s\\xa0]*о[\\s\\xa0]*Сусанне[\\s\\xa0]*и[\\s\\xa0]*Данииле|усанна(?:[\\s\\xa0]*и[\\s\\xa0]*старцы)?)|Sus))(?:(?=[\\d\\s\\xa0.:,;\\x1e\\x1f&\\(\\)()\\[\\]/\"'\\*=~\\-\\u2013\\u2014])|$)", "gi")
}, {
osis: ["2Macc"],
apocrypha: true,
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:Вторая[\s\xa0]*книга[\s\xa0]*Маккаве[ий]ская|2(?:-?(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|\.[\s\xa0]*Маккавеев|(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|[\s\xa0]*Макк(?:авеев)?|Macc)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["3Macc"],
apocrypha: true,
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:Третья[\s\xa0]*книга[\s\xa0]*Маккаве[ий]ская|3(?:-?(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|\.[\s\xa0]*Маккавеев|(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|[\s\xa0]*Макк(?:авеев)?|Macc)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["4Macc"],
apocrypha: true,
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])(4(?:-?(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|\.[\s\xa0]*Маккавеев|(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|[\s\xa0]*Макк(?:авеев)?|Macc))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}, {
osis: ["1Macc"],
apocrypha: true,
regexp: /(^|[^0-9A-Za-zЀ-ҁ҃-҇Ҋ-ԧⷠ-ⷿꙀ-꙯ꙴ-꙽ꙿ-ꚗꚟ])((?:Первая[\s\xa0]*книга[\s\xa0]*Маккаве[ий]ская|1(?:-?(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|\.[\s\xa0]*Маккавеев|(?:[ея](?:\.[\s\xa0]*Маккавеев|[\s\xa0]*Маккавеев))|[\s\xa0]*Макк(?:авеев)?|Macc)))(?:(?=[\d\s\xa0.:,;\x1e\x1f&\(\)()\[\]\/"'\*=~\-\u2013\u2014])|$)/gi
}
];
if (include_apocrypha === true && case_sensitive === "none") {
return books;
}
out = [];
for (k = 0, len = books.length; k < len; k++) {
book = books[k];
if (include_apocrypha === false && (book.apocrypha != null) && book.apocrypha === true) {
continue;
}
if (case_sensitive === "books") {
book.regexp = new RegExp(book.regexp.source, "g");
}
out.push(book);
}
return out;
};
bcv_parser.prototype.regexps.books = bcv_parser.prototype.regexps.get_books(false, "none");
var grammar = (function() {
/*
* Generated by PEG.js 0.8.0.
*
* http://pegjs.majda.cz/
*/
function peg$subclass(child, parent) {
function ctor() { this.constructor = child; }
ctor.prototype = parent.prototype;
child.prototype = new ctor();
}
function SyntaxError(message, expected, found, offset, line, column) {
this.message = message;
this.expected = expected;
this.found = found;
this.offset = offset;
this.line = line;
this.column = column;
this.name = "SyntaxError";
}
peg$subclass(SyntaxError, Error);
function parse(input) {
var options = arguments.length > 1 ? arguments[1] : {},
peg$FAILED = {},
peg$startRuleFunctions = { start: peg$parsestart },
peg$startRuleFunction = peg$parsestart,
peg$c0 = [],
peg$c1 = peg$FAILED,
peg$c2 = null,
peg$c3 = function(val_1, val_2) { val_2.unshift([val_1]); return {"type": "sequence", "value": val_2, "indices": [offset(), peg$currPos - 1]} },
peg$c4 = "(",
peg$c5 = { type: "literal", value: "(", description: "\"(\"" },
peg$c6 = ")",
peg$c7 = { type: "literal", value: ")", description: "\")\"" },
peg$c8 = function(val_1, val_2) { if (typeof(val_2) === "undefined") val_2 = []; val_2.unshift([val_1]); return {"type": "sequence_post_enclosed", "value": val_2, "indices": [offset(), peg$currPos - 1]} },
peg$c9 = void 0,
peg$c10 = function(val_1, val_2) { if (val_1.length && val_1.length === 2) val_1 = val_1[0]; // for `b`, which returns [object, undefined]
return {"type": "range", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c11 = "\x1F",
peg$c12 = { type: "literal", value: "\x1F", description: "\"\\x1F\"" },
peg$c13 = "/",
peg$c14 = { type: "literal", value: "/", description: "\"/\"" },
peg$c15 = /^[1-8]/,
peg$c16 = { type: "class", value: "[1-8]", description: "[1-8]" },
peg$c17 = function(val) { return {"type": "b", "value": val.value, "indices": [offset(), peg$currPos - 1]} },
peg$c18 = function(val_1, val_2) { return {"type": "bc", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c19 = ",",
peg$c20 = { type: "literal", value: ",", description: "\",\"" },
peg$c21 = function(val_1, val_2) { return {"type": "bc_title", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c22 = ".",
peg$c23 = { type: "literal", value: ".", description: "\".\"" },
peg$c24 = function(val_1, val_2) { return {"type": "bcv", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c25 = "-",
peg$c26 = { type: "literal", value: "-", description: "\"-\"" },
peg$c27 = function(val_1, val_2, val_3, val_4) { return {"type": "range", "value": [{"type": "bcv", "value": [{"type": "bc", "value": [val_1, val_2], "indices": [val_1.indices[0], val_2.indices[1]]}, val_3], "indices": [val_1.indices[0], val_3.indices[1]]}, val_4], "indices": [offset(), peg$currPos - 1]} },
peg$c28 = function(val_1, val_2) { return {"type": "bv", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c29 = function(val_1, val_2) { return {"type": "bc", "value": [val_2, val_1], "indices": [offset(), peg$currPos - 1]} },
peg$c30 = function(val_1, val_2, val_3) { return {"type": "cb_range", "value": [val_3, val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c31 = "th",
peg$c32 = { type: "literal", value: "th", description: "\"th\"" },
peg$c33 = "nd",
peg$c34 = { type: "literal", value: "nd", description: "\"nd\"" },
peg$c35 = "st",
peg$c36 = { type: "literal", value: "st", description: "\"st\"" },
peg$c37 = "/1\x1F",
peg$c38 = { type: "literal", value: "/1\x1F", description: "\"/1\\x1F\"" },
peg$c39 = function(val) { return {"type": "c_psalm", "value": val.value, "indices": [offset(), peg$currPos - 1]} },
peg$c40 = function(val_1, val_2) { return {"type": "cv_psalm", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c41 = function(val_1, val_2) { return {"type": "c_title", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c42 = function(val_1, val_2) { return {"type": "cv", "value": [val_1, val_2], "indices": [offset(), peg$currPos - 1]} },
peg$c43 = function(val) { return {"type": "c", "value": [val], "indices": [offset(), peg$currPos - 1]} },
peg$c44 = "\u0438",
peg$c45 = { type: "literal", value: "\u0438", description: "\"\\u0438\"" },
peg$c46 = "\u0434\u0430\u043B\u0435\u0435",
peg$c47 = { type: "literal", value: "\u0434\u0430\u043B\u0435\u0435", description: "\"\\u0434\\u0430\\u043B\\u0435\\u0435\"" },
peg$c48 = /^[a-z]/,
peg$c49 = { type: "class", value: "[a-z]", description: "[a-z]" },
peg$c50 = function(val_1) { return {"type": "ff", "value": [val_1], "indices": [offset(), peg$currPos - 1]} },
peg$c51 = "\u043D\u0430\u0434\u043F\u0438\u0441\u0430\u043D\u0438\u044F\u0445",
peg$c52 = { type: "literal", value: "\u043D\u0430\u0434\u043F\u0438\u0441\u0430\u043D\u0438\u044F\u0445", description: "\"\\u043D\\u0430\\u0434\\u043F\\u0438\\u0441\\u0430\\u043D\\u0438\\u044F\\u0445\"" },
peg$c53 = function(val_1) { return {"type": "integer_title", "value": [val_1], "indices": [offset(), peg$currPos - 1]} },
peg$c54 = "/9\x1F",
peg$c55 = { type: "literal", value: "/9\x1F", description: "\"/9\\x1F\"" },
peg$c56 = function(val) { return {"type": "context", "value": val.value, "indices": [offset(), peg$currPos - 1]} },
peg$c57 = "/2\x1F",
peg$c58 = { type: "literal", value: "/2\x1F", description: "\"/2\\x1F\"" },
peg$c59 = ".1",
peg$c60 = { type: "literal", value: ".1", description: "\".1\"" },
peg$c61 = /^[0-9]/,
peg$c62 = { type: "class", value: "[0-9]", description: "[0-9]" },
peg$c63 = function(val) { return {"type": "bc", "value": [val, {"type": "c", "value": [{"type": "integer", "value": 151, "indices": [peg$currPos - 2, peg$currPos - 1]}], "indices": [peg$currPos - 2, peg$currPos - 1]}], "indices": [offset(), peg$currPos - 1]} },
peg$c64 = function(val_1, val_2) { return {"type": "bcv", "value": [val_1, {"type": "v", "value": [val_2], "indices": [val_2.indices[0], val_2.indices[1]]}], "indices": [offset(), peg$currPos - 1]} },
peg$c65 = /^[\u0430\u0431]/i,
peg$c66 = { type: "class", value: "[\\u0430\\u0431]i", description: "[\\u0430\\u0431]i" },
peg$c67 = function(val) { return {"type": "v", "value": [val], "indices": [offset(), peg$currPos - 1]} },
peg$c68 = "\u0433\u043B",
peg$c69 = { type: "literal", value: "\u0433\u043B", description: "\"\\u0433\\u043B\"" },
peg$c70 = "\u0430\u0432\u044B",
peg$c71 = { type: "literal", value: "\u0430\u0432\u044B", description: "\"\\u0430\\u0432\\u044B\"" },
peg$c72 = "\u0430\u0432",
peg$c73 = { type: "literal", value: "\u0430\u0432", description: "\"\\u0430\\u0432\"" },
peg$c74 = "",
peg$c75 = function() { return {"type": "c_explicit"} },
peg$c76 = "\u0441\u0442\u0438\u0445",
peg$c77 = { type: "literal", value: "\u0441\u0442\u0438\u0445", description: "\"\\u0441\\u0442\\u0438\\u0445\"" },
peg$c78 = function() { return {"type": "v_explicit"} },
peg$c79 = /^["']/,
peg$c80 = { type: "class", value: "[\"']", description: "[\"']" },
peg$c81 = /^[;\/:&\-\u2013\u2014~]/,
peg$c82 = { type: "class", value: "[;\\/:&\\-\\u2013\\u2014~]", description: "[;\\/:&\\-\\u2013\\u2014~]" },
peg$c83 = function() { return "" },
peg$c84 = /^[\-\u2013\u2014]/,
peg$c85 = { type: "class", value: "[\\-\\u2013\\u2014]", description: "[\\-\\u2013\\u2014]" },
peg$c86 = "\u2014",
peg$c87 = { type: "literal", value: "\u2014", description: "\"\\u2014\"" },
peg$c88 = function(val) { return {type:"title", value: [val], "indices": [offset(), peg$currPos - 1]} },
peg$c89 = "from",
peg$c90 = { type: "literal", value: "from", description: "\"from\"" },
peg$c91 = "of",
peg$c92 = { type: "literal", value: "of", description: "\"of\"" },
peg$c93 = "in",
peg$c94 = { type: "literal", value: "in", description: "\"in\"" },
peg$c95 = "the",
peg$c96 = { type: "literal", value: "the", description: "\"the\"" },
peg$c97 = "book",
peg$c98 = { type: "literal", value: "book", description: "\"book\"" },
peg$c99 = /^[([]/,
peg$c100 = { type: "class", value: "[([]", description: "[([]" },
peg$c101 = /^[)\]]/,
peg$c102 = { type: "class", value: "[)\\]]", description: "[)\\]]" },
peg$c103 = function(val) { return {"type": "translation_sequence", "value": val, "indices": [offset(), peg$currPos - 1]} },
peg$c104 = "\x1E",
peg$c105 = { type: "literal", value: "\x1E", description: "\"\\x1E\"" },
peg$c106 = function(val) { return {"type": "translation", "value": val.value, "indices": [offset(), peg$currPos - 1]} },
peg$c107 = ",000",
peg$c108 = { type: "literal", value: ",000", description: "\",000\"" },
peg$c109 = function(val) { return {"type": "integer", "value": parseInt(val.join(""), 10), "indices": [offset(), peg$currPos - 1]} },
peg$c110 = /^[^\x1F\x1E([]/,
peg$c111 = { type: "class", value: "[^\\x1F\\x1E([]", description: "[^\\x1F\\x1E([]" },
peg$c112 = function(val) { return {"type": "word", "value": val.join(""), "indices": [offset(), peg$currPos - 1]} },
peg$c113 = function(val) { return {"type": "stop", "value": val, "indices": [offset(), peg$currPos - 1]} },
peg$c114 = /^[\s\xa0*]/,
peg$c115 = { type: "class", value: "[\\s\\xa0*]", description: "[\\s\\xa0*]" },
peg$currPos = 0,
peg$reportedPos = 0,
peg$cachedPos = 0,
peg$cachedPosDetails = { line: 1, column: 1, seenCR: false },
peg$maxFailPos = 0,
peg$maxFailExpected = [],
peg$silentFails = 0,
peg$result;
if ("startRule" in options) {
if (!(options.startRule in peg$startRuleFunctions)) {
throw new Error("Can't start parsing from rule \"" + options.startRule + "\".");
}
peg$startRuleFunction = peg$startRuleFunctions[options.startRule];
}
function text() {
return input.substring(peg$reportedPos, peg$currPos);
}
function offset() {
return peg$reportedPos;
}
function line() {
return peg$computePosDetails(peg$reportedPos).line;
}
function column() {
return peg$computePosDetails(peg$reportedPos).column;
}
function expected(description) {
throw peg$buildException(
null,
[{ type: "other", description: description }],
peg$reportedPos
);
}
function error(message) {
throw peg$buildException(message, null, peg$reportedPos);
}
function peg$computePosDetails(pos) {
function advance(details, startPos, endPos) {
var p, ch;
for (p = startPos; p < endPos; p++) {
ch = input.charAt(p);
if (ch === "\n") {
if (!details.seenCR) { details.line++; }
details.column = 1;
details.seenCR = false;
} else if (ch === "\r" || ch === "\u2028" || ch === "\u2029") {
details.line++;
details.column = 1;
details.seenCR = true;
} else {
details.column++;
details.seenCR = false;
}
}
}
if (peg$cachedPos !== pos) {
if (peg$cachedPos > pos) {
peg$cachedPos = 0;
peg$cachedPosDetails = { line: 1, column: 1, seenCR: false };
}
advance(peg$cachedPosDetails, peg$cachedPos, pos);
peg$cachedPos = pos;
}
return peg$cachedPosDetails;
}
function peg$fail(expected) {
if (peg$currPos < peg$maxFailPos) { return; }
if (peg$currPos > peg$maxFailPos) {
peg$maxFailPos = peg$currPos;
peg$maxFailExpected = [];
}
peg$maxFailExpected.push(expected);
}
function peg$buildException(message, expected, pos) {
function cleanupExpected(expected) {
var i = 1;
expected.sort(function(a, b) {
if (a.description < b.description) {
return -1;
} else if (a.description > b.description) {
return 1;
} else {
return 0;
}
});
while (i < expected.length) {
if (expected[i - 1] === expected[i]) {
expected.splice(i, 1);
} else {
i++;
}
}
}
function buildMessage(expected, found) {
function stringEscape(s) {
function hex(ch) { return ch.charCodeAt(0).toString(16).toUpperCase(); }
return s
.replace(/\\/g, '\\\\')
.replace(/"/g, '\\"')
.replace(/\x08/g, '\\b')
.replace(/\t/g, '\\t')
.replace(/\n/g, '\\n')
.replace(/\f/g, '\\f')
.replace(/\r/g, '\\r')
.replace(/[\x00-\x07\x0B\x0E\x0F]/g, function(ch) { return '\\x0' + hex(ch); })
.replace(/[\x10-\x1F\x80-\xFF]/g, function(ch) { return '\\x' + hex(ch); })
.replace(/[\u0180-\u0FFF]/g, function(ch) { return '\\u0' + hex(ch); })
.replace(/[\u1080-\uFFFF]/g, function(ch) { return '\\u' + hex(ch); });
}
var expectedDescs = new Array(expected.length),
expectedDesc, foundDesc, i;
for (i = 0; i < expected.length; i++) {
expectedDescs[i] = expected[i].description;
}
expectedDesc = expected.length > 1
? expectedDescs.slice(0, -1).join(", ")
+ " or "
+ expectedDescs[expected.length - 1]
: expectedDescs[0];
foundDesc = found ? "\"" + stringEscape(found) + "\"" : "end of input";
return "Expected " + expectedDesc + " but " + foundDesc + " found.";
}
var posDetails = peg$computePosDetails(pos),
found = pos < input.length ? input.charAt(pos) : null;
if (expected !== null) {
cleanupExpected(expected);
}
return new SyntaxError(
message !== null ? message : buildMessage(expected, found),
expected,
found,
pos,
posDetails.line,
posDetails.column
);
}
function peg$parsestart() {
var s0, s1;
s0 = [];
s1 = peg$parsebcv_hyphen_range();
if (s1 === peg$FAILED) {
s1 = peg$parsesequence();
if (s1 === peg$FAILED) {
s1 = peg$parsecb_range();
if (s1 === peg$FAILED) {
s1 = peg$parserange();
if (s1 === peg$FAILED) {
s1 = peg$parseff();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_comma();
if (s1 === peg$FAILED) {
s1 = peg$parsebc_title();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bc();
if (s1 === peg$FAILED) {
s1 = peg$parsebc();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parsebv();
if (s1 === peg$FAILED) {
s1 = peg$parsec_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parseb();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsecb();
if (s1 === peg$FAILED) {
s1 = peg$parsecb_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsetranslation_sequence_enclosed();
if (s1 === peg$FAILED) {
s1 = peg$parsetranslation_sequence();
if (s1 === peg$FAILED) {
s1 = peg$parsesequence_sep();
if (s1 === peg$FAILED) {
s1 = peg$parsec_title();
if (s1 === peg$FAILED) {
s1 = peg$parseinteger_title();
if (s1 === peg$FAILED) {
s1 = peg$parsecv();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parsev_letter();
if (s1 === peg$FAILED) {
s1 = peg$parseinteger();
if (s1 === peg$FAILED) {
s1 = peg$parsec();
if (s1 === peg$FAILED) {
s1 = peg$parsev();
if (s1 === peg$FAILED) {
s1 = peg$parseword();
if (s1 === peg$FAILED) {
s1 = peg$parseword_parenthesis();
if (s1 === peg$FAILED) {
s1 = peg$parsecontext();
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
if (s1 !== peg$FAILED) {
while (s1 !== peg$FAILED) {
s0.push(s1);
s1 = peg$parsebcv_hyphen_range();
if (s1 === peg$FAILED) {
s1 = peg$parsesequence();
if (s1 === peg$FAILED) {
s1 = peg$parsecb_range();
if (s1 === peg$FAILED) {
s1 = peg$parserange();
if (s1 === peg$FAILED) {
s1 = peg$parseff();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_comma();
if (s1 === peg$FAILED) {
s1 = peg$parsebc_title();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bc();
if (s1 === peg$FAILED) {
s1 = peg$parsebc();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parsebv();
if (s1 === peg$FAILED) {
s1 = peg$parsec_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parseb();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsecb();
if (s1 === peg$FAILED) {
s1 = peg$parsecb_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsetranslation_sequence_enclosed();
if (s1 === peg$FAILED) {
s1 = peg$parsetranslation_sequence();
if (s1 === peg$FAILED) {
s1 = peg$parsesequence_sep();
if (s1 === peg$FAILED) {
s1 = peg$parsec_title();
if (s1 === peg$FAILED) {
s1 = peg$parseinteger_title();
if (s1 === peg$FAILED) {
s1 = peg$parsecv();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parsev_letter();
if (s1 === peg$FAILED) {
s1 = peg$parseinteger();
if (s1 === peg$FAILED) {
s1 = peg$parsec();
if (s1 === peg$FAILED) {
s1 = peg$parsev();
if (s1 === peg$FAILED) {
s1 = peg$parseword();
if (s1 === peg$FAILED) {
s1 = peg$parseword_parenthesis();
if (s1 === peg$FAILED) {
s1 = peg$parsecontext();
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
} else {
s0 = peg$c1;
}
return s0;
}
function peg$parsesequence() {
var s0, s1, s2, s3, s4, s5;
s0 = peg$currPos;
s1 = peg$parsecb_range();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_hyphen_range();
if (s1 === peg$FAILED) {
s1 = peg$parserange();
if (s1 === peg$FAILED) {
s1 = peg$parseff();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_comma();
if (s1 === peg$FAILED) {
s1 = peg$parsebc_title();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bc();
if (s1 === peg$FAILED) {
s1 = peg$parsebc();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parsebv();
if (s1 === peg$FAILED) {
s1 = peg$parsec_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parseb();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsecb();
if (s1 === peg$FAILED) {
s1 = peg$parsecb_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsecontext();
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
if (s1 !== peg$FAILED) {
s2 = [];
s3 = peg$currPos;
s4 = peg$parsesequence_sep();
if (s4 === peg$FAILED) {
s4 = peg$c2;
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesequence_post();
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
if (s3 !== peg$FAILED) {
while (s3 !== peg$FAILED) {
s2.push(s3);
s3 = peg$currPos;
s4 = peg$parsesequence_sep();
if (s4 === peg$FAILED) {
s4 = peg$c2;
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesequence_post();
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
}
} else {
s2 = peg$c1;
}
if (s2 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c3(s1, s2);
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsesequence_post_enclosed() {
  // Grammar rule: "(" sp sequence_sep? sequence_post
  //               (sequence_sep? sequence_post)* sp ")"
  // → peg$c8(first, rest). Rewritten with guard clauses and an explicit
  // repetition loop; behavior identical to the generated original.
  var start = peg$currPos;
  if (input.charCodeAt(peg$currPos) === 40) {  // "("
    peg$currPos++;
    if (peg$parsesp() !== peg$FAILED) {
      peg$parsesequence_sep();  // optional leading separator
      var first = peg$parsesequence_post();
      if (first !== peg$FAILED) {
        // Zero or more further (optional separator, post element) pairs.
        var rest = [];
        for (;;) {
          var mark = peg$currPos;
          var sep = peg$parsesequence_sep();
          if (sep === peg$FAILED) {
            sep = peg$c2;
          }
          var post = peg$parsesequence_post();
          if (post === peg$FAILED) {
            peg$currPos = mark;
            break;
          }
          rest.push([sep, post]);
        }
        if (peg$parsesp() !== peg$FAILED) {
          if (input.charCodeAt(peg$currPos) === 41) {  // ")"
            peg$currPos++;
            peg$reportedPos = start;
            return peg$c8(first, rest);
          }
          if (peg$silentFails === 0) { peg$fail(peg$c7); }
        }
      }
    }
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c5); }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsesequence_post() {
  // Pure ordered choice over every reference form allowed after a
  // sequence separator. The deeply nested if-chain of the generated
  // original is flattened into a dispatch table; the attempt order is
  // preserved exactly and the first success is returned.
  var alternatives = [
    peg$parsesequence_post_enclosed,
    peg$parsecb_range,
    peg$parsebcv_hyphen_range,
    peg$parserange,
    peg$parseff,
    peg$parsebcv_comma,
    peg$parsebc_title,
    peg$parseps151_bcv,
    peg$parsebcv,
    peg$parsebcv_weak,
    peg$parseps151_bc,
    peg$parsebc,
    peg$parsecv_psalm,
    peg$parsebv,
    peg$parsec_psalm,
    peg$parseb,
    peg$parsecbv,
    peg$parsecbv_ordinal,
    peg$parsecb,
    peg$parsecb_ordinal,
    peg$parsec_title,
    peg$parseinteger_title,
    peg$parsecv,
    peg$parsecv_weak,
    peg$parsev_letter,
    peg$parseinteger,
    peg$parsec,
    peg$parsev
  ];
  var result = peg$FAILED;
  for (var i = 0; result === peg$FAILED && i < alternatives.length; i++) {
    result = alternatives[i]();
  }
  return result;
}
function peg$parserange() {
  // Grammar rule: range = <left operand> range_sep <right operand>
  // → peg$c10(left, right). Both operands are long ordered choices; the
  // left side additionally allows a bare book only when a positive
  // lookahead confirms that a range separator and another book/chapter
  // form follow. Choice order and the special [book, peg$c9] node shape
  // for that alternative are preserved exactly.
  var start = peg$currPos;
  var left = peg$FAILED;
  var leftForms = [
    peg$parsebcv_comma, peg$parsebc_title, peg$parseps151_bcv, peg$parsebcv,
    peg$parsebcv_weak, peg$parseps151_bc, peg$parsebc, peg$parsecv_psalm,
    peg$parsebv
  ];
  for (var i = 0; left === peg$FAILED && i < leftForms.length; i++) {
    left = leftForms[i]();
  }
  if (left === peg$FAILED) {
    // Bare book alternative: b &(range_sep (bcv_comma / bcv / bcv_weak /
    // ps151_bc / bc / bv / b)).
    var mark = peg$currPos;
    var book = peg$parseb();
    if (book !== peg$FAILED) {
      var la = peg$currPos;
      peg$silentFails++;
      var found = false;
      if (peg$parserange_sep() !== peg$FAILED) {
        var lookaheadForms = [
          peg$parsebcv_comma, peg$parsebcv, peg$parsebcv_weak,
          peg$parseps151_bc, peg$parsebc, peg$parsebv, peg$parseb
        ];
        var next = peg$FAILED;
        for (var j = 0; next === peg$FAILED && j < lookaheadForms.length; j++) {
          next = lookaheadForms[j]();
        }
        found = next !== peg$FAILED;
      }
      peg$silentFails--;
      peg$currPos = la;  // lookahead never consumes
      if (found) {
        left = [book, peg$c9];  // same node shape the generator emits
      } else {
        peg$currPos = mark;
      }
    }
  }
  if (left === peg$FAILED) {
    var tailForms = [
      peg$parsecbv, peg$parsecbv_ordinal, peg$parsec_psalm, peg$parsecb,
      peg$parsecb_ordinal, peg$parsec_title, peg$parseinteger_title,
      peg$parsecv, peg$parsecv_weak, peg$parsev_letter, peg$parseinteger,
      peg$parsec, peg$parsev
    ];
    for (var k = 0; left === peg$FAILED && k < tailForms.length; k++) {
      left = tailForms[k]();
    }
  }
  if (left !== peg$FAILED && peg$parserange_sep() !== peg$FAILED) {
    // Right operand: note cv_weak is tried AFTER integer here (unlike the
    // left side) — this ordering comes from the grammar and is kept as-is.
    var rightForms = [
      peg$parseff, peg$parsebcv_comma, peg$parsebc_title, peg$parseps151_bcv,
      peg$parsebcv, peg$parsebcv_weak, peg$parseps151_bc, peg$parsebc,
      peg$parsecv_psalm, peg$parsebv, peg$parseb, peg$parsecbv,
      peg$parsecbv_ordinal, peg$parsec_psalm, peg$parsecb, peg$parsecb_ordinal,
      peg$parsec_title, peg$parseinteger_title, peg$parsecv, peg$parsev_letter,
      peg$parseinteger, peg$parsecv_weak, peg$parsec, peg$parsev
    ];
    var right = peg$FAILED;
    for (var m = 0; right === peg$FAILED && m < rightForms.length; m++) {
      right = rightForms[m]();
    }
    if (right !== peg$FAILED) {
      peg$reportedPos = start;
      return peg$c10(left, right);
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parseb() {
  // Grammar rule: b = "\x1f" any_integer ("/" [class-char])? "\x1f"
  // → peg$c17(id). The 0x1f bytes delimit a pre-tokenized book id; the
  // optional "/x" tail is consumed but does not affect the result.
  var start = peg$currPos;
  if (input.charCodeAt(peg$currPos) !== 31) {  // "\x1f"
    if (peg$silentFails === 0) { peg$fail(peg$c12); }
    return peg$c1;
  }
  peg$currPos++;
  var id = peg$parseany_integer();
  if (id === peg$FAILED) {
    peg$currPos = start;
    return peg$c1;
  }
  // Optional "/" plus one character from the peg$c15 class.
  var mark = peg$currPos;
  if (input.charCodeAt(peg$currPos) === 47) {  // "/"
    peg$currPos++;
    if (peg$c15.test(input.charAt(peg$currPos))) {
      peg$currPos++;
    } else {
      if (peg$silentFails === 0) { peg$fail(peg$c16); }
      peg$currPos = mark;  // group is optional: back out entirely
    }
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c14); }
  }
  if (input.charCodeAt(peg$currPos) === 31) {  // closing "\x1f"
    peg$currPos++;
    peg$reportedPos = start;
    return peg$c17(id);
  }
  if (peg$silentFails === 0) { peg$fail(peg$c12); }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsebc() {
  // Grammar rule: bc = b <separator> c → peg$c18(book, chapter), where the
  // separator is one of: v_explicit guarded by a positive lookahead for a
  // full "c cv_sep v" triple, one-or-more cv_sep, one-or-more cv_sep_weak,
  // one-or-more range_sep, or plain sp. Alternative order preserved.
  var start = peg$currPos;
  var book = peg$parseb();
  if (book !== peg$FAILED) {
    var sepOk = false;
    // Alternative 1: v_explicit &(c cv_sep v).
    var mark = peg$currPos;
    if (peg$parsev_explicit() !== peg$FAILED) {
      var la = peg$currPos;
      peg$silentFails++;
      var found = peg$parsec() !== peg$FAILED &&
                  peg$parsecv_sep() !== peg$FAILED &&
                  peg$parsev() !== peg$FAILED;
      peg$silentFails--;
      peg$currPos = la;  // lookahead never consumes
      if (found) {
        sepOk = true;
      } else {
        peg$currPos = mark;
      }
    }
    if (!sepOk) {
      // Alternatives 2-5; the repeated separators are consumed but their
      // values are unused by the action.
      if (peg$parsecv_sep() !== peg$FAILED) {
        while (peg$parsecv_sep() !== peg$FAILED) {}
        sepOk = true;
      } else if (peg$parsecv_sep_weak() !== peg$FAILED) {
        while (peg$parsecv_sep_weak() !== peg$FAILED) {}
        sepOk = true;
      } else if (peg$parserange_sep() !== peg$FAILED) {
        while (peg$parserange_sep() !== peg$FAILED) {}
        sepOk = true;
      } else {
        sepOk = peg$parsesp() !== peg$FAILED;
      }
    }
    if (sepOk) {
      var chapter = peg$parsec();
      if (chapter !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c18(book, chapter);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsebc_comma() {
  // Grammar rule: bc_comma = b sp "," sp c → peg$c18(book, chapter).
  // Rewritten with guard clauses; one shared failure exit.
  var start = peg$currPos;
  var book = peg$parseb();
  if (book === peg$FAILED) { peg$currPos = start; return peg$c1; }
  if (peg$parsesp() === peg$FAILED) { peg$currPos = start; return peg$c1; }
  if (input.charCodeAt(peg$currPos) === 44) {  // ","
    peg$currPos++;
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c20); }
    peg$currPos = start;
    return peg$c1;
  }
  if (peg$parsesp() === peg$FAILED) { peg$currPos = start; return peg$c1; }
  var chapter = peg$parsec();
  if (chapter === peg$FAILED) { peg$currPos = start; return peg$c1; }
  peg$reportedPos = start;
  return peg$c18(book, chapter);
}
function peg$parsebc_title() {
  // Grammar rule: bc_title = (ps151_bc / bc) title → peg$c21(bc, title).
  var start = peg$currPos;
  var bc = peg$parseps151_bc();
  if (bc === peg$FAILED) {
    bc = peg$parsebc();
  }
  if (bc !== peg$FAILED) {
    var title = peg$parsetitle();
    if (title !== peg$FAILED) {
      peg$reportedPos = start;
      return peg$c21(bc, title);
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsebcv() {
  // Grammar rule: bcv = (ps151_bc / bc)
  //   !("." v_explicit v / sequence_sep? v_explicit cv)
  //   ((cv_sep / sequence_sep)? v_explicit / cv_sep)
  //   (v_letter / v)
  // → peg$c24(bc, verse). Lookahead and separator alternatives rewritten
  // as explicit flags; attempt order preserved.
  var start = peg$currPos;
  var bc = peg$parseps151_bc();
  if (bc === peg$FAILED) {
    bc = peg$parsebc();
  }
  if (bc !== peg$FAILED) {
    // Negative lookahead over two alternatives.
    var la = peg$currPos;
    peg$silentFails++;
    var ahead = false;
    if (input.charCodeAt(peg$currPos) === 46) {  // "."
      peg$currPos++;
      ahead = peg$parsev_explicit() !== peg$FAILED && peg$parsev() !== peg$FAILED;
    }
    if (!ahead) {
      peg$currPos = la;
      peg$parsesequence_sep();  // optional
      ahead = peg$parsev_explicit() !== peg$FAILED && peg$parsecv() !== peg$FAILED;
    }
    peg$silentFails--;
    peg$currPos = la;  // lookahead never consumes
    if (!ahead) {
      // Separator before the verse.
      var sepOk = false;
      var mark = peg$currPos;
      if (peg$parsecv_sep() === peg$FAILED) {
        peg$parsesequence_sep();  // optional fallback within first form
      }
      if (peg$parsev_explicit() !== peg$FAILED) {
        sepOk = true;
      } else {
        peg$currPos = mark;
        sepOk = peg$parsecv_sep() !== peg$FAILED;  // bare cv_sep form
      }
      if (sepOk) {
        var verse = peg$parsev_letter();
        if (verse === peg$FAILED) {
          verse = peg$parsev();
        }
        if (verse !== peg$FAILED) {
          peg$reportedPos = start;
          return peg$c24(bc, verse);
        }
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsebcv_weak() {
  // Grammar rule: bcv_weak = (ps151_bc / bc) cv_sep_weak (v_letter / v)
  //   !(cv_sep v) → peg$c24(bc, verse).
  var start = peg$currPos;
  var bc = peg$parseps151_bc();
  if (bc === peg$FAILED) {
    bc = peg$parsebc();
  }
  if (bc !== peg$FAILED && peg$parsecv_sep_weak() !== peg$FAILED) {
    var verse = peg$parsev_letter();
    if (verse === peg$FAILED) {
      verse = peg$parsev();
    }
    if (verse !== peg$FAILED) {
      // Negative lookahead: reject when a stronger "cv_sep v" pair follows.
      var la = peg$currPos;
      peg$silentFails++;
      var ahead = peg$parsecv_sep() !== peg$FAILED && peg$parsev() !== peg$FAILED;
      peg$silentFails--;
      peg$currPos = la;
      if (!ahead) {
        peg$reportedPos = start;
        return peg$c24(bc, verse);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsebcv_comma() {
  // Grammar rule: bcv_comma = bc_comma sp "," sp (v_letter / v)
  //   !(cv_sep v) → peg$c24(bc, verse).
  var start = peg$currPos;
  var bc = peg$parsebc_comma();
  if (bc !== peg$FAILED && peg$parsesp() !== peg$FAILED) {
    if (input.charCodeAt(peg$currPos) === 44) {  // ","
      peg$currPos++;
      if (peg$parsesp() !== peg$FAILED) {
        var verse = peg$parsev_letter();
        if (verse === peg$FAILED) {
          verse = peg$parsev();
        }
        if (verse !== peg$FAILED) {
          // Negative lookahead: no further "cv_sep v" may follow.
          var la = peg$currPos;
          peg$silentFails++;
          var ahead = peg$parsecv_sep() !== peg$FAILED && peg$parsev() !== peg$FAILED;
          peg$silentFails--;
          peg$currPos = la;
          if (!ahead) {
            peg$reportedPos = start;
            return peg$c24(bc, verse);
          }
        }
      }
    } else {
      if (peg$silentFails === 0) { peg$fail(peg$c20); }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsebcv_hyphen_range() {
  // Grammar rule: bcv_hyphen_range = b ("-" / space)? c "-" v "-" v
  // → peg$c27(book, chapter, verse1, verse2).
  var start = peg$currPos;
  var book = peg$parseb();
  if (book === peg$FAILED) { peg$currPos = start; return peg$c1; }
  // Optional separator after the book: "-" or space; absence is fine.
  if (input.charCodeAt(peg$currPos) === 45) {  // "-"
    peg$currPos++;
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c26); }
    peg$parsespace();  // optional fallback; failure is acceptable
  }
  var chapter = peg$parsec();
  if (chapter === peg$FAILED) { peg$currPos = start; return peg$c1; }
  if (input.charCodeAt(peg$currPos) === 45) {
    peg$currPos++;
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c26); }
    peg$currPos = start;
    return peg$c1;
  }
  var verse1 = peg$parsev();
  if (verse1 === peg$FAILED) { peg$currPos = start; return peg$c1; }
  if (input.charCodeAt(peg$currPos) === 45) {
    peg$currPos++;
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c26); }
    peg$currPos = start;
    return peg$c1;
  }
  var verse2 = peg$parsev();
  if (verse2 === peg$FAILED) { peg$currPos = start; return peg$c1; }
  peg$reportedPos = start;
  return peg$c27(book, chapter, verse1, verse2);
}
function peg$parsebv() {
  // Grammar rule: bv = b <separator> (v_letter / v) → peg$c28(book, verse),
  // where the separator is: cv_sep+, cv_sep_weak+, range_sep+,
  // (sequence_sep+ &v_explicit), or sp. Alternative order preserved.
  var start = peg$currPos;
  var book = peg$parseb();
  if (book !== peg$FAILED) {
    var sepOk = false;
    if (peg$parsecv_sep() !== peg$FAILED) {
      while (peg$parsecv_sep() !== peg$FAILED) {}
      sepOk = true;
    } else if (peg$parsecv_sep_weak() !== peg$FAILED) {
      while (peg$parsecv_sep_weak() !== peg$FAILED) {}
      sepOk = true;
    } else if (peg$parserange_sep() !== peg$FAILED) {
      while (peg$parserange_sep() !== peg$FAILED) {}
      sepOk = true;
    } else {
      // sequence_sep+ followed by a positive lookahead for v_explicit.
      var mark = peg$currPos;
      if (peg$parsesequence_sep() !== peg$FAILED) {
        while (peg$parsesequence_sep() !== peg$FAILED) {}
        var la = peg$currPos;
        peg$silentFails++;
        var hasVerseMarker = peg$parsev_explicit() !== peg$FAILED;
        peg$silentFails--;
        peg$currPos = la;  // lookahead never consumes
        if (hasVerseMarker) {
          sepOk = true;
        } else {
          peg$currPos = mark;
        }
      }
      if (!sepOk) {
        sepOk = peg$parsesp() !== peg$FAILED;
      }
    }
    if (sepOk) {
      var verse = peg$parsev_letter();
      if (verse === peg$FAILED) {
        verse = peg$parsev();
      }
      if (verse !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c28(book, verse);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecb() {
  // Grammar rule: cb = c_explicit c in_book_of? b → peg$c29(chapter, book).
  var start = peg$currPos;
  if (peg$parsec_explicit() !== peg$FAILED) {
    var chapter = peg$parsec();
    if (chapter !== peg$FAILED) {
      peg$parsein_book_of();  // optional; result unused
      var book = peg$parseb();
      if (book !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c29(chapter, book);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecb_range() {
  // Grammar rule: cb_range = c_explicit c range_sep c in_book_of? b
  // → peg$c30(fromChapter, toChapter, book).
  var start = peg$currPos;
  if (peg$parsec_explicit() !== peg$FAILED) {
    var fromChapter = peg$parsec();
    if (fromChapter !== peg$FAILED && peg$parserange_sep() !== peg$FAILED) {
      var toChapter = peg$parsec();
      if (toChapter !== peg$FAILED) {
        peg$parsein_book_of();  // optional; result unused
        var book = peg$parseb();
        if (book !== peg$FAILED) {
          peg$reportedPos = start;
          return peg$c30(fromChapter, toChapter, book);
        }
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecbv() {
  // Grammar rule: cbv = cb sequence_sep? v_explicit v → peg$c24(cb, verse).
  var start = peg$currPos;
  var cb = peg$parsecb();
  if (cb !== peg$FAILED) {
    peg$parsesequence_sep();  // optional separator
    if (peg$parsev_explicit() !== peg$FAILED) {
      var verse = peg$parsev();
      if (verse !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c24(cb, verse);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecb_ordinal() {
  // Grammar rule: cb_ordinal = c <two-char ordinal suffix: c31/c33/c35>
  //   c_explicit in_book_of? b → peg$c29(chapter, book).
  var start = peg$currPos;
  var chapter = peg$parsec();
  if (chapter === peg$FAILED) { peg$currPos = start; return peg$c1; }
  // One of three two-character suffix literals, tried in order.
  var suffix = peg$FAILED;
  if (input.substr(peg$currPos, 2) === peg$c31) {
    suffix = peg$c31;
    peg$currPos += 2;
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c32); }
    if (input.substr(peg$currPos, 2) === peg$c33) {
      suffix = peg$c33;
      peg$currPos += 2;
    } else {
      if (peg$silentFails === 0) { peg$fail(peg$c34); }
      if (input.substr(peg$currPos, 2) === peg$c35) {
        suffix = peg$c35;
        peg$currPos += 2;
      } else {
        if (peg$silentFails === 0) { peg$fail(peg$c36); }
      }
    }
  }
  if (suffix === peg$FAILED) { peg$currPos = start; return peg$c1; }
  if (peg$parsec_explicit() === peg$FAILED) { peg$currPos = start; return peg$c1; }
  peg$parsein_book_of();  // optional; result unused
  var book = peg$parseb();
  if (book === peg$FAILED) { peg$currPos = start; return peg$c1; }
  peg$reportedPos = start;
  return peg$c29(chapter, book);
}
function peg$parsecbv_ordinal() {
  // Grammar rule: cbv_ordinal = cb_ordinal sequence_sep? v_explicit v
  // → peg$c24(cb, verse).
  var start = peg$currPos;
  var cb = peg$parsecb_ordinal();
  if (cb !== peg$FAILED) {
    peg$parsesequence_sep();  // optional separator
    if (peg$parsev_explicit() !== peg$FAILED) {
      var verse = peg$parsev();
      if (verse !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c24(cb, verse);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsec_psalm() {
  // Grammar rule: c_psalm = "\x1f" any_integer <3-char literal peg$c37>
  // → peg$c39(id).
  var start = peg$currPos;
  if (input.charCodeAt(peg$currPos) === 31) {  // "\x1f"
    peg$currPos++;
    var id = peg$parseany_integer();
    if (id !== peg$FAILED) {
      if (input.substr(peg$currPos, 3) === peg$c37) {
        peg$currPos += 3;
        peg$reportedPos = start;
        return peg$c39(id);
      }
      if (peg$silentFails === 0) { peg$fail(peg$c38); }
    }
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c12); }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecv_psalm() {
  // Grammar rule: cv_psalm = c_psalm sequence_sep? v_explicit v
  // → peg$c40(psalm, verse).
  var start = peg$currPos;
  var psalm = peg$parsec_psalm();
  if (psalm !== peg$FAILED) {
    peg$parsesequence_sep();  // optional separator
    if (peg$parsev_explicit() !== peg$FAILED) {
      var verse = peg$parsev();
      if (verse !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c40(psalm, verse);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsec_title() {
  // Grammar rule: c_title = c_explicit c title → peg$c41(chapter, title).
  var start = peg$currPos;
  if (peg$parsec_explicit() !== peg$FAILED) {
    var chapter = peg$parsec();
    if (chapter !== peg$FAILED) {
      var title = peg$parsetitle();
      if (title !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c41(chapter, title);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecv() {
  // Grammar rule: cv = v_explicit? c !("." v_explicit v)
  //   (cv_sep? v_explicit / cv_sep) (v_letter / v)
  // → peg$c42(chapter, verse).
  var start = peg$currPos;
  peg$parsev_explicit();  // optional leading verse marker
  var chapter = peg$parsec();
  if (chapter !== peg$FAILED) {
    // Negative lookahead: not "." v_explicit v.
    var la = peg$currPos;
    peg$silentFails++;
    var ahead = false;
    if (input.charCodeAt(peg$currPos) === 46) {  // "."
      peg$currPos++;
      ahead = peg$parsev_explicit() !== peg$FAILED && peg$parsev() !== peg$FAILED;
    }
    peg$silentFails--;
    peg$currPos = la;  // lookahead never consumes
    if (!ahead) {
      // Separator: (cv_sep? v_explicit) or a bare cv_sep.
      var sepOk = false;
      var mark = peg$currPos;
      peg$parsecv_sep();  // optional within the first form
      if (peg$parsev_explicit() !== peg$FAILED) {
        sepOk = true;
      } else {
        peg$currPos = mark;
        sepOk = peg$parsecv_sep() !== peg$FAILED;
      }
      if (sepOk) {
        var verse = peg$parsev_letter();
        if (verse === peg$FAILED) {
          verse = peg$parsev();
        }
        if (verse !== peg$FAILED) {
          peg$reportedPos = start;
          return peg$c42(chapter, verse);
        }
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecv_weak() {
  // Grammar rule: cv_weak = c cv_sep_weak (v_letter / v) !(cv_sep v)
  // → peg$c42(chapter, verse).
  var start = peg$currPos;
  var chapter = peg$parsec();
  if (chapter !== peg$FAILED && peg$parsecv_sep_weak() !== peg$FAILED) {
    var verse = peg$parsev_letter();
    if (verse === peg$FAILED) {
      verse = peg$parsev();
    }
    if (verse !== peg$FAILED) {
      // Negative lookahead: reject when a stronger "cv_sep v" pair follows.
      var la = peg$currPos;
      peg$silentFails++;
      var ahead = peg$parsecv_sep() !== peg$FAILED && peg$parsev() !== peg$FAILED;
      peg$silentFails--;
      peg$currPos = la;
      if (!ahead) {
        peg$reportedPos = start;
        return peg$c42(chapter, verse);
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsec() {
  // Grammar rule: c = c_explicit? integer → peg$c43(integer).
  var start = peg$currPos;
  peg$parsec_explicit();  // optional chapter keyword; restores itself on failure
  var value = peg$parseinteger();
  if (value !== peg$FAILED) {
    peg$reportedPos = start;
    return peg$c43(value);
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parseff() {
  // Grammar rule: ff = <operand> sp <1-char literal, case-insensitive>
  //   space <5-char literal, case-insensitive> abbrev? ![word-class]
  // → peg$c50(operand). The literal contents live in peg$c44/peg$c46
  // (defined elsewhere — presumably the "ff"/"following" suffix).
  var start = peg$currPos;
  var operandForms = [
    peg$parsebcv, peg$parsebcv_weak, peg$parsebc, peg$parsebv, peg$parsecv,
    peg$parsecv_weak, peg$parseinteger, peg$parsec, peg$parsev
  ];
  var operand = peg$FAILED;
  for (var i = 0; operand === peg$FAILED && i < operandForms.length; i++) {
    operand = operandForms[i]();
  }
  if (operand !== peg$FAILED && peg$parsesp() !== peg$FAILED) {
    if (input.substr(peg$currPos, 1).toLowerCase() === peg$c44) {
      peg$currPos++;
      if (peg$parsespace() !== peg$FAILED) {
        if (input.substr(peg$currPos, 5).toLowerCase() === peg$c46) {
          peg$currPos += 5;
          peg$parseabbrev();  // optional
          // Negative lookahead: next char must not be in the peg$c48 class.
          var la = peg$currPos;
          peg$silentFails++;
          var blocked = peg$c48.test(input.charAt(peg$currPos));
          peg$silentFails--;
          peg$currPos = la;
          if (!blocked) {
            peg$reportedPos = start;
            return peg$c50(operand);
          }
        } else {
          if (peg$silentFails === 0) { peg$fail(peg$c47); }
        }
      }
    } else {
      if (peg$silentFails === 0) { peg$fail(peg$c45); }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parseinteger_title() {
  // Grammar rule: integer_title = integer (cv_sep / sequence_sep)?
  //   <11-char literal, case-insensitive (peg$c51)> → peg$c53(integer).
  var start = peg$currPos;
  var value = peg$parseinteger();
  if (value !== peg$FAILED) {
    // Optional separator between the number and the keyword.
    if (peg$parsecv_sep() === peg$FAILED) {
      peg$parsesequence_sep();
    }
    if (input.substr(peg$currPos, 11).toLowerCase() === peg$c51) {
      peg$currPos += 11;
      peg$reportedPos = start;
      return peg$c53(value);
    }
    if (peg$silentFails === 0) { peg$fail(peg$c52); }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsecontext() {
  // Grammar rule: context = "\x1f" any_integer <3-char literal peg$c54>
  // → peg$c56(id). Same token shape as c_psalm but a different marker.
  var start = peg$currPos;
  if (input.charCodeAt(peg$currPos) === 31) {  // "\x1f"
    peg$currPos++;
    var id = peg$parseany_integer();
    if (id !== peg$FAILED) {
      if (input.substr(peg$currPos, 3) === peg$c54) {
        peg$currPos += 3;
        peg$reportedPos = start;
        return peg$c56(id);
      }
      if (peg$silentFails === 0) { peg$fail(peg$c55); }
    }
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c12); }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parseps151_b() {
  // Grammar rule: ps151_b = "\x1f" any_integer <3-char literal peg$c57>
  // → peg$c17(id).
  var start = peg$currPos;
  if (input.charCodeAt(peg$currPos) === 31) {  // "\x1f"
    peg$currPos++;
    var id = peg$parseany_integer();
    if (id !== peg$FAILED) {
      if (input.substr(peg$currPos, 3) === peg$c57) {
        peg$currPos += 3;
        peg$reportedPos = start;
        return peg$c17(id);
      }
      if (peg$silentFails === 0) { peg$fail(peg$c58); }
    }
  } else {
    if (peg$silentFails === 0) { peg$fail(peg$c12); }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parseps151_bc() {
  // Grammar rule: ps151_bc = ps151_b <2-char literal peg$c59>
  //   ![peg$c61-class char] → peg$c63(book).
  var start = peg$currPos;
  var book = peg$parseps151_b();
  if (book !== peg$FAILED) {
    if (input.substr(peg$currPos, 2) === peg$c59) {
      peg$currPos += 2;
      // Negative lookahead: the next character must not be in peg$c61.
      peg$silentFails++;
      var blocked = peg$c61.test(input.charAt(peg$currPos));
      peg$silentFails--;
      if (!blocked) {
        peg$reportedPos = start;
        return peg$c63(book);
      }
    } else {
      if (peg$silentFails === 0) { peg$fail(peg$c60); }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parseps151_bcv() {
  // Grammar rule: ps151_bcv = ps151_bc "." integer → peg$c64(bc, verse).
  var start = peg$currPos;
  var bc = peg$parseps151_bc();
  if (bc !== peg$FAILED) {
    if (input.charCodeAt(peg$currPos) === 46) {  // "."
      peg$currPos++;
      var verse = peg$parseinteger();
      if (verse !== peg$FAILED) {
        peg$reportedPos = start;
        return peg$c64(bc, verse);
      }
    } else {
      if (peg$silentFails === 0) { peg$fail(peg$c23); }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsev_letter() {
  // Grammar rule: v_letter = v_explicit? integer sp
  //   !(<c44 char> space <c46 5-char>)   — not the "ff"-style suffix —
  //   [peg$c65-class letter] ![peg$c48-class char]
  // → peg$c67(integer). The trailing letter is consumed but only the
  // integer feeds the action, exactly as in the generated original.
  var start = peg$currPos;
  peg$parsev_explicit();  // optional verse marker
  var value = peg$parseinteger();
  if (value !== peg$FAILED && peg$parsesp() !== peg$FAILED) {
    // Negative lookahead for the suffix phrase.
    var la = peg$currPos;
    peg$silentFails++;
    var isSuffix = false;
    if (input.substr(peg$currPos, 1).toLowerCase() === peg$c44) {
      peg$currPos++;
      if (peg$parsespace() !== peg$FAILED) {
        isSuffix = input.substr(peg$currPos, 5).toLowerCase() === peg$c46;
      }
    }
    peg$silentFails--;
    peg$currPos = la;  // lookahead never consumes
    if (!isSuffix) {
      if (peg$c65.test(input.charAt(peg$currPos))) {
        peg$currPos++;
        // Second negative lookahead: letter must end the token.
        peg$silentFails++;
        var blocked = peg$c48.test(input.charAt(peg$currPos));
        peg$silentFails--;
        if (!blocked) {
          peg$reportedPos = start;
          return peg$c67(value);
        }
      } else {
        if (peg$silentFails === 0) { peg$fail(peg$c66); }
      }
    }
  }
  peg$currPos = start;
  return peg$c1;
}
function peg$parsev() {
var s0, s1, s2;
s0 = peg$currPos;
s1 = peg$parsev_explicit();
if (s1 === peg$FAILED) {
s1 = peg$c2;
}
if (s1 !== peg$FAILED) {
s2 = peg$parseinteger();
if (s2 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c67(s2);
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsec_explicit() {
var s0, s1, s2, s3, s4, s5, s6;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
s2 = peg$currPos;
if (input.substr(peg$currPos, 2).toLowerCase() === peg$c68) {
s3 = input.substr(peg$currPos, 2);
peg$currPos += 2;
} else {
s3 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c69); }
}
if (s3 !== peg$FAILED) {
if (input.substr(peg$currPos, 3).toLowerCase() === peg$c70) {
s4 = input.substr(peg$currPos, 3);
peg$currPos += 3;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c71); }
}
if (s4 === peg$FAILED) {
if (input.substr(peg$currPos, 2).toLowerCase() === peg$c72) {
s4 = input.substr(peg$currPos, 2);
peg$currPos += 2;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c73); }
}
if (s4 === peg$FAILED) {
s4 = peg$currPos;
s5 = peg$c74;
if (s5 !== peg$FAILED) {
s6 = peg$parseabbrev();
if (s6 === peg$FAILED) {
s6 = peg$c2;
}
if (s6 !== peg$FAILED) {
s5 = [s5, s6];
s4 = s5;
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
}
}
if (s4 !== peg$FAILED) {
s3 = [s3, s4];
s2 = s3;
} else {
peg$currPos = s2;
s2 = peg$c1;
}
} else {
peg$currPos = s2;
s2 = peg$c1;
}
if (s2 !== peg$FAILED) {
s3 = peg$parsesp();
if (s3 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c75();
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsev_explicit() {
var s0, s1, s2, s3, s4;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
s2 = peg$currPos;
if (input.substr(peg$currPos, 4).toLowerCase() === peg$c76) {
s3 = input.substr(peg$currPos, 4);
peg$currPos += 4;
} else {
s3 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c77); }
}
if (s3 !== peg$FAILED) {
if (input.substr(peg$currPos, 1).toLowerCase() === peg$c44) {
s4 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c45); }
}
if (s4 === peg$FAILED) {
s4 = peg$c74;
}
if (s4 !== peg$FAILED) {
s3 = [s3, s4];
s2 = s3;
} else {
peg$currPos = s2;
s2 = peg$c1;
}
} else {
peg$currPos = s2;
s2 = peg$c1;
}
if (s2 !== peg$FAILED) {
s3 = peg$currPos;
peg$silentFails++;
if (peg$c48.test(input.charAt(peg$currPos))) {
s4 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c49); }
}
peg$silentFails--;
if (s4 === peg$FAILED) {
s3 = peg$c9;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
if (s3 !== peg$FAILED) {
s4 = peg$parsesp();
if (s4 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c78();
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsecv_sep() {
var s0, s1, s2, s3;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 44) {
s2 = peg$c19;
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c20); }
}
if (s2 !== peg$FAILED) {
s3 = peg$parsesp();
if (s3 !== peg$FAILED) {
s1 = [s1, s2, s3];
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsecv_sep_weak() {
var s0, s1, s2, s3;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
if (peg$c79.test(input.charAt(peg$currPos))) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c80); }
}
if (s2 !== peg$FAILED) {
s3 = peg$parsesp();
if (s3 !== peg$FAILED) {
s1 = [s1, s2, s3];
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
if (s0 === peg$FAILED) {
s0 = peg$parsespace();
}
return s0;
}
function peg$parsesequence_sep() {
var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9;
s0 = peg$currPos;
s1 = [];
if (peg$c81.test(input.charAt(peg$currPos))) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c82); }
}
if (s2 === peg$FAILED) {
s2 = peg$currPos;
if (input.charCodeAt(peg$currPos) === 46) {
s3 = peg$c22;
peg$currPos++;
} else {
s3 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s3 !== peg$FAILED) {
s4 = peg$currPos;
peg$silentFails++;
s5 = peg$currPos;
s6 = peg$parsesp();
if (s6 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s7 = peg$c22;
peg$currPos++;
} else {
s7 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s7 !== peg$FAILED) {
s8 = peg$parsesp();
if (s8 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s9 = peg$c22;
peg$currPos++;
} else {
s9 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s9 !== peg$FAILED) {
s6 = [s6, s7, s8, s9];
s5 = s6;
} else {
peg$currPos = s5;
s5 = peg$c1;
}
} else {
peg$currPos = s5;
s5 = peg$c1;
}
} else {
peg$currPos = s5;
s5 = peg$c1;
}
} else {
peg$currPos = s5;
s5 = peg$c1;
}
peg$silentFails--;
if (s5 === peg$FAILED) {
s4 = peg$c9;
} else {
peg$currPos = s4;
s4 = peg$c1;
}
if (s4 !== peg$FAILED) {
s3 = [s3, s4];
s2 = s3;
} else {
peg$currPos = s2;
s2 = peg$c1;
}
} else {
peg$currPos = s2;
s2 = peg$c1;
}
if (s2 === peg$FAILED) {
if (input.substr(peg$currPos, 1).toLowerCase() === peg$c44) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c45); }
}
if (s2 === peg$FAILED) {
s2 = peg$parsespace();
}
}
}
if (s2 !== peg$FAILED) {
while (s2 !== peg$FAILED) {
s1.push(s2);
if (peg$c81.test(input.charAt(peg$currPos))) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c82); }
}
if (s2 === peg$FAILED) {
s2 = peg$currPos;
if (input.charCodeAt(peg$currPos) === 46) {
s3 = peg$c22;
peg$currPos++;
} else {
s3 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s3 !== peg$FAILED) {
s4 = peg$currPos;
peg$silentFails++;
s5 = peg$currPos;
s6 = peg$parsesp();
if (s6 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s7 = peg$c22;
peg$currPos++;
} else {
s7 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s7 !== peg$FAILED) {
s8 = peg$parsesp();
if (s8 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s9 = peg$c22;
peg$currPos++;
} else {
s9 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s9 !== peg$FAILED) {
s6 = [s6, s7, s8, s9];
s5 = s6;
} else {
peg$currPos = s5;
s5 = peg$c1;
}
} else {
peg$currPos = s5;
s5 = peg$c1;
}
} else {
peg$currPos = s5;
s5 = peg$c1;
}
} else {
peg$currPos = s5;
s5 = peg$c1;
}
peg$silentFails--;
if (s5 === peg$FAILED) {
s4 = peg$c9;
} else {
peg$currPos = s4;
s4 = peg$c1;
}
if (s4 !== peg$FAILED) {
s3 = [s3, s4];
s2 = s3;
} else {
peg$currPos = s2;
s2 = peg$c1;
}
} else {
peg$currPos = s2;
s2 = peg$c1;
}
if (s2 === peg$FAILED) {
if (input.substr(peg$currPos, 1).toLowerCase() === peg$c44) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c45); }
}
if (s2 === peg$FAILED) {
s2 = peg$parsespace();
}
}
}
}
} else {
s1 = peg$c1;
}
if (s1 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c83();
}
s0 = s1;
return s0;
}
function peg$parserange_sep() {
var s0, s1, s2, s3, s4, s5;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
s2 = [];
s3 = peg$currPos;
if (peg$c84.test(input.charAt(peg$currPos))) {
s4 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c85); }
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesp();
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
if (s3 === peg$FAILED) {
s3 = peg$currPos;
if (input.substr(peg$currPos, 1).toLowerCase() === peg$c86) {
s4 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c87); }
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesp();
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
}
if (s3 !== peg$FAILED) {
while (s3 !== peg$FAILED) {
s2.push(s3);
s3 = peg$currPos;
if (peg$c84.test(input.charAt(peg$currPos))) {
s4 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c85); }
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesp();
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
if (s3 === peg$FAILED) {
s3 = peg$currPos;
if (input.substr(peg$currPos, 1).toLowerCase() === peg$c86) {
s4 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s4 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c87); }
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesp();
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
}
}
} else {
s2 = peg$c1;
}
if (s2 !== peg$FAILED) {
s1 = [s1, s2];
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsetitle() {
var s0, s1, s2;
s0 = peg$currPos;
s1 = peg$parsecv_sep();
if (s1 === peg$FAILED) {
s1 = peg$parsesequence_sep();
}
if (s1 === peg$FAILED) {
s1 = peg$c2;
}
if (s1 !== peg$FAILED) {
if (input.substr(peg$currPos, 11).toLowerCase() === peg$c51) {
s2 = input.substr(peg$currPos, 11);
peg$currPos += 11;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c52); }
}
if (s2 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c88(s2);
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsein_book_of() {
var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
if (input.substr(peg$currPos, 4) === peg$c89) {
s2 = peg$c89;
peg$currPos += 4;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c90); }
}
if (s2 === peg$FAILED) {
if (input.substr(peg$currPos, 2) === peg$c91) {
s2 = peg$c91;
peg$currPos += 2;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c92); }
}
if (s2 === peg$FAILED) {
if (input.substr(peg$currPos, 2) === peg$c93) {
s2 = peg$c93;
peg$currPos += 2;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c94); }
}
}
}
if (s2 !== peg$FAILED) {
s3 = peg$parsesp();
if (s3 !== peg$FAILED) {
s4 = peg$currPos;
if (input.substr(peg$currPos, 3) === peg$c95) {
s5 = peg$c95;
peg$currPos += 3;
} else {
s5 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c96); }
}
if (s5 !== peg$FAILED) {
s6 = peg$parsesp();
if (s6 !== peg$FAILED) {
if (input.substr(peg$currPos, 4) === peg$c97) {
s7 = peg$c97;
peg$currPos += 4;
} else {
s7 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c98); }
}
if (s7 !== peg$FAILED) {
s8 = peg$parsesp();
if (s8 !== peg$FAILED) {
if (input.substr(peg$currPos, 2) === peg$c91) {
s9 = peg$c91;
peg$currPos += 2;
} else {
s9 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c92); }
}
if (s9 !== peg$FAILED) {
s10 = peg$parsesp();
if (s10 !== peg$FAILED) {
s5 = [s5, s6, s7, s8, s9, s10];
s4 = s5;
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
if (s4 === peg$FAILED) {
s4 = peg$c2;
}
if (s4 !== peg$FAILED) {
s1 = [s1, s2, s3, s4];
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parseabbrev() {
var s0, s1, s2, s3, s4, s5, s6, s7, s8;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s2 = peg$c22;
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s2 !== peg$FAILED) {
s3 = peg$currPos;
peg$silentFails++;
s4 = peg$currPos;
s5 = peg$parsesp();
if (s5 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s6 = peg$c22;
peg$currPos++;
} else {
s6 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s6 !== peg$FAILED) {
s7 = peg$parsesp();
if (s7 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 46) {
s8 = peg$c22;
peg$currPos++;
} else {
s8 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c23); }
}
if (s8 !== peg$FAILED) {
s5 = [s5, s6, s7, s8];
s4 = s5;
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
peg$silentFails--;
if (s4 === peg$FAILED) {
s3 = peg$c9;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
if (s3 !== peg$FAILED) {
s1 = [s1, s2, s3];
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsetranslation_sequence_enclosed() {
var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
if (peg$c99.test(input.charAt(peg$currPos))) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c100); }
}
if (s2 !== peg$FAILED) {
s3 = peg$parsesp();
if (s3 !== peg$FAILED) {
s4 = peg$currPos;
s5 = peg$parsetranslation();
if (s5 !== peg$FAILED) {
s6 = [];
s7 = peg$currPos;
s8 = peg$parsesequence_sep();
if (s8 !== peg$FAILED) {
s9 = peg$parsetranslation();
if (s9 !== peg$FAILED) {
s8 = [s8, s9];
s7 = s8;
} else {
peg$currPos = s7;
s7 = peg$c1;
}
} else {
peg$currPos = s7;
s7 = peg$c1;
}
while (s7 !== peg$FAILED) {
s6.push(s7);
s7 = peg$currPos;
s8 = peg$parsesequence_sep();
if (s8 !== peg$FAILED) {
s9 = peg$parsetranslation();
if (s9 !== peg$FAILED) {
s8 = [s8, s9];
s7 = s8;
} else {
peg$currPos = s7;
s7 = peg$c1;
}
} else {
peg$currPos = s7;
s7 = peg$c1;
}
}
if (s6 !== peg$FAILED) {
s5 = [s5, s6];
s4 = s5;
} else {
peg$currPos = s4;
s4 = peg$c1;
}
} else {
peg$currPos = s4;
s4 = peg$c1;
}
if (s4 !== peg$FAILED) {
s5 = peg$parsesp();
if (s5 !== peg$FAILED) {
if (peg$c101.test(input.charAt(peg$currPos))) {
s6 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s6 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c102); }
}
if (s6 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c103(s4);
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsetranslation_sequence() {
var s0, s1, s2, s3, s4, s5, s6, s7, s8;
s0 = peg$currPos;
s1 = peg$parsesp();
if (s1 !== peg$FAILED) {
s2 = peg$currPos;
if (input.charCodeAt(peg$currPos) === 44) {
s3 = peg$c19;
peg$currPos++;
} else {
s3 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c20); }
}
if (s3 !== peg$FAILED) {
s4 = peg$parsesp();
if (s4 !== peg$FAILED) {
s3 = [s3, s4];
s2 = s3;
} else {
peg$currPos = s2;
s2 = peg$c1;
}
} else {
peg$currPos = s2;
s2 = peg$c1;
}
if (s2 === peg$FAILED) {
s2 = peg$c2;
}
if (s2 !== peg$FAILED) {
s3 = peg$currPos;
s4 = peg$parsetranslation();
if (s4 !== peg$FAILED) {
s5 = [];
s6 = peg$currPos;
s7 = peg$parsesequence_sep();
if (s7 !== peg$FAILED) {
s8 = peg$parsetranslation();
if (s8 !== peg$FAILED) {
s7 = [s7, s8];
s6 = s7;
} else {
peg$currPos = s6;
s6 = peg$c1;
}
} else {
peg$currPos = s6;
s6 = peg$c1;
}
while (s6 !== peg$FAILED) {
s5.push(s6);
s6 = peg$currPos;
s7 = peg$parsesequence_sep();
if (s7 !== peg$FAILED) {
s8 = peg$parsetranslation();
if (s8 !== peg$FAILED) {
s7 = [s7, s8];
s6 = s7;
} else {
peg$currPos = s6;
s6 = peg$c1;
}
} else {
peg$currPos = s6;
s6 = peg$c1;
}
}
if (s5 !== peg$FAILED) {
s4 = [s4, s5];
s3 = s4;
} else {
peg$currPos = s3;
s3 = peg$c1;
}
} else {
peg$currPos = s3;
s3 = peg$c1;
}
if (s3 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c103(s3);
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parsetranslation() {
var s0, s1, s2, s3;
s0 = peg$currPos;
if (input.charCodeAt(peg$currPos) === 30) {
s1 = peg$c104;
peg$currPos++;
} else {
s1 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c105); }
}
if (s1 !== peg$FAILED) {
s2 = peg$parseany_integer();
if (s2 !== peg$FAILED) {
if (input.charCodeAt(peg$currPos) === 30) {
s3 = peg$c104;
peg$currPos++;
} else {
s3 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c105); }
}
if (s3 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c106(s2);
s0 = s1;
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
} else {
peg$currPos = s0;
s0 = peg$c1;
}
return s0;
}
function peg$parseinteger() {
var res;
if (res = /^[0-9]{1,3}(?!\d|,000)/.exec(input.substr(peg$currPos))) {
peg$reportedPos = peg$currPos;
peg$currPos += res[0].length;
return {"type": "integer", "value": parseInt(res[0], 10), "indices": [peg$reportedPos, peg$currPos - 1]}
} else {
return peg$c1;
}
}
function peg$parseany_integer() {
var res;
if (res = /^[0-9]+/.exec(input.substr(peg$currPos))) {
peg$reportedPos = peg$currPos;
peg$currPos += res[0].length;
return {"type": "integer", "value": parseInt(res[0], 10), "indices": [peg$reportedPos, peg$currPos - 1]}
} else {
return peg$c1;
}
}
function peg$parseword() {
var s0, s1, s2;
s0 = peg$currPos;
s1 = [];
if (peg$c110.test(input.charAt(peg$currPos))) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c111); }
}
if (s2 !== peg$FAILED) {
while (s2 !== peg$FAILED) {
s1.push(s2);
if (peg$c110.test(input.charAt(peg$currPos))) {
s2 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s2 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c111); }
}
}
} else {
s1 = peg$c1;
}
if (s1 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c112(s1);
}
s0 = s1;
return s0;
}
function peg$parseword_parenthesis() {
var s0, s1;
s0 = peg$currPos;
if (peg$c99.test(input.charAt(peg$currPos))) {
s1 = input.charAt(peg$currPos);
peg$currPos++;
} else {
s1 = peg$FAILED;
if (peg$silentFails === 0) { peg$fail(peg$c100); }
}
if (s1 !== peg$FAILED) {
peg$reportedPos = s0;
s1 = peg$c113(s1);
}
s0 = s1;
return s0;
}
function peg$parsesp() {
var s0;
s0 = peg$parsespace();
if (s0 === peg$FAILED) {
s0 = peg$c2;
}
return s0;
}
function peg$parsespace() {
var res;
if (res = /^[\s\xa0*]+/.exec(input.substr(peg$currPos))) {
peg$reportedPos = peg$currPos;
peg$currPos += res[0].length;
return [];
}
return peg$c1;
}
peg$result = peg$startRuleFunction();
if (peg$result !== peg$FAILED && peg$currPos === input.length) {
return peg$result;
} else {
if (peg$result !== peg$FAILED && peg$currPos < input.length) {
peg$fail({ type: "end", description: "end of input" });
}
throw peg$buildException(null, peg$maxFailExpected, peg$maxFailPos);
}
}
return {
SyntaxError: SyntaxError,
parse: parse
};
})();
}).call(this);<|fim▁end|> | if (s6 === peg$FAILED) {
s6 = peg$parsebc_title();
if (s6 === peg$FAILED) {
s6 = peg$parseps151_bcv(); |
<|file_name|>whois_ati_tn.py<|end_file_name|><|fim▁begin|>from .base import ScannerBase
class WhoisAtiTnScanner(ScannerBase):
def __init__(self, *args):
super(WhoisAtiTnScanner, self).__init__(*args)
self._tokenizer += [
'skip_empty_line',
'scan_available',
'scan_disclaimer',
'scan_keyvalue'
]
def scan_available(self):
if self._input.skip("^Domain (.+) not found"):<|fim▁hole|> if self._input.match("All rights reserved"):
self._ast['field:disclaimer'] = "\n".join(self._scan_lines_to_array("(.+)\n"))
return True<|fim▁end|> | self._ast['status:available'] = True
return True
def scan_disclaimer(self): |
<|file_name|>ArtistIDsOptionsScreen.tsx<|end_file_name|><|fim▁begin|>import { StackScreenProps } from "@react-navigation/stack"
import { ArtworkFilterNavigationStack } from "app/Components/ArtworkFilter"
import { ArtworksFiltersStore } from "app/Components/ArtworkFilter/ArtworkFilterStore"
import React from "react"
import { ArtistIDsArtworksOptionsScreen } from "./ArtistIDsArtworksOptions"
import { ArtistIDsSaleArtworksOptionsScreen } from "./ArtistIDsSaleArtworksOptionsScreen"
export const ArtistIDsOptionsScreen = (
props: StackScreenProps<ArtworkFilterNavigationStack, "ArtistIDsOptionsScreen">
) => {
const filterType = ArtworksFiltersStore.useStoreState((state) => state.filterType)
<|fim▁hole|> return <ArtistIDsSaleArtworksOptionsScreen {...props} />
}
return <ArtistIDsArtworksOptionsScreen {...props} />
}<|fim▁end|> | if (filterType === "saleArtwork") { |
<|file_name|>Main.java<|end_file_name|><|fim▁begin|>package be.kwakeroni.workshop.java9.solution;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
public class Main {
public static void main(String[] args) throws Exception {<|fim▁hole|> }
private final StudentParser parser = new StudentParser();
private void handleFiles(String[] args) throws Exception {
if (args.length == 0) {
handleGroups(
parser.parseStudents(Paths.get("group1.csv")),
parser.parseStudents(Paths.get("group2.csv")));
} else {
handleGroups(parse(args));
}
}
@SuppressWarnings("unchecked")
private List<Student>[] parse(String[] paths) {
return Arrays.stream(paths)
.map(Paths::get)
.map(this::parse)
.toArray(List[]::new);
}
private List<Student> parse(Path path) {
try {
return parser.parseStudents(path);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@java.lang.SafeVarargs
private void handleGroups(List<Student>... groups) {
for (int i = 0; i < groups.length; i++) {
System.out.println("- Group #%s".formatted(i));
groups[i].forEach(student -> System.out.println("-- %s %s (aged %s)".formatted(student.lastName(), student.firstName(), student.age())));
}
}
}<|fim▁end|> | new Main().handleFiles(args); |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "QiuDaBao.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)<|fim▁end|> | #!/usr/bin/env python |
<|file_name|>car.js<|end_file_name|><|fim▁begin|>/*
Um carro, e só
*/
class Car {
constructor(posx, posy, width, height, lifeTime, color) {
<|fim▁hole|> this.width = width;
this.height = height;
this.im = new Image();
this.im.src = carSprite.src;
}
draw(ctx) {
ctx.save();
ctx.fillStyle = this.color;
ctx.lineWidth = 1;
ctx.drawImage(
this.im,
carSprite.x,
carSprite.y,
carSprite.w,
carSprite.h,
this.x,
this.y,
this.width,
this.height
);
}
}<|fim▁end|> | this.x = posx;
this.y = posy;
this.lifeTime = lifeTime;
this.color = color;
|
<|file_name|>camera3.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python3
import asyncio
import subprocess
import numpy as np
import time
comm = None
class Camera:
def __init__(self, notify):
self._process = None
self._now_pos = np.array([0., 0., 0.])
self._running = False
self._notify = notify
@asyncio.coroutine
def connect(self):
self._process = yield from asyncio.create_subprocess_exec(
'python2', 'camera.py',
stdin=asyncio.subprocess.PIPE,<|fim▁hole|> self._running = True
@asyncio.coroutine
def run(self):
while self._running:
data = yield from self._process.stdout.readline()
print(data)
self._now_pos = np.array(list(map(float, data.split())))
yield from self._notify(time.time(), self._now_pos)
def stop(self):
self._running = False
self._process.terminate()<|fim▁end|> | stdout=asyncio.subprocess.PIPE
) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:mod:`MDAnalysis` --- analysis of molecular simulations in python
=================================================================
MDAnalysis (https://www.mdanalysis.org) is a python toolkit to analyze
molecular dynamics trajectories generated by CHARMM, NAMD, Amber,
Gromacs, or LAMMPS.<|fim▁hole|>atomic coordinates through numpy arrays. This provides a flexible and
relatively fast framework for complex analysis tasks. In addition,
CHARMM-style atom selection commands are implemented. Trajectories can
also be manipulated (for instance, fit to a reference structure) and
written out. Time-critical code is written in C for speed.
Help is also available through the mailinglist at
http://groups.google.com/group/mdnalysis-discussion
Please report bugs and feature requests through the issue tracker at
https://github.com/MDAnalysis/mdanalysis/issues
Citation
--------
When using MDAnalysis in published work, please cite
R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
MDAnalysis: A Python package for the rapid analysis of molecular dynamics
simulations. In S. Benthall and S. Rostrup, editors, Proceedings of the 15th
Python in Science Conference, pages 98-105, Austin, TX, 2016. SciPy,
doi:10.25080/majora-629e541a-00e
N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and
O. Beckstein. MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics
Simulations. J. Comput. Chem. 32 (2011), 2319--2327, doi:`10.1002/jcc.21787`_
https://www.mdanalysis.org
For citations of included algorithms and sub-modules please see the references_.
.. _`10.1002/jcc.21787`: http://dx.doi.org/10.1002/jcc.21787
.. _references: https://docs.mdanalysis.org/documentation_pages/references.html
Getting started
---------------
Import the package::
>>> import MDAnalysis
(note that not everything in MDAnalysis is imported right away; for
additional functionality you might have to import sub-modules
separately, e.g. for RMS fitting ``import MDAnalysis.analysis.align``.)
Build a "universe" from a topology (PSF, PDB) and a trajectory (DCD, XTC/TRR);
here we are assuming that PSF, DCD, etc contain file names. If you don't have
trajectories at hand you can play with the ones that come with MDAnalysis for
testing (see below under `Examples`_)::
>>> u = MDAnalysis.Universe(PSF, DCD)
Select the C-alpha atoms and store them as a group of atoms::
>>> ca = u.select_atoms('name CA')
>>> len(ca)
214
Calculate the centre of mass of the CA and of all atoms::
>>> ca.center_of_mass()
array([ 0.06873595, -0.04605918, -0.24643682])
>>> u.atoms.center_of_mass()
array([-0.01094035, 0.05727601, -0.12885778])
Calculate the CA end-to-end distance (in angstroem)::
>>> import numpy as np
>>> coord = ca.positions
>>> v = coord[-1] - coord[0] # last Ca minus first one
>>> np.sqrt(np.dot(v, v,))
10.938133
Define a function eedist():
>>> def eedist(atoms):
... coord = atoms.positions
... v = coord[-1] - coord[0]
... return sqrt(dot(v, v,))
...
>>> eedist(ca)
10.938133
and analyze all timesteps *ts* of the trajectory::
>>> for ts in u.trajectory:
... print eedist(ca)
10.9381
10.8459
10.4141
9.72062
....
See Also
--------
:class:`MDAnalysis.core.universe.Universe` for details
Examples
--------
MDAnalysis comes with a number of real trajectories for testing. You
can also use them to explore the functionality and ensure that
everything is working properly::
from MDAnalysis import *
from MDAnalysis.tests.datafiles import PSF,DCD, PDB,XTC
u_dims_adk = Universe(PSF,DCD)
u_eq_adk = Universe(PDB, XTC)
The PSF and DCD file are a closed-form-to-open-form transition of
Adenylate Kinase (from [Beckstein2009]_) and the PDB+XTC file are ten
frames from a Gromacs simulation of AdK solvated in TIP4P water with
the OPLS/AA force field.
.. [Beckstein2009] O. Beckstein, E.J. Denning, J.R. Perilla and T.B. Woolf,
Zipping and Unzipping of Adenylate Kinase: Atomistic Insights into the
Ensemble of Open <--> Closed Transitions. J Mol Biol 394 (2009), 160--176,
doi:10.1016/j.jmb.2009.09.009
"""
__all__ = ['Universe', 'Writer', 'fetch_mmtf',
'AtomGroup', 'ResidueGroup', 'SegmentGroup']
import logging
import warnings
logger = logging.getLogger("MDAnalysis.__init__")
from .version import __version__
try:
from .authors import __authors__
except ImportError:
logger.info('Could not find authors.py, __authors__ will be empty.')
__authors__ = []
# Registry of Readers, Parsers and Writers known to MDAnalysis
# Metaclass magic fills these as classes are declared.
_READERS = {}
_READER_HINTS = {}
_SINGLEFRAME_WRITERS = {}
_MULTIFRAME_WRITERS = {}
_PARSERS = {}
_PARSER_HINTS = {}
_SELECTION_WRITERS = {}
_CONVERTERS = {}
# Registry of TopologyAttributes
_TOPOLOGY_ATTRS = {} # {attrname: cls}
_TOPOLOGY_TRANSPLANTS = {} # {name: [attrname, method, transplant class]}
_TOPOLOGY_ATTRNAMES = {} # {lower case name w/o _ : name}
# custom exceptions and warnings
from .exceptions import (
SelectionError, NoDataError, ApplicationError, SelectionWarning,
MissingDataWarning, ConversionWarning, FileFormatWarning,
StreamWarning
)
from .lib import log
from .lib.log import start_logging, stop_logging
logging.getLogger("MDAnalysis").addHandler(log.NullHandler())
del logging
# only MDAnalysis DeprecationWarnings are loud by default
warnings.filterwarnings(action='once', category=DeprecationWarning,
module='MDAnalysis')
from . import units
# Bring some often used objects into the current namespace
from .core.universe import Universe, Merge
from .core.groups import AtomGroup, ResidueGroup, SegmentGroup
from .coordinates.core import writer as Writer
# After Universe import
from .coordinates.MMTF import fetch_mmtf
from . import converters
from .due import due, Doi, BibTeX
due.cite(Doi("10.25080/majora-629e541a-00e"),
description="Molecular simulation analysis library",
path="MDAnalysis", cite_module=True)
due.cite(Doi("10.1002/jcc.21787"),
description="Molecular simulation analysis library",
path="MDAnalysis", cite_module=True)
del Doi, BibTeX<|fim▁end|> |
It allows one to read molecular dynamics trajectories and access the |
<|file_name|>ListMapper.java<|end_file_name|><|fim▁begin|>package jkanvas.table;
import java.util.Objects;
import jkanvas.animation.GenericPaintList;
/**
* Maps rows of tables to shapes.
*
* @author Joschi <josua.krause@gmail.com>
* @param <T> The list of shapes.
*/
public abstract class ListMapper<T extends GenericPaintList<?>> {
/** The table. */
private final DataTable table;
/**
* Creates a map for the table.
*
* @param table The table.
*/
public ListMapper(final DataTable table) {
this.table = Objects.requireNonNull(table);
}
/**
* Getter.
*
* @return Creates a new list.
*/
protected abstract T createList();
/**
* Creates a shape for the given row.
*
* @param list The shape list.
* @param row The row.
* @return The index of the new shape.
*/
protected abstract int createForRow(T list, int row);
/**
* Fills the list.
*
* @return The list.
*/<|fim▁hole|> final int rows = table.rows();
for(int el = 0; el < rows; ++el) {
final int i = createForRow(res, el);
// TODO allow arbitrary mappings
if(i != el) throw new IllegalStateException(
"unpredicted index: " + i + " != " + el);
}
return res;
}
/** The list. */
private T list;
/**
* Getter.
*
* @return The list.
*/
public T getList() {
if(list == null) {
list = fillList();
}
return list;
}
/**
* Getter.
*
* @return The table.
*/
public DataTable getTable() {
return table;
}
/**
* Getter.
*
* @param row The row.
* @return The index of the shape in the list.
*/
public int getIndexForRow(final int row) {
return row;
}
/**
* Getter.
*
* @param index The index of the shape.
* @return The row in the table.
*/
public int getRowForIndex(final int index) {
return index;
}
}<|fim▁end|> | private T fillList() {
final T res = createList(); |
<|file_name|>nexthop_gendecoder.go<|end_file_name|><|fim▁begin|>// Code generated - DO NOT EDIT.
package topology
import (
"github.com/skydive-project/skydive/graffiti/getter"
"strings"
)
func (obj *NextHop) GetFieldBool(key string) (bool, error) {
return false, getter.ErrFieldNotFound
}
func (obj *NextHop) GetFieldInt64(key string) (int64, error) {
switch key {
case "Priority":
return int64(obj.Priority), nil
case "IfIndex":
return int64(obj.IfIndex), nil
}
return 0, getter.ErrFieldNotFound
}
func (obj *NextHop) GetFieldString(key string) (string, error) {
switch key {
case "IP":
return obj.IP.String(), nil
case "MAC":
return string(obj.MAC), nil
}
return "", getter.ErrFieldNotFound
}
func (obj *NextHop) GetFieldKeys() []string {
return []string{
"Priority",
"IP",
"MAC",
"IfIndex",
}
}
func (obj *NextHop) MatchBool(key string, predicate getter.BoolPredicate) bool {
return false
}
func (obj *NextHop) MatchInt64(key string, predicate getter.Int64Predicate) bool {
if b, err := obj.GetFieldInt64(key); err == nil {
return predicate(b)
}
return false
}
func (obj *NextHop) MatchString(key string, predicate getter.StringPredicate) bool {
if b, err := obj.GetFieldString(key); err == nil {
return predicate(b)
}
return false
}
func (obj *NextHop) GetField(key string) (interface{}, error) {
if s, err := obj.GetFieldString(key); err == nil {
return s, nil
}<|fim▁hole|> if i, err := obj.GetFieldInt64(key); err == nil {
return i, nil
}
return nil, getter.ErrFieldNotFound
}
func init() {
strings.Index("", ".")
}<|fim▁end|> | |
<|file_name|>list.controller.ts<|end_file_name|><|fim▁begin|>class ListController implements ng.IComponentController {
private itemsList: Array<any>;
private sortType: string;
private sortReverse: Boolean;
constructor($stateParams) {
console.log('list controller');
console.log("this.itemsList=", this.itemsList);
}
private sort(fieldName) {
console.log('sort called!');
if (this.sortType !== fieldName) {
this.sortType = fieldName;
this.sortReverse = true;
} else {
this.sortReverse = !this.sortReverse;
}
}
}
export default ListController;<|fim▁hole|><|fim▁end|> | ListController.$inject = ['$stateParams']; |
<|file_name|>TirageModele.py<|end_file_name|><|fim▁begin|>from google.appengine.ext import ndb
class Tirage(ndb.Model):<|fim▁hole|><|fim▁end|> | nomtirage = ndb.StringProperty()
datecreation = ndb.DateTimeProperty(auto_now_add=True) |
<|file_name|>complete_byte_raw_string_quotes.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>fn main() {
br###<caret>
}<|fim▁end|> | |
<|file_name|>set.cc<|end_file_name|><|fim▁begin|>// Copyright (C) 2003-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <testsuite_performance.h>
template<typename Container, int Iter>
void
do_loop()
{
// avoid excessive swap file use!
static const unsigned max_size = 250000;
// make results less random while
static const unsigned iterations = 10;
// keeping the total time reasonable
static const unsigned step = 50000;
using namespace std;
typedef int test_type;
typedef Container container_type;
typedef vector<test_type> vector_type;
// Initialize sorted array.
vector_type v(max_size, 0);
for (unsigned int i = 0; i != max_size; ++i)
v[i] = i;
for (unsigned int count = step; count <= max_size; count += step)
{
for (unsigned i = 0; i != iterations; ++i)
{
container_type test_set;
typename container_type::iterator iter = test_set.end();
// Each insert in amortized constant time (Table 69)
for (unsigned j = 0; j != count; ++j)<|fim▁hole|> iter = test_set.insert(iter, v[j]);
}
}
}
int
main()
{
#ifdef TEST_S1
#define thread_type false
#endif
#ifdef TEST_T1
#define thread_type true
#endif
typedef __gnu_test::sets<int, thread_type>::type container_types;
typedef test_sequence<thread_type> test_type;
test_type test("insert_from_sorted");
__gnu_cxx::typelist::apply(test, container_types());
return 0;
}<|fim▁end|> | |
<|file_name|>p4.cpp<|end_file_name|><|fim▁begin|>#include <set>
#include <map>
#include <string>
#include <vector>
#include <queue>
#include <stack>
#include <cmath>
#include <list>
#include <cassert>
#include <climits>
#include <cstring>
#include <cstdio>
#include <cstdlib>
#include <cctype>
#include <fstream>
#include <sstream>
#include <iostream>
#include <algorithm>
#include <stdexcept>
using namespace std;
#define LL long long
#define F first
#define S second
#define PB push_back
#define PF push front
#define MP make_pair
#define REP(x, n) for(int x=0; x<(n); ++x)
#define FOR(x, b, e) for(int x=b; x<=(e); ++x)
#define FORD(x, b, e) for(int x=b; x>=(e); --x)
#define VAR(v,n) __typeof(n) v=(n)
#define FOREACH(i,c) for(VAR(i,(c).begin());i!=(c).end();++i)
#define MOD(x, n) ((x)%(n)+(n))%(n)
#define SZ(x) (int((x).size()))
#define ALL(x) ((x).begin()),((x).end())
#define SORT(v) sort((v).begin(),(v).end())
#define UNIQUE(v) SORT(v),(v).erase(unique((v).begin(),(v).end()),(v).end())
LL GCD( LL a , LL b ) { while( b ) b ^= a ^= b ^= a %= b ; return a ; }
LL LCM( LL a , LL b ) { return a * ( b / GCD( a , b ) ) ; }
typedef vector<int> VI;<|fim▁hole|>typedef vector<bool> VB;
typedef vector<double> VD;
typedef vector<string> VS;
typedef pair<int, int> PII;
typedef vector<PII> VPII;
typedef pair<LL, LL> PLL;
typedef vector<PLL> VPLL;
typedef list<int> LI;
const double EPS = 10e-9;
const double INF = (1LL << 30);
using namespace std;
std::string int2str( int n )
{
std::ostringstream result;
result << n;
return result.str();
}
int str2int( const std::string& s )
{
int result;
std::istringstream ss( s );
ss >> result;
if (!ss) throw std::invalid_argument( "StrToInt" );
return result;
}
int main() {
#ifndef ONLINE_JUDGE
freopen("input.txt", "r", stdin);
#endif
return 0;
}<|fim▁end|> | typedef vector<VI> VVI;
typedef vector<LL> VLL; |
<|file_name|>string.cpp<|end_file_name|><|fim▁begin|>#include <string.hpp>
#include "yatf/include/yatf.hpp"
using namespace yacppl;
TEST(string, can_be_created) {
string str;
REQUIRE(str == "");
REQUIRE(str.empty());
REQUIRE_EQ(str.length(), 0u);
REQUIRE_EQ(str.size(), 0u);
REQUIRE_FALSE(str == "abc");
}
TEST(string, can_be_constructed_from_cstring) {
{
string str("test_string");
REQUIRE(str == "test_string");
REQUIRE_EQ(str.length(), 11u);
REQUIRE_EQ(str.size(), 11u);
REQUIRE_EQ(*(str.cend() - 1), 'g');
REQUIRE_EQ(*(str.end() - 1), 'g');
REQUIRE_EQ(*(str.cbegin()), 't');
REQUIRE_EQ(*(str.begin()), 't');
}
{
string str("test_string", 4);
REQUIRE(str == "test");
REQUIRE_EQ(str.length(), 4u);
REQUIRE_EQ(str.size(), 4u);
REQUIRE_EQ(*(str.cend() - 1), 't');
REQUIRE_EQ(*(str.end() - 1), 't');
REQUIRE_EQ(*(str.cbegin()), 't');
REQUIRE_EQ(*(str.begin()), 't');
}
}
string get_string() {
return "some_string";
}
TEST(string, can_be_created_from_other_string) {
string str("test_string");
{
auto str2 = str;
REQUIRE(not str.empty());
REQUIRE(not str2.empty());
REQUIRE(str == "test_string");
REQUIRE(str2 == "test_string");
}
{
string str2(str);
REQUIRE(not str.empty());
REQUIRE(not str2.empty());
REQUIRE(str == "test_string");
REQUIRE(str2 == "test_string");
}
{
string str2;
string str3(str2);
REQUIRE_EQ(str3.operator const char *(), nullptr);
}
{
string str2("some_string");
string str3(move(str2));
REQUIRE_EQ(str2.operator const char *(), nullptr);
REQUIRE(str3 == "some_string");
}
str = string("some");<|fim▁hole|> str = nullptr;
REQUIRE(!str);
str = "something";
REQUIRE(str == "something");
auto str2 = get_string();
REQUIRE(str2 == "some_string");
str = str2;
REQUIRE(str == "some_string");
// FIXME
str = string();
str = string(str2);
str = string();
str = string(string("some other"));
}
TEST(string, can_be_iterated) {
string str("test_string");
size_t i = 0;
for (auto c : str) {
REQUIRE_EQ(c, "test_string"[i++]);
}
}
TEST(string, can_be_appended) {
{
string str("hello ");
str.append("world");
REQUIRE_EQ((const char *)str, "hello world");
}
{
string str;
str.append("world");
REQUIRE_EQ((const char *)str, "world");
str.append("hello hello hello");
REQUIRE_EQ((const char *)str, "worldhello hello hello");
REQUIRE_EQ(str.length(), 22u);
str.append(" test test");
REQUIRE_EQ((const char *)str, "worldhello hello hello test test");
REQUIRE_EQ(str.length(), 32u);
}
}
TEST(string, can_get_substring) {
string str("hello world");
auto str2 = str.substring(6, 5);
REQUIRE_EQ((const char *)str2, "world");
auto str3 = str.substring(6, 1024);
REQUIRE_EQ((const char *)str3, "world");
auto str4 = str.substring(11, 1024);
REQUIRE_EQ((const char *)str4, "");
}
TEST(string, can_be_erased) {
string str("hello world");
str.erase(str.begin() + 5, str.end());
REQUIRE_EQ((const char *)str, "hello");
REQUIRE_EQ(str.length(), 5u);
REQUIRE_EQ(str.size(), 11u);
REQUIRE(not str.empty());
str.erase(str.end() - 1, str.end());
REQUIRE_EQ((const char *)str, "hell");
REQUIRE_EQ(str.length(), 4u);
REQUIRE_EQ(str.size(), 11u);
REQUIRE(not str.empty());
}
TEST(string, cannot_be_erased_if_begin_after_end) {
string str("hello world");
str.erase(str.end(), str.begin());
REQUIRE_EQ((const char *)str, "hello world");
}
TEST(string, can_append_chars) {
string str;
for (auto i = 0u; i < 1024u; ++i) {
str.append('a');
REQUIRE_EQ(str.length(), i + 1);
}
}
TEST(string, can_reserve_size) {
string str;
str.reserve(1024);
REQUIRE_EQ(str.size(), 1024u);
for (auto i = 0u; i < 1023u; ++i) {
str.append('a');
REQUIRE_EQ(str.length(), i + 1);
REQUIRE_EQ(str.size(), 1024u);
}
str.append('c');
REQUIRE(str.size() != 1024u);
str.reserve(4096);
REQUIRE_EQ(str.size(), 4096u);
}
TEST(string, split_on_empty_string_should_return_empty_vec) {
string str;
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 0u);
}
TEST(string, can_split) {
{
string str("some");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 1u);
REQUIRE_EQ((const char *)splitted[0], "some");
}
{
string str("some string");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 2u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "string");
}
{
string str("some string ");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 2u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "string");
}
{
string str("some string ");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 2u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "string");
}
{
string str(" some string ");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 2u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "string");
}
{
string str(" some string ");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 2u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "string");
}
{
string str(" some string ");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 2u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "string");
}
{
string str(" some other string ");
const auto splitted = str.split();
REQUIRE_EQ(splitted.size(), 3u);
REQUIRE_EQ((const char *)splitted[0], "some");
REQUIRE_EQ((const char *)splitted[1], "other");
REQUIRE_EQ((const char *)splitted[2], "string");
}
}<|fim▁end|> | REQUIRE(str == "some");
str = string("some very, very, very long string");
REQUIRE(str == "some very, very, very long string"); |
<|file_name|>DbC_methods_with_both_pre_and_post.py<|end_file_name|><|fim▁begin|># coding:ascii
""" Example usage of DbC (design by contract)
In this example we show you how to use pre- and post-condition checkers
decorating the same function.
"""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import DbC
DbC.ASSERTION_LEVEL = DbC.ASSERTION_ALL
# in this example we bring `pre` and `post` into our namespace
from DbC import pre, post
def check_pre(*args):
'Pre-condition checker.'
# must have an even number of args
assert ( len(args) & 1 ) == 0, 'Expected an even number of arguments'
# all numbers must be non-negative ints
assert all(i>=0 and isinstance(i,int) for i in args), \
'Numbers must be positive integers'
# all second numbers must be < 10
assert all(i<10 for i in args[1::2]), 'Numbers must be < 10'
def check_post(*args):
'Post-condition checker.'
# return value from decorated function is always the last positional
# parameter
rval = args[-1]
# simple check of the number of items in the return
assert 2 * len(rval) == len(args) - 1
# check units
units_out = [i%10 for i in rval]
units_in = [i for i in args[1:-1:2]]
assert units_out == units_in
# check tens
tens_out = [i//10 for i in rval]
tens_in = [i for i in args[0:-1:2]]
assert tens_out == tens_in
# It doesn't matter which order you include the decorators
@pre(check_pre)
@post(check_post)
def pairoff(*args):
'Make tens+units from pairs of numbers.'
it = iter(args)
return [10*a+b for a,b in zip(it,it)]
# Test data
print( pairoff(*range(8)) )
print( pairoff(4,2, 10,1) )
try: # odd number of args
pairoff(1,2,3,4,5)
except AssertionError as e:
print(e)
try: # unit >= 10
pairoff(4,2, 9,10)
except AssertionError as e:
print(e)<|fim▁hole|> print(e)
try: # non-integer
pairoff(1.25,0.6)
except AssertionError as e:
print(e)<|fim▁end|> | try: # negative
pairoff(4,2, -1,2)
except AssertionError as e: |
<|file_name|>resnet_model_fast.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the resnet model.
Adapted from
https://github.com/tensorflow/models/tree/master/official/vision/image_classification/resnet.
The following code is based on its v1 version.
"""
import tensorflow.compat.v1 as tf
<|fim▁hole|>CASTABLE_TYPES = (tf.float16,)
ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES
NUM_CLASSES = 10
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format, name=''):
"""Performs a batch normalization using a standard set of parameters."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
return tf.compat.v1.layers.batch_normalization(
inputs=inputs,
axis=1 if data_format == 'channels_first' else 3,
momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON,
center=True,
scale=True,
training=training,
fused=True,
name=name)
# add name later if necessary
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
name):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.compat.v1.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
reuse=tf.AUTO_REUSE,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
data_format=data_format,
name=name)
################################################################################
# ResNet block definitions.
################################################################################
def _building_block_v2(inputs, filters, training, projection_shortcut, strides,
data_format, name):
"""A single block for ResNet v2, without a bottleneck.
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference mode.
Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
name: Block name.
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
first_name = name + 'first'
inputs = batch_norm(
inputs, training, data_format, name=first_name + 'batch_norm')
inputs = tf.nn.relu(inputs, name=first_name + 'relu')
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs, name=first_name + 'proj')
second_name = name + 'second'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
data_format=data_format,
name=second_name + 'input')
inputs = batch_norm(
inputs, training, data_format, name=second_name + 'batch_norm')
inputs = tf.nn.relu(inputs, name=second_name + 'relu')
third_name = name + 'third'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
data_format=data_format,
name=third_name + 'input')
return inputs + shortcut
def block_layer(inputs,
filters,
bottleneck,
block_fn,
blocks,
strides,
training,
name,
data_format,
shortcut=True):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
bottleneck: Is the block created a bottleneck block.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
training: Either True or False, whether we are currently training the model.
Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
shortcut: Whether to use projection shortcut in the first block.
Returns:
The output tensor of the block layer.
"""
# Bottleneck blocks end with 4x the number of filters as they start with
filters_out = filters * 4 if bottleneck else filters
def projection_shortcut(inputs, name):
return conv2d_fixed_padding(
inputs=inputs,
filters=filters_out,
kernel_size=1,
strides=strides,
data_format=data_format,
name=name)
# Only the first block per block_layer uses projection_shortcut and strides.
# Skip the projection shortcut in the first block layer.
shortcut_fn = projection_shortcut if shortcut else None
inputs = block_fn(
inputs,
filters,
training,
shortcut_fn,
strides,
data_format,
name=name + 'input')
for j in range(1, blocks):
inputs = block_fn(
inputs,
filters,
training,
None,
1,
data_format,
name=name + 'block' + str(j))
return tf.identity(inputs, name)
class Model(object):
"""Base class for building the Resnet Model."""
def __init__(self,
resnet_size,
bottleneck,
num_classes,
num_filters,
kernel_size,
conv_stride,
first_pool_size,
first_pool_stride,
block_sizes,
block_strides,
resnet_version=DEFAULT_VERSION,
data_format=None,
dtype=DEFAULT_DTYPE):
"""Creates a model for classifying an image.
Args:
resnet_size: A single integer for the size of the ResNet model.
bottleneck: Use regular blocks or bottleneck blocks.
num_classes: The number of classes used as labels.
num_filters: The number of filters to use for the first block layer of the
model. This number is then doubled for each subsequent block layer.
kernel_size: The kernel size to use for convolution.
conv_stride: stride size for the initial convolutional layer
first_pool_size: Pool size to be used for the first pooling layer. If
none, the first pooling layer is skipped.
first_pool_stride: stride size for the first pooling layer. Not used if
first_pool_size is None.
block_sizes: A list containing n values, where n is the number of sets of
block layers desired. Each value should be the number of blocks in the
i-th set.
block_strides: List of integers representing the desired stride size for
each of the sets of block layers. Should be same length as block_sizes.
resnet_version: Integer representing which version of the ResNet network
to use. See README for details. Valid values: [1, 2]
data_format: Input format ('channels_last', 'channels_first', or None). If
set to None, the format is dependent on whether a GPU is available.
dtype: The TensorFlow dtype to use for calculations. If not specified
tf.float32 is used.
Raises:
ValueError: if invalid version is selected.
"""
self.resnet_size = resnet_size
if not data_format:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
self.resnet_version = resnet_version
if resnet_version not in (1, 2):
raise ValueError(
'Resnet version should be 1 or 2. See README for citations.')
self.bottleneck = bottleneck
self.block_fn = _building_block_v2
if dtype not in ALLOWED_TYPES:
raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))
self.data_format = data_format
self.num_classes = num_classes
self.num_filters = num_filters
self.kernel_size = kernel_size
self.conv_stride = conv_stride
self.first_pool_size = first_pool_size
self.first_pool_stride = first_pool_stride
self.block_sizes = block_sizes
self.block_strides = block_strides
self.dtype = dtype
self.pre_activation = resnet_version == 2
def _custom_dtype_getter(self, # pylint: disable=keyword-arg-before-vararg
getter,
name,
shape=None,
dtype=DEFAULT_DTYPE,
*args,
**kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
if dtype in CASTABLE_TYPES:
var = getter(name, shape, tf.float32, *args, **kwargs)
return tf.cast(var, dtype=dtype, name=name + '_cast')
else:
return getter(name, shape, dtype, *args, **kwargs)
def _model_variable_scope(self):
"""Returns a variable scope that the model should be created under.
If self.dtype is a castable type, model variable will be created in fp32
then cast to self.dtype before being used.
Returns:
A variable scope for the model.
"""
return tf.compat.v1.variable_scope(
'resnet_model',
custom_getter=self._custom_dtype_getter,
reuse=tf.AUTO_REUSE)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, self.num_classes].
"""
with self._model_variable_scope():
if self.data_format == 'channels_first':
# Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
# This provides a large performance boost on GPU. See
# https://www.tensorflow.org/performance/performance_guide#data_formats
inputs = tf.transpose(a=inputs, perm=[0, 3, 1, 2])
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=self.num_filters,
kernel_size=self.kernel_size,
strides=self.conv_stride,
data_format=self.data_format,
name='initial_input')
inputs = tf.identity(inputs, 'initial_conv')
# We do not include batch normalization or activation functions in V2
# for the initial conv1 because the first ResNet unit will perform these
# for both the shortcut and non-shortcut paths as part of the first
# block's projection. Cf. Appendix of [2].
if self.resnet_version == 1:
inputs = batch_norm(inputs, training, self.data_format)
inputs = tf.nn.relu(inputs)
if self.first_pool_size:
inputs = tf.compat.v1.layers.max_pooling2d(
inputs=inputs,
pool_size=self.first_pool_size,
strides=self.first_pool_stride,
padding='SAME',
data_format=self.data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
for i, num_blocks in enumerate(self.block_sizes):
# We now have 4 block layers, but the last does not
# double the number of filters.
# We also skip the projection shortcut in the first block layer.
num_filters = self.num_filters * min((2**i), 4)
shortcut = i != 0
inputs = block_layer(
inputs=inputs,
filters=num_filters,
bottleneck=self.bottleneck,
block_fn=self.block_fn,
blocks=num_blocks,
strides=self.block_strides[i],
training=training,
name='block_layer{}'.format(i + 1),
data_format=self.data_format,
shortcut=shortcut)
# Skip the last BN+relu.
# Only apply the BN and ReLU for model that does pre_activation in each
# building/bottleneck block, eg resnet V2.
# if self.pre_activation:
# inputs = batch_norm(inputs, training, self.data_format,
# name='pre_act'+'batch_norm')
# inputs = tf.nn.relu(inputs,name='pre_act'+'relu')
# The current top layer has shape
# `batch_size x pool_size x pool_size x final_size`.
# ResNet does an Average Pooling layer over pool_size,
# but that is the same as doing a reduce_mean. We do a reduce_mean
# here because it performs better than AveragePooling2D.
# Also perform max-pooling, and concat results.
axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
avg_pooled = tf.reduce_mean(input_tensor=inputs, axis=axes, keepdims=True)
avg_pooled = tf.squeeze(avg_pooled, axes)
max_pooled = tf.reduce_max(input_tensor=inputs, axis=axes, keepdims=True)
max_pooled = tf.squeeze(max_pooled, axes)
inputs = tf.concat([avg_pooled, max_pooled], axis=1)
inputs = tf.identity(inputs, 'final_pooling')
inputs = tf.compat.v1.layers.dense(
inputs=inputs, units=self.num_classes, reuse=tf.AUTO_REUSE)
inputs = tf.identity(inputs, 'final_dense')
return inputs
###############################################################################
# Running the model
###############################################################################
class FastCifar10Model(Model):
"""Model class with appropriate defaults for CIFAR-10 data."""
def __init__(self,
resnet_size,
data_format=None,
num_classes=NUM_CLASSES,
resnet_version=DEFAULT_VERSION,
dtype=DEFAULT_DTYPE):
"""These are the parameters that work for CIFAR-10 data.
Args:
resnet_size: The number of convolutional layers needed in the model.
data_format: Either 'channels_first' or 'channels_last', specifying which
data format to use when setting up the model.
num_classes: The number of output classes needed from the model. This
enables users to extend the same model to their own datasets.
resnet_version: Integer representing which version of the ResNet network
to use. See README for details. Valid values: [1, 2]
dtype: The TensorFlow dtype to use for calculations.
Raises:
ValueError: if invalid resnet_size is chosen
"""
# 4 block layers, so change to 8n+2.
if resnet_size % 8 != 2:
raise ValueError('resnet_size must be 8n + 2:', resnet_size)
num_blocks = (resnet_size - 2) // 8
# Switch to 4 block layers. Use 64, 128, 256, 256 filters.
super(FastCifar10Model, self).__init__(
resnet_size=resnet_size,
bottleneck=False,
num_classes=num_classes,
num_filters=64,
kernel_size=3,
conv_stride=1,
first_pool_size=None,
first_pool_stride=None,
block_sizes=[num_blocks] * 4,
block_strides=[1, 2, 2, 2],
resnet_version=resnet_version,
data_format=data_format,
dtype=dtype)<|fim▁end|> | _BATCH_NORM_DECAY = 0.9
_BATCH_NORM_EPSILON = 1e-5
DEFAULT_VERSION = 2
DEFAULT_DTYPE = tf.float32 |
<|file_name|>anpa_formatter.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from copy import deepcopy
from superdesk.publish.formatters import Formatter
from .aap_formatter_common import map_priority, get_service_level
import superdesk
from superdesk.errors import FormatterError
import datetime
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, BYLINE, FORMAT, FORMATS
from .field_mappers.locator_mapper import LocatorMapper
from .field_mappers.slugline_mapper import SluglineMapper
from eve.utils import config
from .unicodetoascii import to_ascii
from .category_list_map import get_aap_category_list
import re
from superdesk.etree import parse_html, to_string, etree
from superdesk.text_utils import get_text
from superdesk.utc import utc_to_local
class AAPAnpaFormatter(Formatter):
def format(self, article, subscriber, codes=None):
try:
docs = []
formatted_article = deepcopy(article)
for category in self._get_category_list(formatted_article.get('anpa_category')):
mapped_source = self._get_mapped_source(formatted_article)
formatted_article[config.ID_FIELD] = formatted_article.get('item_id',
formatted_article.get(config.ID_FIELD))
pub_seq_num = superdesk.get_resource_service('subscribers').generate_sequence_number(subscriber)
anpa = []
if codes:
anpa.append(b'\x05')
anpa.append(' '.join(codes).encode('ascii'))
anpa.append(b'\x0D\x0A')
# start of message header (syn syn soh)
anpa.append(b'\x16\x16\x01')
anpa.append(get_service_level(category, formatted_article).encode('ascii'))
# story number
anpa.append(str(pub_seq_num).zfill(4).encode('ascii'))
# field seperator
anpa.append(b'\x0A') # -LF
anpa.append(map_priority(formatted_article.get('priority')).encode('ascii'))
anpa.append(b'\x20')
anpa.append(category['qcode'].lower().encode('ascii'))
anpa.append(b'\x13')
# format identifier
if formatted_article.get(FORMAT, FORMATS.HTML) == FORMATS.PRESERVED:
anpa.append(b'\x12')
else:
anpa.append(b'\x11')
anpa.append(b'\x20')
# keyword
keyword = 'bc-{}'.format(self.append_legal(article=formatted_article, truncate=True)).replace(' ', '-')
keyword = keyword[:24] if len(keyword) > 24 else keyword
anpa.append(keyword.encode('ascii'))
anpa.append(b'\x20')
# version field
anpa.append(b'\x20')
# reference field
anpa.append(b'\x20')
# filing date
local_time = utc_to_local(config.DEFAULT_TIMEZONE or 'UTC', formatted_article['_updated'])
anpa.append('{}-{}'.format(local_time.strftime('%m'), local_time.strftime('%d')).encode('ascii'))
anpa.append(b'\x20')
# add the word count
anpa.append(str(formatted_article.get('word_count', '0000')).zfill(4).encode('ascii'))
anpa.append(b'\x0D\x0A')
anpa.append(b'\x02') # STX
self._process_headline(anpa, formatted_article, category['qcode'].encode('ascii'))
keyword = SluglineMapper().map(article=formatted_article, category=category['qcode'].upper(),
truncate=True).encode('ascii', 'ignore')
anpa.append(keyword)
take_key = (formatted_article.get('anpa_take_key', '') or '').encode('ascii', 'ignore')<|fim▁hole|> if formatted_article.get('ednote', '') != '':
ednote = '{}\r\n'.format(to_ascii(formatted_article.get('ednote')))
anpa.append(ednote.encode('ascii', 'replace'))
if formatted_article.get(BYLINE):
anpa.append(get_text(formatted_article.get(BYLINE)).encode('ascii', 'replace'))
anpa.append(b'\x0D\x0A')
if formatted_article.get(FORMAT) == FORMATS.PRESERVED:
anpa.append(get_text(self.append_body_footer(formatted_article),
content='html').encode('ascii', 'replace'))
else:
body = to_ascii(formatted_article.get('body_html', ''))
# we need to inject the dateline
if formatted_article.get('dateline', {}).get('text') and not article.get('auto_publish', False):
body_html_elem = parse_html(formatted_article.get('body_html'))
ptag = body_html_elem.find('.//p')
if ptag is not None:
ptag.text = formatted_article['dateline']['text'] + ' ' + (ptag.text or '')
body = to_string(body_html_elem)
anpa.append(self.get_text_content(body))
if formatted_article.get('body_footer'):
anpa.append(self.get_text_content(to_ascii(formatted_article.get('body_footer', ''))))
anpa.append(b'\x0D\x0A')
anpa.append(mapped_source.encode('ascii'))
sign_off = (formatted_article.get('sign_off', '') or '').encode('ascii')
anpa.append((b'\x20' + sign_off) if len(sign_off) > 0 else b'')
anpa.append(b'\x0D\x0A')
anpa.append(b'\x03') # ETX
# time and date
anpa.append(datetime.datetime.now().strftime('%d-%m-%y %H-%M-%S').encode('ascii'))
anpa.append(b'\x04') # EOT
anpa.append(b'\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A')
docs.append({'published_seq_num': pub_seq_num, 'encoded_item': b''.join(anpa),
'formatted_item': b''.join(anpa).decode('ascii')})
return docs
except Exception as ex:
raise FormatterError.AnpaFormatterError(ex, subscriber)
def get_text_content(self, content):
content = content.replace('<br>', '<br/>').replace('</br>', '')
content = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', content)
content = content.replace('\xA0', ' ')
parsed = parse_html(content, content='html')
for br in parsed.xpath('//br'):
br.tail = '\r\n' + br.tail if br.tail else '\r\n'
etree.strip_elements(parsed, 'br', with_tail=False)
for tag in parsed.xpath('/html/div/child::*'):
if tag.tag not in ('br') and tag.text is not None and tag.text.strip() != '':
tag.text = ' ' + re.sub(' +', ' ', re.sub('(?<!\r)\n+', ' ', tag.text)) if tag.text else ''
tag.tail = '\r\n' + tag.tail if tag.tail else '\r\n'
para_text = "".join(x for x in parsed.itertext())
para_text = para_text.replace('\xA0', ' ')
return para_text.encode('ascii', 'replace')
def _process_headline(self, anpa, article, category):
# prepend the locator to the headline if required
article['headline'] = get_text(article.get('headline', ''))
headline = to_ascii(LocatorMapper().get_formatted_headline(article, category.decode('UTF-8').upper()))
# Set the maximum size to 64 including the sequence number if any
if len(headline) > 64:
if article.get('sequence'):
digits = len(str(article['sequence'])) + 1
shortened_headline = '{}={}'.format(headline[:-digits][:(64 - digits)], article['sequence'])
anpa.append(shortened_headline.encode('ascii', 'replace'))
else:
anpa.append(headline[:64].encode('ascii', 'replace'))
else:
anpa.append(headline.encode('ascii', 'replace'))
anpa.append(b'\x0D\x0A')
def _get_category_list(self, category_list):
return get_aap_category_list(category_list)
def _get_mapped_source(self, article):
return article.get('source', '') if article.get('source', '') != 'NZN' else 'AAP'
def can_format(self, format_type, article):
return format_type == 'AAP ANPA' and article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]<|fim▁end|> | anpa.append((b'\x20' + take_key) if len(take_key) > 0 else b'')
anpa.append(b'\x0D\x0A')
|
<|file_name|>stars.py<|end_file_name|><|fim▁begin|># x = [4, 6, 1, 3, 5, 7, 25]
# def stars (a):
# i = 0
# while (i < len(a)):
# print '*' * a[i]
# i += 1
# stars(x) <|fim▁hole|> i = 0
while (i < len(a)):
if type(a[i]) is int:
print '*' * a[i]
i+=1
else:
temp = a[i]
temp = temp.lower()
print (len(a[i])) * temp[0]
i += 1
stars(x)<|fim▁end|> |
x = [4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"]
def stars (a): |
<|file_name|>Footer.js<|end_file_name|><|fim▁begin|>/**
* React Starter Kit (https://www.reactstarterkit.com/)
*
* Copyright © 2014-2016 Kriasoft, LLC. All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE.txt file in the root directory of this source tree.
*/
import React from 'react';
import withStyles from 'isomorphic-style-loader/lib/withStyles';
import s from './Footer.css';
import Link from '../Link';
function Footer() {<|fim▁hole|> return (
<div className={s.root}>
<div className={s.container}>
<span className={s.text}>© Your Company</span>
<span className={s.spacer}>·</span>
<Link className={s.link} to="/">Home</Link>
<span className={s.spacer}>·</span>
<Link className={s.link} to="/privacy">Privacy</Link>
<span className={s.spacer}>·</span>
<Link className={s.link} to="/not-found">Not Found</Link>
</div>
</div>
);
}
export default withStyles(s)(Footer);<|fim▁end|> | |
<|file_name|>steps.py<|end_file_name|><|fim▁begin|># This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util import epoch2datetime
class StepsConnectorComponent(base.DBConnectorComponent):
# Documentation is in developer/db.rst
url_lock = None
@defer.inlineCallbacks
def getStep(self, stepid=None, buildid=None, number=None, name=None):
tbl = self.db.model.steps
if stepid is not None:
wc = (tbl.c.id == stepid)
else:
if buildid is None:
raise RuntimeError('must supply either stepid or buildid')
if number is not None:
wc = (tbl.c.number == number)
elif name is not None:
wc = (tbl.c.name == name)
else:
raise RuntimeError('must supply either number or name')
wc = wc & (tbl.c.buildid == buildid)
def thd(conn):
q = self.db.model.steps.select(whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._stepdictFromRow(row)
res.close()
return rv
return (yield self.db.pool.do(thd))
# returns a Deferred that returns a value
def getSteps(self, buildid):
def thd(conn):
tbl = self.db.model.steps
q = tbl.select()
q = q.where(tbl.c.buildid == buildid)
q = q.order_by(tbl.c.number)
res = conn.execute(q)
return [self._stepdictFromRow(row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def addStep(self, buildid, name, state_string):
def thd(conn):
tbl = self.db.model.steps
# get the highest current number
r = conn.execute(sa.select([sa.func.max(tbl.c.number)],
whereclause=(tbl.c.buildid == buildid)))
number = r.scalar()
number = 0 if number is None else number + 1
# note that there is no chance for a race condition here,
# since only one master is inserting steps. If there is a
# conflict, then the name is likely already taken.
insert_row = dict(buildid=buildid, number=number,
started_at=None, complete_at=None,
state_string=state_string,
urls_json='[]', name=name)
try:
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
got_id = None
if got_id:
return (got_id, number, name)
# we didn't get an id, so calculate a unique name and use that
# instead. Because names are truncated at the right to fit in a
# 50-character identifier, this isn't a simple query.
res = conn.execute(sa.select([tbl.c.name],
whereclause=((tbl.c.buildid == buildid))))
names = {row[0] for row in res}
num = 1
while True:
numstr = '_%d' % num
newname = name[:50 - len(numstr)] + numstr
if newname not in names:
break
num += 1
insert_row['name'] = newname
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
return (got_id, number, newname)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def startStep(self, stepid):
started_at = int(self.master.reactor.seconds())
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, started_at=started_at)
yield self.db.pool.do(thd)
# returns a Deferred that returns None
def setStepStateString(self, stepid, state_string):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, state_string=state_string)
return self.db.pool.do(thd)
def addURL(self, stepid, name, url, _racehook=None):
# This methods adds an URL to the db
# This is a read modify write and thus there is a possibility
# that several urls are added at the same time (e.g with a deferredlist
# at the end of a step)
# this race condition is only inside the same master, as only one master
# is supposed to add urls to a buildstep.
# so threading.lock is used, as we are in the thread pool
if self.url_lock is None:
# this runs in reactor thread, so no race here..
self.url_lock = defer.DeferredLock()
def thd(conn):
tbl = self.db.model.steps
wc = (tbl.c.id == stepid)
q = sa.select([tbl.c.urls_json],
whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
if _racehook is not None:
_racehook()
urls = json.loads(row.urls_json)
url_item = dict(name=name, url=url)
if url_item not in urls:
urls.append(url_item)
q = tbl.update(whereclause=wc)
conn.execute(q, urls_json=json.dumps(urls))
<|fim▁hole|> return self.url_lock.run(lambda: self.db.pool.do(thd))
# returns a Deferred that returns None
def finishStep(self, stepid, results, hidden):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q,
complete_at=int(self.master.reactor.seconds()),
results=results,
hidden=1 if hidden else 0)
return self.db.pool.do(thd)
def _stepdictFromRow(self, row):
return dict(
id=row.id,
number=row.number,
name=row.name,
buildid=row.buildid,
started_at=epoch2datetime(row.started_at),
complete_at=epoch2datetime(row.complete_at),
state_string=row.state_string,
results=row.results,
urls=json.loads(row.urls_json),
hidden=bool(row.hidden))<|fim▁end|> | |
<|file_name|>stylist.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Selector matching.
use {Atom, LocalName, Namespace};
use applicable_declarations::{ApplicableDeclarationBlock, ApplicableDeclarationList};
use context::{CascadeInputs, QuirksMode};
use dom::TElement;
use element_state::ElementState;
use font_metrics::FontMetricsProvider;
#[cfg(feature = "gecko")]
use gecko_bindings::structs::{nsIAtom, ServoStyleSetSizes, StyleRuleInclusion};
use hashglobe::FailedAllocationError;
use invalidation::element::invalidation_map::InvalidationMap;
use invalidation::media_queries::{EffectiveMediaQueryResults, ToMediaListKey};
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps};
#[cfg(feature = "gecko")]
use malloc_size_of::MallocUnconditionalShallowSizeOf;
use media_queries::Device;
use properties::{self, CascadeFlags, ComputedValues};
use properties::{AnimationRules, PropertyDeclarationBlock};
#[cfg(feature = "servo")]
use properties::INHERIT_ALL;
use properties::IS_LINK;
use rule_tree::{CascadeLevel, RuleTree, StrongRuleNode, StyleSource};
use selector_map::{PrecomputedHashMap, SelectorMap, SelectorMapEntry};
use selector_parser::{SelectorImpl, PerPseudoElementMap, PseudoElement};
use selectors::NthIndexCache;
use selectors::attr::NamespaceConstraint;
use selectors::bloom::{BloomFilter, NonCountingBloomFilter};
use selectors::matching::{ElementSelectorFlags, matches_selector, MatchingContext, MatchingMode};
use selectors::matching::VisitedHandlingMode;
use selectors::parser::{AncestorHashes, Combinator, Component, Selector};
use selectors::parser::{SelectorIter, SelectorMethods};
use selectors::sink::Push;
use selectors::visitor::SelectorVisitor;
use servo_arc::{Arc, ArcBorrow};
use shared_lock::{Locked, SharedRwLockReadGuard, StylesheetGuards};
use smallbitvec::SmallBitVec;
use smallvec::VecLike;
use std::fmt::Debug;
use std::ops;
use std::sync::Mutex;
use style_traits::viewport::ViewportConstraints;
use stylesheet_set::{OriginValidity, SheetRebuildKind, StylesheetSet, StylesheetFlusher};
#[cfg(feature = "gecko")]
use stylesheets::{CounterStyleRule, FontFaceRule, FontFeatureValuesRule, PageRule};
use stylesheets::{CssRule, Origin, OriginSet, PerOrigin, PerOriginIter};
use stylesheets::StyleRule;
use stylesheets::StylesheetInDocument;
use stylesheets::keyframes_rule::KeyframesAnimation;
use stylesheets::viewport_rule::{self, MaybeNew, ViewportRule};
use thread_state;
/// The type of the stylesheets that the stylist contains.
#[cfg(feature = "servo")]
pub type StylistSheet = ::stylesheets::DocumentStyleSheet;
/// The type of the stylesheets that the stylist contains.
#[cfg(feature = "gecko")]
pub type StylistSheet = ::gecko::data::GeckoStyleSheet;
/// A cache of computed user-agent data, to be shared across documents.
lazy_static! {
static ref UA_CASCADE_DATA_CACHE: Mutex<UserAgentCascadeDataCache> =
Mutex::new(UserAgentCascadeDataCache::new());
}
struct UserAgentCascadeDataCache {
entries: Vec<Arc<UserAgentCascadeData>>,
}
impl UserAgentCascadeDataCache {
fn new() -> Self {
Self {
entries: vec![],
}
}
fn lookup<'a, I, S>(
&'a mut self,
sheets: I,
device: &Device,
quirks_mode: QuirksMode,
guard: &SharedRwLockReadGuard,
) -> Result<Arc<UserAgentCascadeData>, FailedAllocationError>
where
I: Iterator<Item = &'a S> + Clone,
S: StylesheetInDocument + ToMediaListKey + PartialEq + 'static,
{
let mut key = EffectiveMediaQueryResults::new();
for sheet in sheets.clone() {
CascadeData::collect_applicable_media_query_results_into(
device,
sheet,
guard,
&mut key,
)
}
for entry in &self.entries {
if entry.cascade_data.effective_media_query_results == key {
return Ok(entry.clone());
}
}
let mut new_data = UserAgentCascadeData {
cascade_data: CascadeData::new(),
precomputed_pseudo_element_decls: PrecomputedPseudoElementDeclarations::default(),
};
for sheet in sheets {
new_data.cascade_data.add_stylesheet(
device,
quirks_mode,
sheet,
guard,
SheetRebuildKind::Full,
Some(&mut new_data.precomputed_pseudo_element_decls),
)?;
}
let new_data = Arc::new(new_data);
self.entries.push(new_data.clone());
Ok(new_data)
}
fn expire_unused(&mut self) {
self.entries.retain(|e| !e.is_unique())
}
fn clear(&mut self) {
self.entries.clear();
}
#[cfg(feature = "gecko")]
pub fn add_size_of(&self, ops: &mut MallocSizeOfOps, sizes: &mut ServoStyleSetSizes) {
sizes.mOther += self.entries.shallow_size_of(ops);
for arc in self.entries.iter() {
// These are primary Arc references that can be measured
// unconditionally.
sizes.mOther += arc.unconditional_shallow_size_of(ops);
arc.add_size_of(ops, sizes);
}
}
}
/// Measure heap usage of UA_CASCADE_DATA_CACHE.
#[cfg(feature = "gecko")]
pub fn add_size_of_ua_cache(ops: &mut MallocSizeOfOps, sizes: &mut ServoStyleSetSizes) {
UA_CASCADE_DATA_CACHE.lock().unwrap().add_size_of(ops, sizes);
}
type PrecomputedPseudoElementDeclarations =
PerPseudoElementMap<Vec<ApplicableDeclarationBlock>>;
#[derive(Default)]
struct UserAgentCascadeData {
cascade_data: CascadeData,
/// Applicable declarations for a given non-eagerly cascaded pseudo-element.
///
/// These are eagerly computed once, and then used to resolve the new
/// computed values on the fly on layout.
///
/// These are only filled from UA stylesheets.
precomputed_pseudo_element_decls: PrecomputedPseudoElementDeclarations,
}
impl UserAgentCascadeData {
#[cfg(feature = "gecko")]
fn add_size_of(&self, ops: &mut MallocSizeOfOps, sizes: &mut ServoStyleSetSizes) {
self.cascade_data.add_size_of_children(ops, sizes);
sizes.mPrecomputedPseudos += self.precomputed_pseudo_element_decls.size_of(ops);
}
}
/// All the computed information for a stylesheet.
#[derive(Default)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
struct DocumentCascadeData {
#[cfg_attr(
feature = "servo",
ignore_heap_size_of = "Arc, owned by UserAgentCascadeDataCache"
)]
user_agent: Arc<UserAgentCascadeData>,
user: CascadeData,
author: CascadeData,
per_origin: PerOrigin<()>,
}
struct DocumentCascadeDataIter<'a> {
iter: PerOriginIter<'a, ()>,
cascade_data: &'a DocumentCascadeData,
}
impl<'a> Iterator for DocumentCascadeDataIter<'a> {
type Item = (&'a CascadeData, Origin);
fn next(&mut self) -> Option<Self::Item> {
let (_, origin) = match self.iter.next() {
Some(o) => o,
None => return None,
};
Some((self.cascade_data.borrow_for_origin(origin), origin))
}
}
impl DocumentCascadeData {
fn borrow_for_origin(&self, origin: Origin) -> &CascadeData {
match origin {
Origin::UserAgent => &self.user_agent.cascade_data,
Origin::Author => &self.author,
Origin::User => &self.user,
}
}
fn iter_origins(&self) -> DocumentCascadeDataIter {
DocumentCascadeDataIter {
iter: self.per_origin.iter_origins(),
cascade_data: self,
}
}
fn iter_origins_rev(&self) -> DocumentCascadeDataIter {
DocumentCascadeDataIter {
iter: self.per_origin.iter_origins_rev(),
cascade_data: self,
}
}
fn rebuild_origin<'a, S>(
device: &Device,
quirks_mode: QuirksMode,
flusher: &mut StylesheetFlusher<'a, S>,
guards: &StylesheetGuards,
origin: Origin,
cascade_data: &mut CascadeData,
) -> Result<(), FailedAllocationError>
where
S: StylesheetInDocument + ToMediaListKey + PartialEq + 'static,
{
debug_assert_ne!(origin, Origin::UserAgent);
let validity = flusher.origin_validity(origin);
match validity {
OriginValidity::Valid => {},
OriginValidity::CascadeInvalid => cascade_data.clear_cascade_data(),
OriginValidity::FullyInvalid => cascade_data.clear(),
}
let guard = guards.for_origin(origin);
for (stylesheet, rebuild_kind) in flusher.origin_sheets(origin) {
cascade_data.add_stylesheet(
device,
quirks_mode,
stylesheet,
guard,
rebuild_kind,
/* precomputed_pseudo_element_decls = */ None,
)?;
}
Ok(())
}
/// Rebuild the cascade data for the given document stylesheets, and
/// optionally with a set of user agent stylesheets. Returns Err(..)
/// to signify OOM.
fn rebuild<'a, S>(
&mut self,
device: &Device,
quirks_mode: QuirksMode,
mut flusher: StylesheetFlusher<'a, S>,
guards: &StylesheetGuards,
) -> Result<(), FailedAllocationError>
where
S: StylesheetInDocument + ToMediaListKey + PartialEq + 'static,
{
debug_assert!(!flusher.nothing_to_do());
// First do UA sheets.
{
if flusher.origin_dirty(Origin::UserAgent) {
let mut ua_cache = UA_CASCADE_DATA_CACHE.lock().unwrap();
let origin_sheets =
flusher.manual_origin_sheets(Origin::UserAgent);
let ua_cascade_data = ua_cache.lookup(
origin_sheets,
device,
quirks_mode,
guards.ua_or_user
)?;
ua_cache.expire_unused();
self.user_agent = ua_cascade_data;
}
}
// Now do the user sheets.
Self::rebuild_origin(
device,
quirks_mode,
&mut flusher,
guards,
Origin::User,
&mut self.user,
)?;
// And now the author sheets.
Self::rebuild_origin(
device,
quirks_mode,
&mut flusher,
guards,
Origin::Author,
&mut self.author,
)?;
Ok(())
}
/// Measures heap usage.
#[cfg(feature = "gecko")]
pub fn add_size_of_children(&self, ops: &mut MallocSizeOfOps, sizes: &mut ServoStyleSetSizes) {
self.user.add_size_of_children(ops, sizes);
self.author.add_size_of_children(ops, sizes);
}
}
/// A wrapper over a StylesheetSet that can be `Sync`, since it's only used and
/// exposed via mutable methods in the `Stylist`.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
struct StylistStylesheetSet(StylesheetSet<StylistSheet>);
// Read above to see why this is fine.
unsafe impl Sync for StylistStylesheetSet {}
impl StylistStylesheetSet {
fn new() -> Self {
StylistStylesheetSet(StylesheetSet::new())
}
}
impl ops::Deref for StylistStylesheetSet {
type Target = StylesheetSet<StylistSheet>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for StylistStylesheetSet {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// This structure holds all the selectors and device characteristics
/// for a given document. The selectors are converted into `Rule`s
/// and sorted into `SelectorMap`s keyed off stylesheet origin and
/// pseudo-element (see `CascadeData`).
///
/// This structure is effectively created once per pipeline, in the
/// LayoutThread corresponding to that pipeline.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Stylist {
/// Device that the stylist is currently evaluating against.
///
/// This field deserves a bigger comment due to the different use that Gecko
/// and Servo give to it (that we should eventually unify).
///
/// With Gecko, the device is never changed. Gecko manually tracks whether
/// the device data should be reconstructed, and "resets" the state of the
/// device.
///
/// On Servo, on the other hand, the device is a really cheap representation
/// that is recreated each time some constraint changes and calling
/// `set_device`.
device: Device,
/// Viewport constraints based on the current device.
viewport_constraints: Option<ViewportConstraints>,
/// The list of stylesheets.
stylesheets: StylistStylesheetSet,
/// If true, the quirks-mode stylesheet is applied.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "defined in selectors")]
quirks_mode: QuirksMode,
/// Selector maps for all of the style sheets in the stylist, after
/// evalutaing media rules against the current device, split out per
/// cascade level.
cascade_data: DocumentCascadeData,
/// The rule tree, that stores the results of selector matching.
rule_tree: RuleTree,
/// The total number of times the stylist has been rebuilt.
num_rebuilds: usize,
}
/// What cascade levels to include when styling elements.
#[derive(Clone, Copy, PartialEq)]
pub enum RuleInclusion {
/// Include rules for style sheets at all cascade levels. This is the
/// normal rule inclusion mode.
All,
/// Only include rules from UA and user level sheets. Used to implement
/// `getDefaultComputedStyle`.
DefaultOnly,
}
#[cfg(feature = "gecko")]
impl From<StyleRuleInclusion> for RuleInclusion {
fn from(value: StyleRuleInclusion) -> Self {
match value {
StyleRuleInclusion::All => RuleInclusion::All,
StyleRuleInclusion::DefaultOnly => RuleInclusion::DefaultOnly,
}
}
}
impl Stylist {
/// Construct a new `Stylist`, using given `Device` and `QuirksMode`.
/// If more members are added here, think about whether they should
/// be reset in clear().
#[inline]
pub fn new(device: Device, quirks_mode: QuirksMode) -> Self {
Self {
viewport_constraints: None,
device,
quirks_mode,
stylesheets: StylistStylesheetSet::new(),
cascade_data: Default::default(),
rule_tree: RuleTree::new(),
num_rebuilds: 0,
}
}
/// Iterate over the extra data in origin order.
#[inline]
pub fn iter_extra_data_origins(&self) -> ExtraStyleDataIterator {
ExtraStyleDataIterator(self.cascade_data.iter_origins())
}
/// Iterate over the extra data in reverse origin order.
#[inline]
pub fn iter_extra_data_origins_rev(&self) -> ExtraStyleDataIterator {
ExtraStyleDataIterator(self.cascade_data.iter_origins_rev())
}
/// Returns the number of selectors.
pub fn num_selectors(&self) -> usize {
self.cascade_data.iter_origins().map(|(d, _)| d.num_selectors).sum()
}
/// Returns the number of declarations.
pub fn num_declarations(&self) -> usize {
self.cascade_data.iter_origins().map(|(d, _)| d.num_declarations).sum()
}
/// Returns the number of times the stylist has been rebuilt.
pub fn num_rebuilds(&self) -> usize {
self.num_rebuilds
}
/// Returns the number of revalidation_selectors.
pub fn num_revalidation_selectors(&self) -> usize {
self.cascade_data.iter_origins()
.map(|(d, _)| d.selectors_for_cache_revalidation.len()).sum()
}
/// Returns the number of entries in invalidation maps.
pub fn num_invalidations(&self) -> usize {
self.cascade_data.iter_origins()
.map(|(d, _)| d.invalidation_map.len()).sum()
}
/// Invokes `f` with the `InvalidationMap` for each origin.
///
/// NOTE(heycam) This might be better as an `iter_invalidation_maps`, once
/// we have `impl trait` and can return that easily without bothering to
/// create a whole new iterator type.
pub fn each_invalidation_map<F>(&self, mut f: F)
where F: FnMut(&InvalidationMap)
{
for (data, _) in self.cascade_data.iter_origins() {
f(&data.invalidation_map)
}
}
/// Flush the list of stylesheets if they changed, ensuring the stylist is
/// up-to-date.
///
/// FIXME(emilio): Move the `ua_sheets` to the Stylist too?
pub fn flush<E>(
&mut self,
guards: &StylesheetGuards,
document_element: Option<E>,
) -> bool
where
E: TElement,
{
if !self.stylesheets.has_changed() {
return false;
}
self.num_rebuilds += 1;
// Update viewport_constraints regardless of which origins'
// `CascadeData` we're updating.
self.viewport_constraints = None;
if viewport_rule::enabled() {
// TODO(emilio): This doesn't look so efficient.
//
// Presumably when we properly implement this we can at least have a
// bit on the stylesheet that says whether it contains viewport
// rules to skip it entirely?
//
// Processing it with the rest of rules seems tricky since it
// overrides the viewport size which may change the evaluation of
// media queries (or may not? how are viewport units in media
// queries defined?)
let cascaded_rule = ViewportRule {
declarations: viewport_rule::Cascade::from_stylesheets(
self.stylesheets.iter(),
guards,
&self.device,
).finish()
};
self.viewport_constraints =
ViewportConstraints::maybe_new(
&self.device,
&cascaded_rule,
self.quirks_mode,
);
if let Some(ref constraints) = self.viewport_constraints {
self.device.account_for_viewport_rule(constraints);
}
}
let flusher = self.stylesheets.flush(document_element);
let had_invalidations = flusher.had_invalidations();
self.cascade_data.rebuild(
&self.device,
self.quirks_mode,
flusher,
guards,
).unwrap_or_else(|_| warn!("OOM in Stylist::flush"));
had_invalidations
}
/// Insert a given stylesheet before another stylesheet in the document.
pub fn insert_stylesheet_before(
&mut self,
sheet: StylistSheet,
before_sheet: StylistSheet,
guard: &SharedRwLockReadGuard,
) {
self.stylesheets.insert_stylesheet_before(
Some(&self.device),
sheet,
before_sheet,
guard,
)
}
/// Marks a given stylesheet origin as dirty, due to, for example, changes
/// in the declarations that affect a given rule.
///
/// FIXME(emilio): Eventually it'd be nice for this to become more
/// fine-grained.
pub fn force_stylesheet_origins_dirty(&mut self, origins: OriginSet) {
self.stylesheets.force_dirty(origins)
}
/// Sets whether author style is enabled or not.
pub fn set_author_style_disabled(&mut self, disabled: bool) {
self.stylesheets.set_author_style_disabled(disabled);
}
/// Returns whether we've recorded any stylesheet change so far.
pub fn stylesheets_have_changed(&self) -> bool {
self.stylesheets.has_changed()
}
/// Appends a new stylesheet to the current set.
pub fn append_stylesheet(&mut self, sheet: StylistSheet, guard: &SharedRwLockReadGuard) {
self.stylesheets.append_stylesheet(Some(&self.device), sheet, guard)
}
/// Appends a new stylesheet to the current set.
pub fn prepend_stylesheet(&mut self, sheet: StylistSheet, guard: &SharedRwLockReadGuard) {
self.stylesheets.prepend_stylesheet(Some(&self.device), sheet, guard)
}
/// Remove a given stylesheet to the current set.
pub fn remove_stylesheet(&mut self, sheet: StylistSheet, guard: &SharedRwLockReadGuard) {
self.stylesheets.remove_stylesheet(Some(&self.device), sheet, guard)
}
/// Returns whether the given attribute might appear in an attribute
/// selector of some rule in the stylist.
pub fn might_have_attribute_dependency(
&self,
local_name: &LocalName,
) -> bool {
if *local_name == local_name!("style") {
self.cascade_data
.iter_origins()
.any(|(d, _)| d.style_attribute_dependency)
} else {
self.cascade_data
.iter_origins()
.any(|(d, _)| {
d.attribute_dependencies
.might_contain_hash(local_name.get_hash())
})
}
}
/// Returns whether the given ElementState bit might be relied upon by a
/// selector of some rule in the stylist.
pub fn might_have_state_dependency(&self, state: ElementState) -> bool {
self.has_state_dependency(state)
}
/// Returns whether the given ElementState bit is relied upon by a selector
/// of some rule in the stylist.
pub fn has_state_dependency(&self, state: ElementState) -> bool {
self.cascade_data
.iter_origins()
.any(|(d, _)| d.state_dependencies.intersects(state))
}
/// Computes the style for a given "precomputed" pseudo-element, taking the
/// universal rules and applying them.
///
/// If `inherit_all` is true, then all properties are inherited from the
/// parent; otherwise, non-inherited properties are reset to their initial
/// values. The flow constructor uses this flag when constructing anonymous
/// flows.
pub fn precomputed_values_for_pseudo(
    &self,
    guards: &StylesheetGuards,
    pseudo: &PseudoElement,
    parent: Option<&ComputedValues>,
    cascade_flags: CascadeFlags,
    font_metrics: &FontMetricsProvider
) -> Arc<ComputedValues> {
    debug_assert!(pseudo.is_precomputed());
    // Resolve the rules first (with no extra declarations), then cascade.
    let rules = self.rule_node_for_precomputed_pseudo(guards, pseudo, None);
    self.precomputed_values_for_pseudo_with_rule_node(
        guards, pseudo, parent, cascade_flags, font_metrics, &rules)
}
/// Computes the style for a given "precomputed" pseudo-element with
/// given rule node.
///
/// The parent style, if any, is used as the inherited style, the style
/// ignoring first-line, and the layout parent style alike; see the NOTE
/// below for why no separate layout parent is computed.
pub fn precomputed_values_for_pseudo_with_rule_node(
    &self,
    guards: &StylesheetGuards,
    pseudo: &PseudoElement,
    parent: Option<&ComputedValues>,
    cascade_flags: CascadeFlags,
    font_metrics: &FontMetricsProvider,
    rule_node: &StrongRuleNode
) -> Arc<ComputedValues> {
    // NOTE(emilio): We skip calculating the proper layout parent style
    // here.
    //
    // It'd be fine to assert that this isn't called with a parent style
    // where display contents is in effect, but in practice this is hard to
    // do for stuff like :-moz-fieldset-content with a
    // <fieldset style="display: contents">. That is, the computed value of
    // display for the fieldset is "contents", even though it's not the used
    // value, so we don't need to adjust in a different way anyway.
    //
    // In practice, I don't think any anonymous content can be a direct
    // descendant of a display: contents element where display: contents is
    // the actual used value, and the computed value of it would need
    // blockification.
    properties::cascade(
        &self.device,
        Some(pseudo),
        rule_node,
        guards,
        parent,  // inherited style
        parent,  // inherited style ignoring first-line
        parent,  // layout parent style
        None,    // no visited values
        font_metrics,
        cascade_flags,
        self.quirks_mode,
        /* rule_cache = */ None,
        &mut Default::default(),
    )
}
/// Returns the rule node for given precomputed pseudo-element.
///
/// If we want to include extra declarations to this precomputed pseudo-element,
/// we can provide a vector of ApplicableDeclarationBlock to extra_declarations
/// argument. This is useful for providing extra @page rules.
pub fn rule_node_for_precomputed_pseudo(
    &self,
    guards: &StylesheetGuards,
    pseudo: &PseudoElement,
    extra_declarations: Option<Vec<ApplicableDeclarationBlock>>,
) -> StrongRuleNode {
    // Declared outside the match so that a borrow of the combined vector
    // can outlive the match arm that builds it.
    let mut decl;
    let declarations = match self.cascade_data.user_agent.precomputed_pseudo_element_decls.get(pseudo) {
        Some(declarations) => {
            match extra_declarations {
                Some(mut extra_decls) => {
                    // Both UA declarations and extras: concatenate into
                    // one owned vector and borrow that.
                    decl = declarations.clone();
                    decl.append(&mut extra_decls);
                    Some(&decl)
                },
                None => Some(declarations),
            }
        }
        // No UA declarations: use only the extras, if any.
        None => extra_declarations.as_ref(),
    };
    match declarations {
        Some(decls) => {
            self.rule_tree.insert_ordered_rules_with_important(
                decls.into_iter().map(|a| (a.source.clone(), a.level())),
                guards
            )
        },
        // Nothing applies: the root of the rule tree.
        None => self.rule_tree.root().clone(),
    }
}
/// Returns the style for an anonymous box of the given type.
///
/// Panics if `pseudo` is not an anonymous-box pseudo-element.
#[cfg(feature = "servo")]
pub fn style_for_anonymous(
    &self,
    guards: &StylesheetGuards,
    pseudo: &PseudoElement,
    parent_style: &ComputedValues
) -> Arc<ComputedValues> {
    use font_metrics::ServoMetricsProvider;
    // For most (but not all) pseudo-elements, we inherit all values from the parent.
    let inherit_all = match *pseudo {
        // Text boxes reset non-inherited properties.
        PseudoElement::ServoText |
        PseudoElement::ServoInputText => false,
        // Wrapper/anonymous boxes inherit everything.
        PseudoElement::ServoAnonymousBlock |
        PseudoElement::ServoAnonymousTable |
        PseudoElement::ServoAnonymousTableCell |
        PseudoElement::ServoAnonymousTableRow |
        PseudoElement::ServoAnonymousTableWrapper |
        PseudoElement::ServoTableWrapper |
        PseudoElement::ServoInlineBlockWrapper |
        PseudoElement::ServoInlineAbsolute => true,
        // Non-anonymous-box pseudos must never reach this function.
        PseudoElement::Before |
        PseudoElement::After |
        PseudoElement::Selection |
        PseudoElement::DetailsSummary |
        PseudoElement::DetailsContent => {
            unreachable!("That pseudo doesn't represent an anonymous box!")
        }
    };
    let mut cascade_flags = CascadeFlags::empty();
    if inherit_all {
        cascade_flags.insert(INHERIT_ALL);
    }
    self.precomputed_values_for_pseudo(
        guards,
        &pseudo,
        Some(parent_style),
        cascade_flags,
        &ServoMetricsProvider
    )
}
/// Computes a pseudo-element style lazily during layout.
///
/// This can only be done for a certain set of pseudo-elements, like
/// :selection.
///
/// Check the documentation on lazy pseudo-elements in
/// docs/components/style.md
pub fn lazily_compute_pseudo_element_style<E>(
    &self,
    guards: &StylesheetGuards,
    element: &E,
    pseudo: &PseudoElement,
    rule_inclusion: RuleInclusion,
    parent_style: &ComputedValues,
    is_probe: bool,
    font_metrics: &FontMetricsProvider
) -> Option<Arc<ComputedValues>>
where
    E: TElement,
{
    // Match the rules for the pseudo, then cascade them with the parent
    // style as the inherited style.
    let inputs = self.lazy_pseudo_rules(
        guards,
        element,
        pseudo,
        is_probe,
        rule_inclusion,
    );
    self.compute_pseudo_element_style_with_inputs(
        &inputs,
        pseudo,
        guards,
        parent_style,
        font_metrics,
    )
}
/// Computes a pseudo-element style lazily using the given CascadeInputs.
/// This can be used for truly lazy pseudo-elements or to avoid redoing
/// selector matching for eager pseudo-elements when we need to recompute
/// their style with a new parent style.
///
/// Returns `None` when the inputs carry no rules at all (i.e. nothing
/// matched the pseudo-element).
pub fn compute_pseudo_element_style_with_inputs(
    &self,
    inputs: &CascadeInputs,
    pseudo: &PseudoElement,
    guards: &StylesheetGuards,
    parent_style: &ComputedValues,
    font_metrics: &FontMetricsProvider
) -> Option<Arc<ComputedValues>> {
    // We may have only visited rules in cases when we are actually
    // resolving, not probing, pseudo-element style.
    if inputs.rules.is_none() && inputs.visited_rules.is_none() {
        return None
    }
    // FIXME(emilio): The lack of layout_parent_style here could be
    // worrying, but we're probably dropping the display fixup for
    // pseudos other than before and after, so it's probably ok.
    //
    // (Though the flags don't indicate so!)
    Some(self.compute_style_with_inputs(
        inputs,
        Some(pseudo),
        guards,
        parent_style,
        parent_style,
        parent_style,
        font_metrics,
        CascadeFlags::empty(),
    ))
}
/// Computes a style using the given CascadeInputs. This can be used to
/// compute a style any time we know what rules apply and just need to use
/// the given parent styles.
///
/// parent_style is the style to inherit from for properties affected by
/// first-line ancestors.
///
/// parent_style_ignoring_first_line is the style to inherit from for
/// properties not affected by first-line ancestors.
///
/// layout_parent_style is the style used for some property fixups. It's
/// the style of the nearest ancestor with a layout box.
///
/// is_link should be true if we're computing style for a link; that affects
/// how :visited handling is done.
pub fn compute_style_with_inputs(
    &self,
    inputs: &CascadeInputs,
    pseudo: Option<&PseudoElement>,
    guards: &StylesheetGuards,
    parent_style: &ComputedValues,
    parent_style_ignoring_first_line: &ComputedValues,
    layout_parent_style: &ComputedValues,
    font_metrics: &FontMetricsProvider,
    cascade_flags: CascadeFlags
) -> Arc<ComputedValues> {
    // We need to compute visited values if we have visited rules or if our
    // parent has visited values.
    let visited_values = if inputs.visited_rules.is_some() || parent_style.get_visited_style().is_some() {
        // Slightly annoying: we know that inputs has either rules or
        // visited rules, but we can't do inputs.rules() up front because
        // maybe it just has visited rules, so can't unwrap_or.
        let rule_node = match inputs.visited_rules.as_ref() {
            Some(rules) => rules,
            None => inputs.rules.as_ref().unwrap(),
        };
        let inherited_style;
        let inherited_style_ignoring_first_line;
        let layout_parent_style_for_visited;
        if cascade_flags.contains(IS_LINK) {
            // We just want to use our parent style as our parent.
            inherited_style = parent_style;
            inherited_style_ignoring_first_line = parent_style_ignoring_first_line;
            layout_parent_style_for_visited = layout_parent_style;
        } else {
            // We want to use the visited bits (if any) from our parent
            // style as our parent.
            inherited_style =
                parent_style.get_visited_style().unwrap_or(parent_style);
            inherited_style_ignoring_first_line =
                parent_style_ignoring_first_line.get_visited_style().unwrap_or(parent_style_ignoring_first_line);
            layout_parent_style_for_visited =
                layout_parent_style.get_visited_style().unwrap_or(layout_parent_style);
        }
        // Cascade the visited rules into a full visited style.
        Some(properties::cascade(
            &self.device,
            pseudo,
            rule_node,
            guards,
            Some(inherited_style),
            Some(inherited_style_ignoring_first_line),
            Some(layout_parent_style_for_visited),
            None,
            font_metrics,
            cascade_flags,
            self.quirks_mode,
            /* rule_cache = */ None,
            &mut Default::default(),
        ))
    } else {
        None
    };
    // We may not have non-visited rules, if we only had visited ones. In
    // that case we want to use the root rulenode for our non-visited rules.
    let rules = inputs.rules.as_ref().unwrap_or(self.rule_tree.root());
    // Read the comment on `precomputed_values_for_pseudo` to see why it's
    // difficult to assert that display: contents nodes never arrive here
    // (tl;dr: It doesn't apply for replaced elements and such, but the
    // computed value is still "contents").
    properties::cascade(
        &self.device,
        pseudo,
        rules,
        guards,
        Some(parent_style),
        Some(parent_style_ignoring_first_line),
        Some(layout_parent_style),
        visited_values,
        font_metrics,
        cascade_flags,
        self.quirks_mode,
        /* rule_cache = */ None,
        &mut Default::default(),
    )
}
/// Whether any origin has rules targeting the given pseudo-element.
fn has_rules_for_pseudo(&self, pseudo: &PseudoElement) -> bool {
    let mut origins = self.cascade_data.iter_origins();
    origins.any(|(data, _)| data.has_rules_for_pseudo(pseudo))
}
/// Computes the cascade inputs for a lazily-cascaded pseudo-element.
///
/// See the documentation on lazy pseudo-elements in
/// docs/components/style.md
///
/// Returns default (empty) inputs when no origin has rules for the pseudo,
/// or when probing finds no unvisited rules.
pub fn lazy_pseudo_rules<E>(
    &self,
    guards: &StylesheetGuards,
    element: &E,
    pseudo: &PseudoElement,
    is_probe: bool,
    rule_inclusion: RuleInclusion
) -> CascadeInputs
where
    E: TElement
{
    let pseudo = pseudo.canonical();
    debug_assert!(pseudo.is_lazy());
    // Fast path: nothing to match against at all.
    if !self.has_rules_for_pseudo(&pseudo) {
        return CascadeInputs::default()
    }
    // Apply the selector flags. We should be in sequential mode
    // already, so we can directly apply the parent flags.
    let mut set_selector_flags = |element: &E, flags: ElementSelectorFlags| {
        if cfg!(feature = "servo") {
            // Servo calls this function from the worker, but only for internal
            // pseudos, so we should never generate selector flags here.
            unreachable!("internal pseudo generated slow selector flags?");
        }
        // No need to bother setting the selector flags when we're computing
        // default styles.
        if rule_inclusion == RuleInclusion::DefaultOnly {
            return;
        }
        // Gecko calls this from sequential mode, so we can directly apply
        // the flags.
        debug_assert!(thread_state::get() == thread_state::LAYOUT);
        let self_flags = flags.for_self();
        if !self_flags.is_empty() {
            unsafe { element.set_selector_flags(self_flags); }
        }
        let parent_flags = flags.for_parent();
        if !parent_flags.is_empty() {
            if let Some(p) = element.parent_element() {
                unsafe { p.set_selector_flags(parent_flags); }
            }
        }
    };
    let mut inputs = CascadeInputs::default();
    let mut declarations = ApplicableDeclarationList::new();
    let mut matching_context =
        MatchingContext::new(MatchingMode::ForStatelessPseudoElement,
                             None,
                             None,
                             self.quirks_mode);
    // Unvisited pass: collect all declarations that match the pseudo.
    self.push_applicable_declarations(
        element,
        Some(&pseudo),
        None,
        None,
        AnimationRules(None, None),
        rule_inclusion,
        &mut declarations,
        &mut matching_context,
        &mut set_selector_flags
    );
    if !declarations.is_empty() {
        let rule_node =
            self.rule_tree.compute_rule_node(&mut declarations, guards);
        debug_assert!(rule_node != *self.rule_tree.root());
        inputs.rules = Some(rule_node);
    }
    if is_probe && inputs.rules.is_none() {
        // When probing, don't compute visited styles if we have no
        // unvisited styles.
        return inputs;
    }
    // Visited pass: re-match with visited handling when a relevant link
    // was found during the unvisited pass.
    if matching_context.relevant_link_found {
        let mut declarations = ApplicableDeclarationList::new();
        let mut matching_context =
            MatchingContext::new_for_visited(
                MatchingMode::ForStatelessPseudoElement,
                None,
                None,
                VisitedHandlingMode::RelevantLinkVisited,
                self.quirks_mode,
            );
        self.push_applicable_declarations(
            element,
            Some(&pseudo),
            None,
            None,
            AnimationRules(None, None),
            rule_inclusion,
            &mut declarations,
            &mut matching_context,
            &mut set_selector_flags
        );
        if !declarations.is_empty() {
            let rule_node =
                self.rule_tree.insert_ordered_rules_with_important(
                    declarations.drain().map(|a| a.order_and_level()),
                    guards);
            // Only keep visited rules if they differ from the empty root.
            if rule_node != *self.rule_tree.root() {
                inputs.visited_rules = Some(rule_node);
            }
        }
    }
    inputs
}
/// Set a given device, which may change the styles that apply to the
/// document.
///
/// Returns the sheet origins that were actually affected.
///
/// This means that we may need to rebuild style data even if the
/// stylesheets haven't changed.
///
/// Also, the device that arrives here may need to take the viewport rules
/// into account.
///
/// For Gecko, this is called when XBL bindings are used by different
/// documents.
pub fn set_device(
    &mut self,
    mut device: Device,
    guards: &StylesheetGuards,
) -> OriginSet {
    if viewport_rule::enabled() {
        // Cascade all @viewport rules into a single effective rule...
        let cascaded_rule = {
            let stylesheets = self.stylesheets.iter();
            ViewportRule {
                declarations: viewport_rule::Cascade::from_stylesheets(
                    stylesheets.clone(),
                    guards,
                    &device
                ).finish(),
            }
        };
        // ...and, if constraints result, adjust the device before storing it.
        self.viewport_constraints =
            ViewportConstraints::maybe_new(&device, &cascaded_rule, self.quirks_mode);
        if let Some(ref constraints) = self.viewport_constraints {
            device.account_for_viewport_rule(constraints);
        }
    }
    self.device = device;
    self.media_features_change_changed_style(guards)
}
/// Returns whether, given a media feature change, any previously-applicable
/// style has become non-applicable, or vice-versa for each origin.
pub fn media_features_change_changed_style(
    &self,
    guards: &StylesheetGuards,
) -> OriginSet {
    debug!("Stylist::media_features_change_changed_style");
    let mut origins = OriginSet::empty();
    let stylesheets = self.stylesheets.iter();
    for (stylesheet, origin) in stylesheets {
        // Once an origin is known-affected, skip its remaining sheets.
        if origins.contains(origin.into()) {
            continue;
        }
        let guard = guards.for_origin(origin);
        let origin_cascade_data =
            self.cascade_data.borrow_for_origin(origin);
        let affected_changed = !origin_cascade_data.media_feature_affected_matches(
            stylesheet,
            guard,
            &self.device,
            self.quirks_mode
        );
        if affected_changed {
            origins |= origin;
        }
    }
    origins
}
/// Returns the viewport constraints that apply to this document because of
/// a @viewport rule, if any were computed.
pub fn viewport_constraints(&self) -> Option<&ViewportConstraints> {
    match self.viewport_constraints {
        Some(ref constraints) => Some(constraints),
        None => None,
    }
}
/// The quirks mode this document is currently using.
pub fn quirks_mode(&self) -> QuirksMode {
    self.quirks_mode
}
/// Sets the quirks mode of the document.
pub fn set_quirks_mode(&mut self, quirks_mode: QuirksMode) {
    // FIXME(emilio): The quirks mode doesn't seem to change dynamically
    // across layout passes, but updating it asynchronously like this is
    // bogus; it should probably be an argument to `update`, taken from the
    // quirks mode info in the `SharedLayoutContext`.
    self.quirks_mode = quirks_mode;
}
/// Returns the applicable CSS declarations for the given element by
/// treating us as an XBL stylesheet-only stylist.
///
/// Only the author-level rule map is consulted; selector flags are
/// deliberately discarded via a no-op setter.
pub fn push_applicable_declarations_as_xbl_only_stylist<E, V>(
    &self,
    element: &E,
    pseudo_element: Option<&PseudoElement>,
    applicable_declarations: &mut V
)
where
    E: TElement,
    V: Push<ApplicableDeclarationBlock> + VecLike<ApplicableDeclarationBlock>,
{
    let mut matching_context =
        MatchingContext::new(MatchingMode::Normal, None, None, self.quirks_mode);
    // XBL matching never records selector flags.
    let mut dummy_flag_setter = |_: &E, _: ElementSelectorFlags| {};
    let rule_hash_target = element.rule_hash_target();
    // nsXBLPrototypeResources::LoadResources() loads Chrome XBL style
    // sheets under eAuthorSheetFeatures level.
    if let Some(map) = self.cascade_data.author.borrow_for_pseudo(pseudo_element) {
        map.get_all_matching_rules(
            element,
            &rule_hash_target,
            applicable_declarations,
            &mut matching_context,
            self.quirks_mode,
            &mut dummy_flag_setter,
            CascadeLevel::XBL,
        );
    }
}
/// Returns the applicable CSS declarations for the given element.
///
/// This corresponds to `ElementRuleCollector` in WebKit.
///
/// Declarations are pushed in cascade order: UA, presentational hints,
/// user, XBL, author, style attribute, SMIL, animations, transitions.
/// !important handling (steps 7-10) happens later, at rule tree insertion.
pub fn push_applicable_declarations<E, V, F>(
    &self,
    element: &E,
    pseudo_element: Option<&PseudoElement>,
    style_attribute: Option<ArcBorrow<Locked<PropertyDeclarationBlock>>>,
    smil_override: Option<ArcBorrow<Locked<PropertyDeclarationBlock>>>,
    animation_rules: AnimationRules,
    rule_inclusion: RuleInclusion,
    applicable_declarations: &mut V,
    context: &mut MatchingContext,
    flags_setter: &mut F,
)
where
    E: TElement,
    V: Push<ApplicableDeclarationBlock> + VecLike<ApplicableDeclarationBlock> + Debug,
    F: FnMut(&E, ElementSelectorFlags),
{
    // Gecko definitely has pseudo-elements with style attributes, like
    // ::-moz-color-swatch.
    debug_assert!(cfg!(feature = "gecko") ||
                  style_attribute.is_none() || pseudo_element.is_none(),
                  "Style attributes do not apply to pseudo-elements");
    debug_assert!(pseudo_element.map_or(true, |p| !p.is_precomputed()));
    let rule_hash_target = element.rule_hash_target();
    debug!("Determining if style is shareable: pseudo: {}",
           pseudo_element.is_some());
    let only_default_rules = rule_inclusion == RuleInclusion::DefaultOnly;
    // Step 1: Normal user-agent rules.
    if let Some(map) = self.cascade_data.user_agent.cascade_data.borrow_for_pseudo(pseudo_element) {
        map.get_all_matching_rules(
            element,
            &rule_hash_target,
            applicable_declarations,
            context,
            self.quirks_mode,
            flags_setter,
            CascadeLevel::UANormal
        );
    }
    if pseudo_element.is_none() && !only_default_rules {
        // Step 2: Presentational hints.
        let length_before_preshints = applicable_declarations.len();
        element.synthesize_presentational_hints_for_legacy_attributes(
            context.visited_handling,
            applicable_declarations
        );
        if applicable_declarations.len() != length_before_preshints {
            // Sanity-check that everything just appended is a pres hint.
            if cfg!(debug_assertions) {
                for declaration in &applicable_declarations[length_before_preshints..] {
                    assert_eq!(declaration.level(), CascadeLevel::PresHints);
                }
            }
        }
    }
    // NB: the following condition, although it may look somewhat
    // inaccurate, would be equivalent to something like:
    //
    //     element.matches_user_and_author_rules() ||
    //     (is_implemented_pseudo &&
    //      rule_hash_target.matches_user_and_author_rules())
    //
    // Which may be more what you would probably expect.
    if rule_hash_target.matches_user_and_author_rules() {
        // Step 3a: User normal rules.
        if let Some(map) = self.cascade_data.user.borrow_for_pseudo(pseudo_element) {
            map.get_all_matching_rules(
                element,
                &rule_hash_target,
                applicable_declarations,
                context,
                self.quirks_mode,
                flags_setter,
                CascadeLevel::UserNormal,
            );
        }
    } else {
        debug!("skipping user rules");
    }
    // Step 3b: XBL rules.
    let cut_off_inheritance =
        element.get_declarations_from_xbl_bindings(
            pseudo_element,
            applicable_declarations,
        );
    if rule_hash_target.matches_user_and_author_rules() && !only_default_rules {
        // Gecko skips author normal rules if cutting off inheritance.
        // See nsStyleSet::FileRules().
        if !cut_off_inheritance {
            // Step 3c: Author normal rules.
            if let Some(map) = self.cascade_data.author.borrow_for_pseudo(pseudo_element) {
                map.get_all_matching_rules(
                    element,
                    &rule_hash_target,
                    applicable_declarations,
                    context,
                    self.quirks_mode,
                    flags_setter,
                    CascadeLevel::AuthorNormal
                );
            }
        } else {
            debug!("skipping author normal rules due to cut off inheritance");
        }
    } else {
        debug!("skipping author normal rules");
    }
    if !only_default_rules {
        // Step 4: Normal style attributes.
        if let Some(sa) = style_attribute {
            Push::push(
                applicable_declarations,
                ApplicableDeclarationBlock::from_declarations(
                    sa.clone_arc(),
                    CascadeLevel::StyleAttributeNormal
                )
            );
        }
        // Step 5: SMIL override.
        // Declarations from SVG SMIL animation elements.
        if let Some(so) = smil_override {
            Push::push(
                applicable_declarations,
                ApplicableDeclarationBlock::from_declarations(
                    so.clone_arc(),
                    CascadeLevel::SMILOverride
                )
            );
        }
        // Step 6: Animations.
        // The animations sheet (CSS animations, script-generated animations,
        // and CSS transitions that are no longer tied to CSS markup)
        if let Some(anim) = animation_rules.0 {
            Push::push(
                applicable_declarations,
                ApplicableDeclarationBlock::from_declarations(
                    anim.clone(),
                    CascadeLevel::Animations
                )
            );
        }
    } else {
        debug!("skipping style attr and SMIL & animation rules");
    }
    //
    // Steps 7-10 correspond to !important rules, and are handled during
    // rule tree insertion.
    //
    if !only_default_rules {
        // Step 11: Transitions.
        // The transitions sheet (CSS transitions that are tied to CSS markup)
        if let Some(anim) = animation_rules.1 {
            Push::push(
                applicable_declarations,
                ApplicableDeclarationBlock::from_declarations(
                    anim.clone(),
                    CascadeLevel::Transitions
                )
            );
        }
    } else {
        debug!("skipping transition rules");
    }
}
/// Given an id, returns whether there might be any rules for that id in any
/// of our rule maps (bloom-filter check, so false positives are possible).
#[inline]
pub fn may_have_rules_for_id(&self, id: &Atom) -> bool {
    let hash = id.get_hash();
    self.cascade_data
        .iter_origins()
        .any(|(data, _)| data.mapped_ids.might_contain_hash(hash))
}
/// Returns the registered `@keyframes` animation for the specified name,
/// searching origins in iteration order and taking the first hit.
#[inline]
pub fn get_animation(&self, name: &Atom) -> Option<&KeyframesAnimation> {
    for (data, _) in self.cascade_data.iter_origins() {
        if let Some(animation) = data.animations.get(name) {
            return Some(animation);
        }
    }
    None
}
/// Computes the match results of a given element against the set of
/// revalidation selectors.
///
/// Returns one bit per revalidation selector, in origin iteration order.
pub fn match_revalidation_selectors<E, F>(
    &self,
    element: &E,
    bloom: Option<&BloomFilter>,
    nth_index_cache: &mut NthIndexCache,
    flags_setter: &mut F
) -> SmallBitVec
where
    E: TElement,
    F: FnMut(&E, ElementSelectorFlags),
{
    // NB: `MatchingMode` doesn't really matter, given we don't share style
    // between pseudos.
    let mut matching_context = MatchingContext::new(
        MatchingMode::Normal,
        bloom,
        Some(nth_index_cache),
        self.quirks_mode
    );
    // Note that, by the time we're revalidating, we're guaranteed that the
    // candidate and the entry have the same id, classes, and local name.
    // This means we're guaranteed to get the same rulehash buckets for all
    // the lookups, which means that the bitvecs are comparable. We verify
    // this in the caller by asserting that the bitvecs are same-length.
    let mut results = SmallBitVec::new();
    for (data, _) in self.cascade_data.iter_origins() {
        data.selectors_for_cache_revalidation.lookup(
            *element,
            self.quirks_mode,
            &mut |selector_and_hashes| {
                results.push(matches_selector(
                    &selector_and_hashes.selector,
                    selector_and_hashes.selector_offset,
                    Some(&selector_and_hashes.hashes),
                    element,
                    &mut matching_context,
                    flags_setter
                ));
                // Keep iterating over all candidate selectors.
                true
            }
        );
    }
    results
}
/// Computes styles for a given declaration with parent_style.
///
/// The declarations are cascaded at style-attribute level on top of
/// `parent_style`, with no pseudo and no visited handling.
pub fn compute_for_declarations(
    &self,
    guards: &StylesheetGuards,
    parent_style: &ComputedValues,
    declarations: Arc<Locked<PropertyDeclarationBlock>>,
) -> Arc<ComputedValues> {
    use font_metrics::get_metrics_provider_for_product;
    let v = vec![ApplicableDeclarationBlock::from_declarations(
        declarations.clone(),
        CascadeLevel::StyleAttributeNormal
    )];
    let rule_node =
        self.rule_tree.insert_ordered_rules(v.into_iter().map(|a| a.order_and_level()));
    // This currently ignores visited styles. It appears to be used for
    // font styles in <canvas> via Servo_StyleSet_ResolveForDeclarations.
    // It is unclear if visited styles are meaningful for this case.
    let metrics = get_metrics_provider_for_product();
    // FIXME(emilio): the pseudo bit looks quite dubious!
    properties::cascade(
        &self.device,
        /* pseudo = */ None,
        &rule_node,
        guards,
        Some(parent_style),
        Some(parent_style),
        Some(parent_style),
        None,
        &metrics,
        CascadeFlags::empty(),
        self.quirks_mode,
        /* rule_cache = */ None,
        &mut Default::default(),
    )
}
/// Shared access to the device this stylist matches against.
pub fn device(&self) -> &Device {
    &self.device
}
/// Mutable access to the device this stylist matches against.
pub fn device_mut(&mut self) -> &mut Device {
    &mut self.device
}
/// Shared access to the rule tree backing this stylist.
pub fn rule_tree(&self) -> &RuleTree {
    &self.rule_tree
}
/// Measures heap usage of the cascade data and rule tree.
#[cfg(feature = "gecko")]
pub fn add_size_of_children(&self, ops: &mut MallocSizeOfOps, sizes: &mut ServoStyleSetSizes) {
    self.cascade_data.add_size_of_children(ops, sizes);
    sizes.mRuleTree += self.rule_tree.size_of(ops);
    // We may measure other fields in the future if DMD says it's worth it.
}
/// Shutdown the static data that this module stores: drops the cached
/// UA cascade data.
pub fn shutdown() {
    let mut cache = UA_CASCADE_DATA_CACHE.lock().unwrap();
    cache.clear()
}
}
/// This struct holds data which users of Stylist may want to extract
/// from stylesheets which can be done at the same time as updating.
///
/// All fields are Gecko-only; the servo build carries an empty struct.
#[derive(Debug, Default)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct ExtraStyleData {
    /// A list of effective font-face rules and their origin.
    #[cfg(feature = "gecko")]
    pub font_faces: Vec<Arc<Locked<FontFaceRule>>>,
    /// A list of effective font-feature-values rules.
    #[cfg(feature = "gecko")]
    pub font_feature_values: Vec<Arc<Locked<FontFeatureValuesRule>>>,
    /// A map of effective counter-style rules, keyed by counter-style name.
    #[cfg(feature = "gecko")]
    pub counter_styles: PrecomputedHashMap<Atom, Arc<Locked<CounterStyleRule>>>,
    /// A map of effective page rules.
    #[cfg(feature = "gecko")]
    pub pages: Vec<Arc<Locked<PageRule>>>,
}
// FIXME(emilio): This is kind of a lie, and relies on us not cloning
// nsCSSFontFaceRules or nsCSSCounterStyleRules OMT (which we don't).
#[cfg(feature = "gecko")]
unsafe impl Sync for ExtraStyleData {}
#[cfg(feature = "gecko")]
unsafe impl Send for ExtraStyleData {}
#[cfg(feature = "gecko")]
impl ExtraStyleData {
    /// Add the given @font-face rule.
    fn add_font_face(&mut self, rule: &Arc<Locked<FontFaceRule>>) {
        self.font_faces.push(rule.clone());
    }
    /// Add the given @font-feature-values rule.
    fn add_font_feature_values(&mut self, rule: &Arc<Locked<FontFeatureValuesRule>>) {
        self.font_feature_values.push(rule.clone());
    }
    /// Add the given @counter-style rule, keyed by its name.
    ///
    /// A later rule with the same name replaces an earlier one.
    fn add_counter_style(
        &mut self,
        guard: &SharedRwLockReadGuard,
        rule: &Arc<Locked<CounterStyleRule>>,
    ) {
        // The name lives in a Gecko nsIAtom; convert it to our Atom key.
        let name = rule.read_with(guard).mName.raw::<nsIAtom>().into();
        self.counter_styles.insert(name, rule.clone());
    }
    /// Add the given @page rule.
    fn add_page(&mut self, rule: &Arc<Locked<PageRule>>) {
        self.pages.push(rule.clone());
    }
}
impl ExtraStyleData {
    /// Drops all collected rules. A no-op in the servo build, which has
    /// none of these fields.
    fn clear(&mut self) {
        #[cfg(feature = "gecko")]
        {
            self.font_faces.clear();
            self.font_feature_values.clear();
            self.counter_styles.clear();
            self.pages.clear();
        }
    }
}
/// An iterator over the different ExtraStyleData, one item per origin.
pub struct ExtraStyleDataIterator<'a>(DocumentCascadeDataIter<'a>);
impl<'a> Iterator for ExtraStyleDataIterator<'a> {
    type Item = (&'a ExtraStyleData, Origin);
    /// Projects the underlying per-origin cascade data iterator down to
    /// just its extra data and origin.
    fn next(&mut self) -> Option<Self::Item> {
        match self.0.next() {
            Some((data, origin)) => Some((&data.extra_data, origin)),
            None => None,
        }
    }
}
#[cfg(feature = "gecko")]
impl MallocSizeOf for ExtraStyleData {
    /// Measure heap usage: the sum of the shallow sizes of each container.
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        self.font_faces.shallow_size_of(ops) +
            self.font_feature_values.shallow_size_of(ops) +
            self.counter_styles.shallow_size_of(ops) +
            self.pages.shallow_size_of(ops)
    }
}
/// SelectorMapEntry implementation for use in our revalidation selector map.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug)]
struct RevalidationSelectorAndHashes {
    #[cfg_attr(feature = "gecko",
               ignore_malloc_size_of = "CssRules have primary refs, we measure there")]
    // The full selector.
    selector: Selector<SelectorImpl>,
    // Offset to iterate from so that a leading pseudo-element compound
    // (if any) is skipped; see `RevalidationSelectorAndHashes::new`.
    selector_offset: usize,
    // Precomputed ancestor hashes for fast-rejection.
    hashes: AncestorHashes,
}
impl RevalidationSelectorAndHashes {
    /// Builds the entry, computing the offset past a leading
    /// pseudo-element compound (if present) so revalidation matches
    /// against the originating element's part of the selector.
    fn new(selector: Selector<SelectorImpl>, hashes: AncestorHashes) -> Self {
        // Walk the first compound selector; only after consuming it can
        // we ask what the first combinator is.
        let mut iter = selector.iter();
        let mut compound_len = 0;
        while iter.next().is_some() {
            compound_len += 1; // one simple selector
        }
        let selector_offset = if let Some(Combinator::PseudoElement) = iter.next_sequence() {
            // Skip the compound and the pseudo-element combinator itself.
            compound_len + 1
        } else {
            0
        };
        RevalidationSelectorAndHashes { selector, selector_offset, hashes }
    }
}
impl SelectorMapEntry for RevalidationSelectorAndHashes {
    /// Iterate from the stored offset, skipping any leading
    /// pseudo-element compound.
    fn selector(&self) -> SelectorIter<SelectorImpl> {
        self.selector.iter_from(self.selector_offset)
    }
}
/// A selector visitor implementation that collects all the state the Stylist
/// cares about a selector.
struct StylistSelectorVisitor<'a> {
    /// Whether the selector needs revalidation for the style sharing cache.
    needs_revalidation: bool,
    /// Whether we've past the rightmost compound selector, not counting
    /// pseudo-elements.
    passed_rightmost_selector: bool,
    /// The filter with all the id's getting referenced from rightmost
    /// selectors.
    mapped_ids: &'a mut NonCountingBloomFilter,
    /// The filter with the local names of attributes there are selectors for.
    attribute_dependencies: &'a mut NonCountingBloomFilter,
    /// Whether there's any attribute selector for the [style] attribute.
    style_attribute_dependency: &'a mut bool,
    /// All the states selectors in the page reference.
    state_dependencies: &'a mut ElementState,
}
/// Whether a single selector component forces cache revalidation: anything
/// whose match outcome isn't captured by the style sharing cache's own
/// checks (attributes, structural pseudo-classes, etc.).
fn component_needs_revalidation(
    c: &Component<SelectorImpl>,
    passed_rightmost_selector: bool,
) -> bool {
    match *c {
        // TODO(emilio): This could also check that the ID is not already in
        // the rule hash. In that case, we could avoid making this a
        // revalidation selector too.
        //
        // See https://bugzilla.mozilla.org/show_bug.cgi?id=1369611
        Component::ID(_) => passed_rightmost_selector,
        Component::NonTSPseudoClass(ref p) => p.needs_cache_revalidation(),
        Component::AttributeInNoNamespaceExists { .. } |
        Component::AttributeInNoNamespace { .. } |
        Component::AttributeOther(_) |
        Component::Empty |
        Component::FirstChild |
        Component::LastChild |
        Component::OnlyChild |
        Component::NthChild(..) |
        Component::NthLastChild(..) |
        Component::NthOfType(..) |
        Component::NthLastOfType(..) |
        Component::FirstOfType |
        Component::LastOfType |
        Component::OnlyOfType => true,
        _ => false,
    }
}
impl<'a> SelectorVisitor for StylistSelectorVisitor<'a> {
    type Impl = SelectorImpl;
    fn visit_complex_selector(
        &mut self,
        combinator: Option<Combinator>
    ) -> bool {
        // Sibling combinators require revalidation for style sharing.
        self.needs_revalidation =
            self.needs_revalidation || combinator.map_or(false, |c| c.is_sibling());
        // NOTE(emilio): This works properly right now because we can't store
        // complex selectors in nested selectors, otherwise we may need to
        // rethink this.
        //
        // Also, note that this call happens before we visit any of the simple
        // selectors in the next ComplexSelector, so we can use this to skip
        // looking at them.
        self.passed_rightmost_selector =
            self.passed_rightmost_selector ||
            !matches!(combinator, None | Some(Combinator::PseudoElement));
        true
    }
    fn visit_attribute_selector(
        &mut self,
        _ns: &NamespaceConstraint<&Namespace>,
        name: &LocalName,
        lower_name: &LocalName
    ) -> bool {
        // [style] is tracked exactly, not via the bloom filter, to avoid
        // false positives from hash collisions.
        if *lower_name == local_name!("style") {
            *self.style_attribute_dependency = true;
        } else {
            // Record both the cased and lowercased names, since matching
            // may use either.
            self.attribute_dependencies.insert_hash(name.get_hash());
            self.attribute_dependencies.insert_hash(lower_name.get_hash());
        }
        true
    }
    fn visit_simple_selector(&mut self, s: &Component<SelectorImpl>) -> bool {
        self.needs_revalidation =
            self.needs_revalidation ||
            component_needs_revalidation(s, self.passed_rightmost_selector);
        match *s {
            Component::NonTSPseudoClass(ref p) => {
                self.state_dependencies.insert(p.state_flag());
            }
            Component::ID(ref id) if !self.passed_rightmost_selector => {
                // We want to stop storing mapped ids as soon as we've moved off
                // the rightmost ComplexSelector that is not a pseudo-element.
                //
                // That can be detected by a visit_complex_selector call with a
                // combinator other than None and PseudoElement.
                //
                // Importantly, this call happens before we visit any of the
                // simple selectors in that ComplexSelector.
                //
                // NOTE(emilio): See the comment regarding on when this may
                // break in visit_complex_selector.
                self.mapped_ids.insert_hash(id.get_hash());
            }
            _ => {},
        }
        true
    }
}
/// Data resulting from performing the CSS cascade that is specific to a given
/// origin.
///
/// FIXME(emilio): Consider renaming and splitting in `CascadeData` and
/// `InvalidationData`? That'd make `clear_cascade_data()` clearer.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Debug)]
struct CascadeData {
/// Rules from stylesheets at this `CascadeData`'s origin.
element_map: SelectorMap<Rule>,
/// Rules from stylesheets at this `CascadeData`'s origin that correspond
/// to a given pseudo-element.
///
/// FIXME(emilio): There are a bunch of wasted entries here in practice.
/// Figure out a good way to do a `PerNonAnonBox` and `PerAnonBox` (for
/// `precomputed_values_for_pseudo`) without duplicating a lot of code.
pseudos_map: PerPseudoElementMap<Box<SelectorMap<Rule>>>,
/// A map with all the animations at this `CascadeData`'s origin, indexed
/// by name.
animations: PrecomputedHashMap<Atom, KeyframesAnimation>,
/// The invalidation map for the rules at this origin.
invalidation_map: InvalidationMap,
/// The attribute local names that appear in attribute selectors. Used
/// to avoid taking element snapshots when an irrelevant attribute changes.
/// (We don't bother storing the namespace, since namespaced attributes
/// are rare.)
#[cfg_attr(feature = "servo", ignore_heap_size_of = "just an array")]
attribute_dependencies: NonCountingBloomFilter,
/// Whether `"style"` appears in an attribute selector. This is not common,
/// and by tracking this explicitly, we can avoid taking an element snapshot
/// in the common case of style=""` changing due to modifying
/// `element.style`. (We could track this in `attribute_dependencies`, like
/// all other attributes, but we should probably not risk incorrectly
/// returning `true` for `"style"` just due to a hash collision.)
style_attribute_dependency: bool,
/// The element state bits that are relied on by selectors. Like
/// `attribute_dependencies`, this is used to avoid taking element snapshots
/// when an irrelevant element state bit changes.
state_dependencies: ElementState,
/// The ids that appear in the rightmost complex selector of selectors (and
/// hence in our selector maps). Used to determine when sharing styles is
/// safe: we disallow style sharing for elements whose id matches this
/// filter, and hence might be in one of our selector maps.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "just an array")]
mapped_ids: NonCountingBloomFilter,
/// Selectors that require explicit cache revalidation (i.e. which depend
/// on state that is not otherwise visible to the cache, like attributes or
/// tree-structural state like child index and pseudos).
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
selectors_for_cache_revalidation: SelectorMap<RevalidationSelectorAndHashes>,
/// Effective media query results cached from the last rebuild.
effective_media_query_results: EffectiveMediaQueryResults,
/// Extra data, like different kinds of rules, etc.
extra_data: ExtraStyleData,
/// A monotonically increasing counter to represent the order on which a
/// style rule appears in a stylesheet, needed to sort them by source order.
rules_source_order: u32,
/// The total number of selectors.
num_selectors: usize,
/// The total number of declarations.
num_declarations: usize,
}
impl CascadeData {
fn new() -> Self {
Self {
element_map: SelectorMap::new(),
pseudos_map: PerPseudoElementMap::default(),
animations: Default::default(),
extra_data: ExtraStyleData::default(),
invalidation_map: InvalidationMap::new(),
attribute_dependencies: NonCountingBloomFilter::new(),
style_attribute_dependency: false,
state_dependencies: ElementState::empty(),
mapped_ids: NonCountingBloomFilter::new(),
selectors_for_cache_revalidation: SelectorMap::new(),
effective_media_query_results: EffectiveMediaQueryResults::new(),
rules_source_order: 0,
num_selectors: 0,
num_declarations: 0,
}
}
/// Collects all the applicable media query results into `results`.
///
/// This duplicates part of the logic in `add_stylesheet`, which is
/// a bit unfortunate.
///
/// FIXME(emilio): With a bit of smartness in
/// `media_feature_affected_matches`, we could convert
/// `EffectiveMediaQueryResults` into a vector without too much effort.
fn collect_applicable_media_query_results_into<S>(
device: &Device,
stylesheet: &S,
guard: &SharedRwLockReadGuard,
results: &mut EffectiveMediaQueryResults,
)
where
S: StylesheetInDocument + ToMediaListKey + 'static,
{
if !stylesheet.enabled() ||
!stylesheet.is_effective_for_device(device, guard) {
return;
}
results.saw_effective(stylesheet);
for rule in stylesheet.effective_rules(device, guard) {
match *rule {
CssRule::Import(ref lock) => {
let import_rule = lock.read_with(guard);
results.saw_effective(import_rule);
}
CssRule::Media(ref lock) => {
let media_rule = lock.read_with(guard);
results.saw_effective(media_rule);
}
_ => {},
}
}
}
// Returns Err(..) to signify OOM
fn add_stylesheet<S>(
&mut self,
device: &Device,
quirks_mode: QuirksMode,
stylesheet: &S,
guard: &SharedRwLockReadGuard,
rebuild_kind: SheetRebuildKind,
mut precomputed_pseudo_element_decls: Option<&mut PrecomputedPseudoElementDeclarations>,
) -> Result<(), FailedAllocationError>
where
S: StylesheetInDocument + ToMediaListKey + 'static,
{
if !stylesheet.enabled() ||
!stylesheet.is_effective_for_device(device, guard) {
return Ok(());
}
let origin = stylesheet.origin(guard);
if rebuild_kind.should_rebuild_invalidation() {
self.effective_media_query_results.saw_effective(stylesheet);
}
for rule in stylesheet.effective_rules(device, guard) {
match *rule {
CssRule::Style(ref locked) => {
let style_rule = locked.read_with(&guard);
self.num_declarations +=
style_rule.block.read_with(&guard).len();
for selector in &style_rule.selectors.0 {
self.num_selectors += 1;
let map = match selector.pseudo_element() {
Some(pseudo) if pseudo.is_precomputed() => {
if !selector.is_universal() ||
!matches!(origin, Origin::UserAgent) {
// ::-moz-tree selectors may appear in
// non-UA sheets (even though they never
// match).
continue;
}
precomputed_pseudo_element_decls
.as_mut()
.expect("Expected precomputed declarations for the UA level")
.get_or_insert_with(&pseudo.canonical(), Vec::new)
.expect("Unexpected tree pseudo-element?")
.push(ApplicableDeclarationBlock::new(
StyleSource::Style(locked.clone()),
self.rules_source_order,
CascadeLevel::UANormal,
selector.specificity()
));
continue;
}
None => &mut self.element_map,
Some(pseudo) => {
self.pseudos_map
.get_or_insert_with(&pseudo.canonical(), || Box::new(SelectorMap::new()))
.expect("Unexpected tree pseudo-element?")
}
};
let hashes =
AncestorHashes::new(&selector, quirks_mode);
let rule = Rule::new(
selector.clone(),
hashes.clone(),
locked.clone(),
self.rules_source_order
);
map.insert(rule, quirks_mode)?;
if rebuild_kind.should_rebuild_invalidation() {
self.invalidation_map
.note_selector(selector, quirks_mode)?;
let mut visitor = StylistSelectorVisitor {
needs_revalidation: false,
passed_rightmost_selector: false,
attribute_dependencies: &mut self.attribute_dependencies,
style_attribute_dependency: &mut self.style_attribute_dependency,
state_dependencies: &mut self.state_dependencies,
mapped_ids: &mut self.mapped_ids,
};
selector.visit(&mut visitor);
if visitor.needs_revalidation {
self.selectors_for_cache_revalidation.insert(
RevalidationSelectorAndHashes::new(selector.clone(), hashes),
quirks_mode
)?;
}
}
}
self.rules_source_order += 1;
}
CssRule::Import(ref lock) => {
if rebuild_kind.should_rebuild_invalidation() {
let import_rule = lock.read_with(guard);
self.effective_media_query_results
.saw_effective(import_rule);
}
// NOTE: effective_rules visits the inner stylesheet if
// appropriate.
}
CssRule::Media(ref lock) => {
if rebuild_kind.should_rebuild_invalidation() {
let media_rule = lock.read_with(guard);
self.effective_media_query_results
.saw_effective(media_rule);
}
}
CssRule::Keyframes(ref keyframes_rule) => {
let keyframes_rule = keyframes_rule.read_with(guard);
debug!("Found valid keyframes rule: {:?}", *keyframes_rule);
// Don't let a prefixed keyframes animation override a non-prefixed one.
let needs_insertion =
keyframes_rule.vendor_prefix.is_none() ||
self.animations.get(keyframes_rule.name.as_atom())
.map_or(true, |rule| rule.vendor_prefix.is_some());
if needs_insertion {
let animation = KeyframesAnimation::from_keyframes(
&keyframes_rule.keyframes, keyframes_rule.vendor_prefix.clone(), guard);
debug!("Found valid keyframe animation: {:?}", animation);
self.animations
.try_insert(keyframes_rule.name.as_atom().clone(), animation)?;
}
}
#[cfg(feature = "gecko")]
CssRule::FontFace(ref rule) => {
self.extra_data.add_font_face(rule);
}
#[cfg(feature = "gecko")]
CssRule::FontFeatureValues(ref rule) => {
self.extra_data.add_font_feature_values(rule);
}
#[cfg(feature = "gecko")]
CssRule::CounterStyle(ref rule) => {
self.extra_data.add_counter_style(guard, rule);
}
#[cfg(feature = "gecko")]
CssRule::Page(ref rule) => {
self.extra_data.add_page(rule);
}
// We don't care about any other rule.
_ => {}
}
}
Ok(())
}
/// Returns whether all the media-feature affected values matched before and
/// match now in the given stylesheet.
fn media_feature_affected_matches<S>(
&self,
stylesheet: &S,
guard: &SharedRwLockReadGuard,
device: &Device,
quirks_mode: QuirksMode,
) -> bool
where
S: StylesheetInDocument + ToMediaListKey + 'static,
{
use invalidation::media_queries::PotentiallyEffectiveMediaRules;
let effective_now =
stylesheet.is_effective_for_device(device, guard);
let effective_then =
self.effective_media_query_results.was_effective(stylesheet);
if effective_now != effective_then {
debug!(" > Stylesheet changed -> {}, {}",
effective_then, effective_now);
return false;
}
if !effective_now {
return true;
}
let mut iter =
stylesheet.iter_rules::<PotentiallyEffectiveMediaRules>(device, guard);
while let Some(rule) = iter.next() {
match *rule {
CssRule::Style(..) |
CssRule::Namespace(..) |
CssRule::FontFace(..) |
CssRule::CounterStyle(..) |
CssRule::Supports(..) |
CssRule::Keyframes(..) |
CssRule::Page(..) |
CssRule::Viewport(..) |
CssRule::Document(..) |
CssRule::FontFeatureValues(..) => {
// Not affected by device changes.
continue;
}
CssRule::Import(ref lock) => {
let import_rule = lock.read_with(guard);
let effective_now =
import_rule.stylesheet
.is_effective_for_device(&device, guard);
let effective_then =
self.effective_media_query_results.was_effective(import_rule);
if effective_now != effective_then {
debug!(" > @import rule changed {} -> {}",
effective_then, effective_now);
return false;
}
if !effective_now {
iter.skip_children();
}
}
CssRule::Media(ref lock) => {
let media_rule = lock.read_with(guard);
let mq = media_rule.media_queries.read_with(guard);
let effective_now = mq.evaluate(device, quirks_mode);
let effective_then =
self.effective_media_query_results.was_effective(media_rule);
if effective_now != effective_then {
debug!(" > @media rule changed {} -> {}",
effective_then, effective_now);
return false;
}
if !effective_now {
iter.skip_children();
}
}
}
}<|fim▁hole|> #[inline]
fn borrow_for_pseudo(&self, pseudo: Option<&PseudoElement>) -> Option<&SelectorMap<Rule>> {
match pseudo {
Some(pseudo) => self.pseudos_map.get(&pseudo.canonical()).map(|p| &**p),
None => Some(&self.element_map),
}
}
fn has_rules_for_pseudo(&self, pseudo: &PseudoElement) -> bool {
self.pseudos_map.get(pseudo).is_some()
}
/// Clears the cascade data, but not the invalidation data.
fn clear_cascade_data(&mut self) {
self.element_map.clear();
self.pseudos_map.clear();
self.animations.clear();
self.extra_data.clear();
self.rules_source_order = 0;
self.num_selectors = 0;
self.num_declarations = 0;
}
fn clear(&mut self) {
self.clear_cascade_data();
self.effective_media_query_results.clear();
self.invalidation_map.clear();
self.attribute_dependencies.clear();
self.style_attribute_dependency = false;
self.state_dependencies = ElementState::empty();
self.mapped_ids.clear();
self.selectors_for_cache_revalidation.clear();
}
/// Measures heap usage.
#[cfg(feature = "gecko")]
pub fn add_size_of_children(&self, ops: &mut MallocSizeOfOps, sizes: &mut ServoStyleSetSizes) {
sizes.mElementAndPseudosMaps += self.element_map.size_of(ops);
for elem in self.pseudos_map.iter() {
if let Some(ref elem) = *elem {
sizes.mElementAndPseudosMaps += <Box<_> as MallocSizeOf>::size_of(elem, ops);
}
}
sizes.mOther += self.animations.size_of(ops);
sizes.mInvalidationMap += self.invalidation_map.size_of(ops);
sizes.mRevalidationSelectors += self.selectors_for_cache_revalidation.size_of(ops);
sizes.mOther += self.effective_media_query_results.size_of(ops);
sizes.mOther += self.extra_data.size_of(ops);
}
}
impl Default for CascadeData {
fn default() -> Self {
CascadeData::new()
}
}
/// A rule, that wraps a style rule, but represents a single selector of the
/// rule.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug)]
pub struct Rule {
/// The selector this struct represents. We store this and the
/// any_{important,normal} booleans inline in the Rule to avoid
/// pointer-chasing when gathering applicable declarations, which
/// can ruin performance when there are a lot of rules.
#[cfg_attr(feature = "gecko",
ignore_malloc_size_of = "CssRules have primary refs, we measure there")]
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub selector: Selector<SelectorImpl>,
/// The ancestor hashes associated with the selector.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "No heap data")]
pub hashes: AncestorHashes,
/// The source order this style rule appears in. Note that we only use
/// three bytes to store this value in ApplicableDeclarationsBlock, so
/// we could repurpose that storage here if we needed to.
pub source_order: u32,
/// The actual style rule.
#[cfg_attr(feature = "gecko",
ignore_malloc_size_of =
"Secondary ref. Primary ref is in StyleRule under Stylesheet.")]
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub style_rule: Arc<Locked<StyleRule>>,
}
impl SelectorMapEntry for Rule {
fn selector(&self) -> SelectorIter<SelectorImpl> {
self.selector.iter()
}
}
impl Rule {
/// Returns the specificity of the rule.
pub fn specificity(&self) -> u32 {
self.selector.specificity()
}
/// Turns this rule into an `ApplicableDeclarationBlock` for the given
/// cascade level.
pub fn to_applicable_declaration_block(
&self,
level: CascadeLevel
) -> ApplicableDeclarationBlock {
let source = StyleSource::Style(self.style_rule.clone());
ApplicableDeclarationBlock::new(
source,
self.source_order,
level,
self.specificity()
)
}
/// Creates a new Rule.
pub fn new(selector: Selector<SelectorImpl>,
hashes: AncestorHashes,
style_rule: Arc<Locked<StyleRule>>,
source_order: u32)
-> Self
{
Rule {
selector: selector,
hashes: hashes,
style_rule: style_rule,
source_order: source_order,
}
}
}
/// A function to be able to test the revalidation stuff.
pub fn needs_revalidation_for_testing(s: &Selector<SelectorImpl>) -> bool {
let mut attribute_dependencies = NonCountingBloomFilter::new();
let mut mapped_ids = NonCountingBloomFilter::new();
let mut style_attribute_dependency = false;
let mut state_dependencies = ElementState::empty();
let mut visitor = StylistSelectorVisitor {
needs_revalidation: false,
passed_rightmost_selector: false,
attribute_dependencies: &mut attribute_dependencies,
style_attribute_dependency: &mut style_attribute_dependency,
state_dependencies: &mut state_dependencies,
mapped_ids: &mut mapped_ids,
};
s.visit(&mut visitor);
visitor.needs_revalidation
}<|fim▁end|> |
true
}
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>use std::env;
use std::path::PathBuf;
fn main() {
let target = env::var("TARGET").unwrap();
if target.contains("pc-windows") {
let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
let mut lib_dir = manifest_dir.clone();
let mut dll_dir = manifest_dir.clone();
if target.contains("msvc") {
lib_dir.push("msvc");
dll_dir.push("msvc");
}
else {
lib_dir.push("gnu-mingw");<|fim▁hole|> if target.contains("x86_64") {
lib_dir.push("64");
dll_dir.push("64");
}
else {
lib_dir.push("32");
dll_dir.push("32");
}
println!("cargo:rustc-link-search=all={}", lib_dir.display());
for entry in std::fs::read_dir(dll_dir).expect("Can't read DLL dir") {
let entry_path = entry.expect("Invalid fs entry").path();
let file_name_result = entry_path.file_name();
let mut new_file_path = manifest_dir.clone();
if let Some(file_name) = file_name_result {
let file_name = file_name.to_str().unwrap();
if file_name.ends_with(".dll") {
new_file_path.push(file_name);
std::fs::copy(&entry_path, new_file_path.as_path()).expect("Can't copy from DLL dir");
}
}
}
}
}<|fim▁end|> | dll_dir.push("gnu-mingw");
}
lib_dir.push("lib");
dll_dir.push("dll"); |
<|file_name|>DropdownAction.ts<|end_file_name|><|fim▁begin|>import ActionInterface from "@enhavo/app/action/ActionInterface";
import AbstractAction from "@enhavo/app/action/model/AbstractAction";<|fim▁hole|>{
items: ActionInterface[];
closeAfter: boolean;
execute(): void
{
}
}<|fim▁end|> |
export default class DropdownAction extends AbstractAction |
<|file_name|>test_types.py<|end_file_name|><|fim▁begin|># Copyright 2011 OpenStack Foundation
# aLL Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
from oslo_utils import timeutils
import six
import webob
from cinder.api.v2 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.volume import volume_types
def stub_volume_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"
}
return dict(
id=id,
name='vol_type_%s' % six.text_type(id),
description='vol_type_desc_%s' % six.text_type(id),
extra_specs=specs,
)
def return_volume_types_get_all_types(context, search_opts=None):
return dict(
vol_type_1=stub_volume_type(1),
vol_type_2=stub_volume_type(2),
vol_type_3=stub_volume_type(3)
)
def return_empty_volume_types_get_all_types(context, search_opts=None):
return {}
def return_volume_types_get_volume_type(context, id):
if id == "777":
raise exception.VolumeTypeNotFound(volume_type_id=id)
return stub_volume_type(id)
def return_volume_types_get_by_name(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return stub_volume_type(int(name.split("_")[2]))
def return_volume_types_get_default():
return stub_volume_type(1)
def return_volume_types_get_default_not_found():
return {}
class VolumeTypesApiTest(test.TestCase):
def setUp(self):
super(VolumeTypesApiTest, self).setUp()
self.controller = types.VolumeTypesController()
def test_volume_types_index(self):
self.stubs.Set(volume_types, 'get_all_types',
return_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['volume_types']))
expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
actual_names = map(lambda e: e['name'], res_dict['volume_types'])
self.assertEqual(set(actual_names), set(expected_names))
for entry in res_dict['volume_types']:
self.assertEqual('value1', entry['extra_specs']['key1'])
def test_volume_types_index_no_data(self):
self.stubs.Set(volume_types, 'get_all_types',
return_empty_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['volume_types']))
def test_volume_types_show(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
type_id = str(uuid.uuid4())
req = fakes.HTTPRequest.blank('/v2/fake/types/' + type_id)
res_dict = self.controller.show(req, type_id)
self.assertEqual(1, len(res_dict))
self.assertEqual(type_id, res_dict['volume_type']['id'])
type_name = 'vol_type_' + type_id
self.assertEqual(type_name, res_dict['volume_type']['name'])
def test_volume_types_show_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, '777')
def test_get_default(self):
self.stubs.Set(volume_types, 'get_default_volume_type',
return_volume_types_get_default)
req = fakes.HTTPRequest.blank('/v2/fake/types/default')
req.method = 'GET'
res_dict = self.controller.show(req, 'default')
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
self.assertEqual('vol_type_desc_1',
res_dict['volume_type']['description'])
def test_get_default_not_found(self):
self.stubs.Set(volume_types, 'get_default_volume_type',
return_volume_types_get_default_not_found)
req = fakes.HTTPRequest.blank('/v2/fake/types/default')
req.method = 'GET'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'default')
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,<|fim▁hole|>
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
extra_specs={},
id=42,
)
self.assertDictMatch(output['volume_type'], expected_volume_type)
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_volume_types = []
for i in range(0, 10):
raw_volume_types.append(
dict(
name='new_type',
description='new_type_desc',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.index(request, raw_volume_types)
self.assertIn('volume_types', output)
for i in range(0, 10):
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
extra_specs={},
id=42 + i
)
self.assertDictMatch(output['volume_types'][i],
expected_volume_type)
class VolumeTypesSerializerTest(test.TestCase):
def _verify_volume_type(self, vtype, tree):
self.assertEqual('volume_type', tree.tag)
self.assertEqual(vtype['name'], tree.get('name'))
self.assertEqual(vtype['description'], tree.get('description'))
self.assertEqual(str(vtype['id']), tree.get('id'))
self.assertEqual(1, len(tree))
extra_specs = tree[0]
self.assertEqual('extra_specs', extra_specs.tag)
seen = set(vtype['extra_specs'].keys())
for child in extra_specs:
self.assertIn(child.tag, seen)
self.assertEqual(vtype['extra_specs'][child.tag], child.text)
seen.remove(child.tag)
self.assertEqual(len(seen), 0)
def test_index_serializer(self):
serializer = types.VolumeTypesTemplate()
# Just getting some input data
vtypes = return_volume_types_get_all_types(None)
text = serializer.serialize({'volume_types': vtypes.values()})
tree = etree.fromstring(text)
self.assertEqual('volume_types', tree.tag)
self.assertEqual(len(vtypes), len(tree))
for child in tree:
name = child.get('name')
self.assertIn(name, vtypes)
self._verify_volume_type(vtypes[name], child)
def test_voltype_serializer(self):
serializer = types.VolumeTypeTemplate()
vtype = stub_volume_type(1)
text = serializer.serialize(dict(volume_type=vtype))
tree = etree.fromstring(text)
self._verify_volume_type(vtype, tree)<|fim▁end|> | id=42,
) |
<|file_name|>extern-fail.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}<|fim▁hole|>extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() {
for _ in range(0, 10u) {
task::spawn(proc() {
let result = count(5u);
println!("result = %?", result);
fail!();
});
}
}<|fim▁end|> | }
|
<|file_name|>explicit_self_xcrate.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|> #[inline(always)]
fn f(&self);
}
pub struct Bar {
x: ~str
}
impl Foo for Bar {
#[inline(always)]
fn f(&self) {
io::println((*self).x);
}
}<|fim▁end|> | // option. This file may not be copied, modified, or distributed
// except according to those terms.
pub trait Foo { |
<|file_name|>test_structural.py<|end_file_name|><|fim▁begin|>"""
Tests for structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.datasets import macrodata
from statsmodels.tsa.statespace import structural
from statsmodels.tsa.statespace.structural import UnobservedComponents
from .results import results_structural
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')
def run_ucm(name):
true = getattr(results_structural, name)
for model in true['models']:
kwargs = model.copy()
kwargs.update(true['kwargs'])
# Make a copy of the data
values = dta.copy()
freq = kwargs.pop('freq', None)
if freq is not None:
values.index = pd.date_range(start='1959-01-01', periods=len(dta),
freq=freq)
# Test pandas exog
if 'exog' in kwargs:
# Default value here is pd.Series object
exog = np.log(values['realgdp'])
# Also allow a check with a 1-dim numpy array
if kwargs['exog'] == 'numpy':
exog = exog.values.squeeze()
kwargs['exog'] = exog
# Create the model
mod = UnobservedComponents(values['unemp'], **kwargs)
# Smoke test for starting parameters, untransform, transform
# Also test that transform and untransform are inverses
mod.start_params
assert_allclose(mod.start_params, mod.transform_params(mod.untransform_params(mod.start_params)))
# Fit the model at the true parameters
res_true = mod.filter(true['params'])
# Check that the cycle bounds were computed correctly
freqstr = freq[0] if freq is not None else values.index.freqstr[0]
if freqstr == 'A':
cycle_period_bounds = (1.5, 12)
elif freqstr == 'Q':
cycle_period_bounds = (1.5*4, 12*4)
elif freqstr == 'M':
cycle_period_bounds = (1.5*12, 12*12)
else:
# If we have no information on data frequency, require the
# cycle frequency to be between 0 and pi
cycle_period_bounds = (2, np.inf)
# Test that the cycle frequency bound is correct
assert_equal(mod.cycle_frequency_bound,
(2*np.pi / cycle_period_bounds[1],
2*np.pi / cycle_period_bounds[0])
)
# Test that the likelihood is correct
rtol = true.get('rtol', 1e-7)
atol = true.get('atol', 0)
assert_allclose(res_true.llf, true['llf'], rtol=rtol, atol=atol)
# Smoke test for plot_components
if have_matplotlib:
fig = res_true.plot_components()
plt.close(fig)
# Now fit the model via MLE
with warnings.catch_warnings(record=True) as w:
res = mod.fit(disp=-1)
# If we found a higher likelihood, no problem; otherwise check
# that we're very close to that found by R
if res.llf <= true['llf']:
assert_allclose(res.llf, true['llf'], rtol=1e-4)
# Smoke test for summary
res.summary()
def test_irregular():
run_ucm('irregular')
def test_fixed_intercept():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_intercept')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_constant():
run_ucm('deterministic_constant')
def test_random_walk():
run_ucm('random_walk')
def test_local_level():
run_ucm('local_level')
def test_fixed_slope():
run_ucm('fixed_slope')
def test_fixed_slope():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_slope')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_trend():
run_ucm('deterministic_trend')
def test_random_walk_with_drift():
run_ucm('random_walk_with_drift')
<|fim▁hole|> run_ucm('local_linear_deterministic_trend')
def test_local_linear_trend():
run_ucm('local_linear_trend')
def test_smooth_trend():
run_ucm('smooth_trend')
def test_random_trend():
run_ucm('random_trend')
def test_cycle():
run_ucm('cycle')
def test_seasonal():
run_ucm('seasonal')
def test_reg():
run_ucm('reg')
def test_rtrend_ar1():
run_ucm('rtrend_ar1')
def test_lltrend_cycle_seasonal_reg_ar1():
run_ucm('lltrend_cycle_seasonal_reg_ar1')
def test_mle_reg():
endog = np.arange(100)*1.0
exog = endog*2
# Make the fit not-quite-perfect
endog[::2] += 0.01
endog[1::2] -= 0.01
with warnings.catch_warnings(record=True) as w:
mod1 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=False)
res1 = mod1.fit(disp=-1)
mod2 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=True)
res2 = mod2.fit(disp=-1)
assert_allclose(res1.regression_coefficients.filtered[0, -1], 0.5, atol=1e-5)
assert_allclose(res2.params[1], 0.5, atol=1e-5)
def test_specifications():
endog = [1, 2]
# Test that when nothing specified, a warning is issued and the model that
# is fit is one with irregular=True and nothing else.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog)
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'irregular')
# Test an invalid string trend specification
assert_raises(ValueError, UnobservedComponents, endog, 'invalid spec')
# Test that if a trend component is specified without a level component,
# a warning is issued and a deterministic level component is added
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog, trend=True, irregular=True)
message = ("Trend component specified without level component;"
" deterministic level component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'deterministic trend')
# Test that if a string specification is provided, a warning is issued if
# the boolean attributes are also specified
trend_attributes = ['irregular', 'trend', 'stochastic_level',
'stochastic_trend']
for attribute in trend_attributes:
with warnings.catch_warnings(record=True) as w:
kwargs = {attribute: True}
mod = UnobservedComponents(endog, 'deterministic trend', **kwargs)
message = ("Value of `%s` may be overridden when the trend"
" component is specified using a model string."
% attribute)
assert_equal(str(w[0].message), message)
# Test that a seasonal with period less than two is invalid
assert_raises(ValueError, UnobservedComponents, endog, seasonal=1)
def test_start_params():
# Test that the behavior is correct for multiple exogenous and / or
# autoregressive components
# Parameters
nobs = int(1e4)
beta = np.r_[10, -2]
phi = np.r_[0.5, 0.1]
# Generate data
np.random.seed(1234)
exog = np.c_[np.ones(nobs), np.arange(nobs)*1.0]
eps = np.random.normal(size=nobs)
endog = np.zeros(nobs+2)
for t in range(1, nobs):
endog[t+1] = phi[0] * endog[t] + phi[1] * endog[t-1] + eps[t]
endog = endog[2:]
endog += np.dot(exog, beta)
# Now just test that the starting parameters are approximately what they
# ought to be (could make this arbitrarily precise by increasing nobs,
# but that would slow down the test for no real gain)
mod = UnobservedComponents(endog, exog=exog, autoregressive=2)
assert_allclose(mod.start_params, [1., 0.5, 0.1, 10, -2], atol=1e-1)
def test_forecast():
endog = np.arange(50) + 10
exog = np.arange(50)
mod = UnobservedComponents(endog, exog=exog, level='dconstant')
res = mod.smooth([1e-15, 1])
actual = res.forecast(10, exog=np.arange(50,60)[:,np.newaxis])
desired = np.arange(50,60) + 10
assert_allclose(actual, desired)<|fim▁end|> | def test_local_linear_deterministic_trend(): |
<|file_name|>test_s3_file_transform.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import errno
import io
import os
import shutil
import sys
import unittest
from tempfile import mkdtemp
from unittest import mock
import boto3
import pytest
from moto import mock_s3
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.operators.s3 import S3FileTransformOperator
class TestS3FileTransformOperator(unittest.TestCase):
def setUp(self):
self.content = b"input"
self.bucket = "bucket"
self.input_key = "foo"
self.output_key = "bar"
self.bio = io.BytesIO(self.content)
self.tmp_dir = mkdtemp(prefix='test_tmpS3FileTransform_')
self.transform_script = os.path.join(self.tmp_dir, "transform.py")
os.mknod(self.transform_script)
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
# ENOENT - no such file or directory
if e.errno != errno.ENOENT:
raise e
@mock.patch('subprocess.Popen')
@mock.patch.object(S3FileTransformOperator, 'log')
@mock_s3
def test_execute_with_transform_script(self, mock_log, mock_popen):
process_output = [b"Foo", b"Bar", b"Baz"]
self.mock_process(mock_popen, process_output=process_output)
input_path, output_path = self.s3_paths()
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
replace=True,
task_id="task_id",
)
op.execute(None)
mock_log.info.assert_has_calls(
[mock.call(line.decode(sys.getdefaultencoding())) for line in process_output]
)
@mock.patch('subprocess.Popen')
@mock_s3
def test_execute_with_failing_transform_script(self, mock_popen):
self.mock_process(mock_popen, return_code=42)
input_path, output_path = self.s3_paths()
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
replace=True,
task_id="task_id",
)
with pytest.raises(AirflowException) as ctx:
op.execute(None)
assert 'Transform script failed: 42' == str(ctx.value)
@mock.patch('subprocess.Popen')<|fim▁hole|> input_path, output_path = self.s3_paths()
script_args = ['arg1', 'arg2']
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
script_args=script_args,
replace=True,
task_id="task_id",
)
op.execute(None)
assert script_args == mock_popen.call_args[0][0][3:]
@mock.patch('airflow.providers.amazon.aws.hooks.s3.S3Hook.select_key', return_value="input")
@mock_s3
def test_execute_with_select_expression(self, mock_select_key):
input_path, output_path = self.s3_paths()
select_expression = "SELECT * FROM s3object s"
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
select_expression=select_expression,
replace=True,
task_id="task_id",
)
op.execute(None)
mock_select_key.assert_called_once_with(key=input_path, expression=select_expression)
conn = boto3.client('s3')
result = conn.get_object(Bucket=self.bucket, Key=self.output_key)
assert self.content == result['Body'].read()
@staticmethod
def mock_process(mock_popen, return_code=0, process_output=None):
mock_proc = mock.MagicMock()
mock_proc.returncode = return_code
mock_proc.stdout.readline.side_effect = process_output or []
mock_proc.wait.return_value = None
mock_popen.return_value.__enter__.return_value = mock_proc
def s3_paths(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.bucket)
conn.upload_fileobj(Bucket=self.bucket, Key=self.input_key, Fileobj=self.bio)
s3_url = "s3://{0}/{1}"
input_path = s3_url.format(self.bucket, self.input_key)
output_path = s3_url.format(self.bucket, self.output_key)
return input_path, output_path<|fim▁end|> | @mock_s3
def test_execute_with_transform_script_args(self, mock_popen):
self.mock_process(mock_popen, process_output=[b"Foo", b"Bar", b"Baz"]) |
<|file_name|>galleries.js<|end_file_name|><|fim▁begin|>const keystone = require('keystone');
const Types = keystone.Field.Types;
const Gallery = new keystone.List('Gallery', {
autokey: { path: 'slug', from: 'title', unique: true },
map: { name: 'title' },
defaultSort: 'order'
});
Gallery.add({
title: { type: String, required: true },
state: { type: Types.Select, options: 'draft, published', default: 'draft' },<|fim▁hole|> order: { type: Types.Number, format: false },
items: { type: Types.Relationship, ref: 'Gallery item', many: true },
appearance: { type: Types.Select, options: 'grid, carousel, block', default: 'grid'},
description: { type: Types.Textarea }
});
Gallery.defaultColumns = 'title, state|20%, order|20%'
module.exports = Gallery;<|fim▁end|> | |
<|file_name|>blob.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Support for converting Mononoke data structures into in-memory blobs.
use anyhow::Result;
use blobstore::BlobstoreBytes;
use bytes::Bytes;
use crate::typed_hash::{
ChangesetId, ContentChunkId, ContentId, ContentMetadataId, DeletedManifestId, FastlogBatchId,
FileUnodeId, FsnodeId, ManifestUnodeId, RawBundle2Id, RedactionKeyListId, SkeletonManifestId,
};
/// A serialized blob in memory.
pub struct Blob<Id> {
id: Id,
data: Bytes,
}
impl<Id> Blob<Id> {
pub fn new(id: Id, data: Bytes) -> Self {
Self { id, data }
}
#[inline]
pub fn len(&self) -> usize {
self.data.len()
}
pub fn id(&self) -> &Id {
&self.id
}
<|fim▁hole|> pub fn data(&self) -> &Bytes {
&self.data
}
}
pub type ChangesetBlob = Blob<ChangesetId>;
pub type ContentBlob = Blob<ContentId>;
pub type ContentChunkBlob = Blob<ContentChunkId>;
pub type RawBundle2Blob = Blob<RawBundle2Id>;
pub type FileUnodeBlob = Blob<FileUnodeId>;
pub type ManifestUnodeBlob = Blob<ManifestUnodeId>;
pub type DeletedManifestBlob = Blob<DeletedManifestId>;
pub type FsnodeBlob = Blob<FsnodeId>;
pub type SkeletonManifestBlob = Blob<SkeletonManifestId>;
pub type ContentMetadataBlob = Blob<ContentMetadataId>;
pub type FastlogBatchBlob = Blob<FastlogBatchId>;
pub type RedactionKeyListBlob = Blob<RedactionKeyListId>;
impl<Id> From<Blob<Id>> for BlobstoreBytes {
#[inline]
fn from(blob: Blob<Id>) -> BlobstoreBytes {
BlobstoreBytes::from_bytes(blob.data)
}
}
pub trait BlobstoreValue: Sized + Send {
type Key;
fn into_blob(self) -> Blob<Self::Key>;
fn from_blob(blob: Blob<Self::Key>) -> Result<Self>;
}<|fim▁end|> | |
<|file_name|>contact_me.js<|end_file_name|><|fim▁begin|>$(function() {
$("input,textarea").jqBootstrapValidation({
preventSubmit: true,
submitError: function($form, event, errors) {
// additional error messages or events
},
submitSuccess: function($form, event) {
event.preventDefault(); // prevent default submit behaviour
// get values from FORM
var name = $("input#name").val();
var email = $("input#email").val();
var phone = $("input#phone").val();
var message = $("textarea#message").val();
var firstName = name; // For Success/Failure Message
// Check for white space in name for Success/Fail message
if (firstName.indexOf(' ') >= 0) {
firstName = name.split(' ').slice(0, -1).join(' ');
}
$.ajax({
url: "././mail/contact_me.php",
type: "POST",
data: {
name: name,
phone: phone,
email: email,
message: message
},
cache: false,
success: function() {
// Success message
$('#success').html("<div class='alert alert-success'>");
$('#success > .alert-success').html("<button type='button' class='close' data-dismiss='alert' aria-hidden='true'>×")
.append("</button>");
$('#success > .alert-success')
.append("<strong>Votre message a bien été envoyé. </strong>");
$('#success > .alert-success')
.append('</div>');
//clear all fields
$('#contactForm').trigger("reset");
},
error: function() {
// Fail message
$('#success').html("<div class='alert alert-danger'>");
$('#success > .alert-danger').html("<button type='button' class='close' data-dismiss='alert' aria-hidden='true'>×")
.append("</button>");
$('#success > .alert-danger').append("<strong>Désolé " + firstName + ", il semble que mon serveur mail ne réponde pas. Réessayez plus tard !");
$('#success > .alert-danger').append('</div>');
//clear all fields
$('#contactForm').trigger("reset");
},
})
},
filter: function() {
return $(this).is(":visible");
},
});
$("a[data-toggle=\"tab\"]").click(function(e) {
e.preventDefault();
$(this).tab("show");<|fim▁hole|> });
});
/*When clicking on Full hide fail/success boxes */
$('#name').focus(function() {
$('#success').html('');
});<|fim▁end|> | |
<|file_name|>cycle_basis.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
cycle_basis.py
functions for calculating the cycle basis of a graph
"""
from numpy import *
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.path import Path
if matplotlib.__version__ >= '1.3.0':
from matplotlib.path import Path
else:
from matplotlib import nxutils
from itertools import chain
from itertools import ifilterfalse
from itertools import izip
from itertools import tee
from collections import defaultdict
import time
from helpers import *
class Cycle():
""" Represents a set of nodes that make up a cycle in some
graph. Is hashable and does not care about orientation or things
like that, two cycles are equal if they share the same nodes.
A cycle can be compared to a set or frozenset of nodes.
path is a list of vertices describing a closed path in the cycle.
if it is absent, a closed path will be calculated together with
coordinates.
coords is an array of x-y pairs representing the coordinates of
the cycle path elements.
"""
def __init__(self, graph, edges, coords=None):
""" Initializes the Cycle with an edge list representing the
cycle.
All edges should be ordered such that a cycle is represented
as
(1,2)(2,3)(3,4)...(n-2,n-1)(n-1,1)
Parameters:
graph: The underlying graph object
edges: The edge list making up the cycle.
is_ordered: If set to false, will use the neighborhood
information from graph to construct ordered edge set
from unordered one.
In case the unordered edge set is not a connected graph,
e.g. when removing one cycle splits the surrounding
one in half, the smaller connected component in terms
of total length is thrown away. Since our cycles are
typically convex, this means we use the outermost
component.
"""
self.graph = graph
edges, self.total_area = self.ordered_edges(edges)
self.path = zip(*edges)[0]
if coords is None:
self.coords = array([[graph.node[n]['x'], graph.node[n]['y']]
for n in self.path])
else:
self.coords = coords
self.edges = edges
# This allows comparisons
self.edgeset = set([tuple(sorted(e)) for e in edges])
self.com = mean(self.coords, axis=0)
# This frozenset is used to compare/hash cycles.
self._nodeset = frozenset(self.path)
def ordered_edges(self, edges):
""" Uses the graph associated to this cycle to order
the unordered edge set.
Also return the area of the cycle. This is defined as
max(Areas of individual connected components) -<|fim▁hole|> one or more smaller cycles.
"""
# construct subgraph consisting of only the specified edges
edge_graph = nx.Graph(edges)
con = sorted_connected_components(edge_graph)
# Calculate sorted edge list for each connected component
# of the cycle
component_sorted_edges = []
areas = []
G = self.graph
for comp in con:
# get ordered list of edges
component_edges = comp.edges()
n_edges = len(component_edges)
sorted_edges = []
start = component_edges[0][0]
cur = start
prev = None
for i in xrange(n_edges):
nextn = [n for n in comp.neighbors(cur)
if n != prev][0]
sorted_edges.append((cur, nextn))
prev = cur
cur = nextn
# coordinates of path
coords = array([(G.node[u]['x'], G.node[u]['y'])
for u, v in sorted_edges] \
+ [(G.node[sorted_edges[0][0]]['x'],
G.node[sorted_edges[0][0]]['y'])])
areas.append(polygon_area(coords))
component_sorted_edges.append(sorted_edges)
if len(areas) > 1:
areas = sorted(areas, reverse=True)
total_area = areas[0] - sum(areas[1:])
else:
total_area = areas[0]
return list(chain.from_iterable(
sorted(component_sorted_edges, key=len, reverse=True))), \
total_area
def intersection(self, other):
""" Returns an edge set representing the intersection of
the two cycles.
"""
inters = self.edgeset.intersection(other.edgeset)
return inters
def union(self, other, data=True):
""" Returns the edge set corresponding to the union of two cycles.
Will overwrite edge/vertex attributes from other to this,
so only use if both cycle graphs are the same graph!
"""
union = self.edgeset.union(other.edgeset)
return union
def symmetric_difference(self, other, intersection=None):
""" Returns a Cycle corresponding to the symmetric difference of
the Cycle and other. This is defined as the set of edges which
is present in either cycle but not in both.
If the intersection has been pre-calculated it can be used.
This will fail on non-adjacent loops.
"""
new_edgeset = list(self.edgeset.symmetric_difference(
other.edgeset))
return Cycle(self.graph, new_edgeset)
def area(self):
""" Returns the area enclosed by the polygon defined by the
Cycle. If the cycle contains more than one connected component,
this is defined as the area of the largest area connected
component minus the areas of the other connected components.
"""
return self.total_area
def radii(self):
""" Return the radii of all edges in this cycle.
"""
return array([self.graph[u][v]['conductivity']
for u, v in self.edgeset])
def __hash__(self):
""" Implements hashing by using the internal set description's hash
"""
return self._nodeset.__hash__()
def __eq__(self, other):
""" Implements comparison using the internal set description
"""
if isinstance(other, Cycle):
return self._nodeset.__eq__(other._nodeset)
elif isinstance(other, frozenset) or isinstance(other, set):
return self._nodeset.__eq__(other)
else:
return -1
def __repr__(self):
return repr(self._nodeset)
def polygon_area(coords):
""" Return the area of a closed polygon
"""
Xs = coords[:,0]
Ys = coords[:,1]
# Ignore orientation
return 0.5*abs(sum(Xs[:-1]*Ys[1:] - Xs[1:]*Ys[:-1]))
def traverse_graph(G, start, nextn):
""" Traverses the pruned (i.e. ONLY LOOPS) graph G counter-clockwise
in the direction of nextn until start is hit again.
If G has treelike components this will fail and get stuck, there
is no backtracking.
Returns a list of nodes visited, a list of edges visited and
an array of node coordinates.
This will find (a) all internal
smallest loops (faces of the planar graph) and (b) one maximal
outer loop
"""
start_coords = array([G.node[start]['x'], G.node[start]['y']])
nodes_visited = [start]
nodes_visited_set = set()
edges_visited = []
coords = [start_coords]
prev = start
cur = nextn
while cur != start:
cur_coords = array([G.node[cur]['x'], G.node[cur]['y']])
# We ignore all neighbors we alreay visited to avoid multiple loops
neighs = [n for n in G.neighbors(cur) if n != prev and n != cur]
edges_visited.append((prev, cur))
nodes_visited.append(cur)
coords.append(cur_coords)
n_neighs = len(neighs)
if n_neighs > 1:
# Choose path that keeps the loop closest on the left hand side
prev_coords = array([G.node[prev]['x'], G.node[prev]['y']])
neigh_coords = array([[G.node[n]['x'], G.node[n]['y']] \
for n in neighs])
## Construct vectors and normalize
u = cur_coords - prev_coords
vs = neigh_coords - cur_coords
# calculate cos and sin between direction vector and neighbors
u /= sqrt((u*u).sum(-1))
vs /= sqrt((vs*vs).sum(-1))[...,newaxis]
coss = dot(u, vs.T)
sins = cross(u, vs)
# this is a function between -2 and +2, where the
# leftmost path corresponds to -2, rightmost to +2
# sgn(alpha)(cos(alpha) - 1)
ranked = sign(sins)*(coss - 1.)
prev = cur
cur = neighs[argmin(ranked)]
else:
# No choice to make
prev = cur
cur = neighs[0]
# Remove pathological protruding loops
if prev in nodes_visited_set:
n_ind = nodes_visited.index(prev)
del nodes_visited[n_ind+1:]
del coords[n_ind+1:]
del edges_visited[n_ind:]
nodes_visited_set.add(prev)
edges_visited.append((nodes_visited[-1], nodes_visited[0]))
return nodes_visited, edges_visited, array(coords)
def cycle_mtp_path(cycle):
""" Returns a matplotlib Path object describing the cycle.
"""
# Set up polygon
verts = zeros((cycle.coords.shape[0] + 1, cycle.coords.shape[1]))
verts[:-1,:] = cycle.coords
verts[-1,:] = cycle.coords[0,:]
codes = Path.LINETO*ones(verts.shape[0])
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY
return Path(verts, codes)
def outer_loop(G, cycles):
""" Detects the boundary loop in the set of fundamental cycles
by noting that the boundary is precisely the one loop with
maximum area (since it contains all other loops, they all must
have smaller area)
"""
return max([(c.area(), c) for c in cycles])[1]
def shortest_cycles(G):
""" Returns a list of lists of Cycle objects belonging to the
fundamental cycles of the pruned (i.e. there are no treelike
components) graph G by traversing the graph counter-clockwise
for each node until the starting node has been found.
Also returns the outer loop.
"""
cycleset = set()
# Betti number counts interior loops, this algorithm finds
# exterior loop as well!
n_cycles = G.number_of_edges() - G.number_of_nodes() + 1
# Count outer loop as well
if n_cycles >= 2:
n_cycles += 1
print "Number of cycles including boundary: {}.".format(n_cycles)
t0 = time.time()
mst = nx.minimum_spanning_tree(G, weight=None)
for u, v in G.edges_iter():
if not mst.has_edge(u, v):
# traverse cycle in both directions
path, edges, coords = traverse_graph(G, u, v)
cycleset.add(Cycle(G, edges, coords=coords))
path, edges, coords = traverse_graph(G, v, u)
cycleset.add(Cycle(G, edges, coords=coords))
if len(cycleset) != n_cycles:
print "WARNING: Found only", len(cycleset), "cycles!!"
t1 = time.time()
print "Detected fundamental cycles in {}s".format(t1 - t0)
#print "Number of detected facets:", len(cycleset)
return list(cycleset)
def find_neighbor_cycles(G, cycles):
""" Returns a set of tuples of cycle indices describing
which cycles share edges
"""
n_c = len(cycles)
# Construct edge dictionary
edges = defaultdict(list)
for i in xrange(n_c):
for e in cycles[i].edges:
edges[tuple(sorted(e))].append(i)
# Find all neighboring cycles
neighbor_cycles = set()
for n in edges.values():
neighbor_cycles.add(tuple(sorted(n)))
return neighbor_cycles<|fim▁end|> | (Areas of other connected components)
This assumes that the cycle is one large cycle containing |
<|file_name|>win_file.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#<|fim▁hole|>
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories.
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
- Unlike M(file), does not modify ownership, permissions or manipulate links.
notes:
- See also M(win_copy), M(win_template), M(copy), M(template), M(assemble)
requirements: [ ]
author: "Jon Hawkesworth (@jhawkesworth)"
options:
path:
description:
- 'path to the file being managed. Aliases: I(dest), I(name)'
required: true
aliases: ['dest', 'name']
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
choices: [ file, directory, touch, absent ]
'''
EXAMPLES = r'''
- name: Create a file
win_file:
path: C:\Temp\foo.conf
state: file
- name: Touch a file (creates if not present, updates modification time if present)
win_file:
path: C:\Temp\foo.conf
state: touch
- name: Remove a file, if present
win_file:
path: C:\Temp\foo.conf
state: absent
- name: Create directory structure
win_file:
path: C:\Temp\folder\subfolder
state: directory
- name: Remove directory structure
win_file:
path: C:\Temp
state: absent
'''<|fim▁end|> | # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
<|file_name|>Assertion.js<|end_file_name|><|fim▁begin|>if (typeof parseRegExp === 'undefined')
quit();
load(libdir + "regexp_parse.js");
test_mix("^", no_multiline_flags,
Assertion("START_OF_INPUT"));
test_mix("^", multiline_flags,
Assertion("START_OF_LINE"));
test_mix("$", no_multiline_flags,
Assertion("END_OF_INPUT"));
test_mix("$", multiline_flags,
Assertion("END_OF_LINE"));
<|fim▁hole|>test_mix("\\B", all_flags,
Assertion("NON_BOUNDARY"));<|fim▁end|> | test_mix("\\b", all_flags,
Assertion("BOUNDARY"));
|
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openslide(AutotoolsPackage):
"""OpenSlide reads whole slide image files."""
homepage = "https://openslide.org/"
url = "https://github.com/openslide/openslide/releases/download/v3.4.1/openslide-3.4.1.tar.xz"
version('3.4.1', sha256='9938034dba7f48fadc90a2cdf8cfe94c5613b04098d1348a5ff19da95b990564')
depends_on('pkgconfig', type='build')
depends_on('openjpeg')
depends_on('jpeg')
depends_on('libtiff')
depends_on('libxml2')
depends_on('sqlite@3.6:')
depends_on('glib')<|fim▁hole|><|fim▁end|> | depends_on('cairo+pdf')
depends_on('gdk-pixbuf') |
<|file_name|>simulationCanvas.py<|end_file_name|><|fim▁begin|>from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty, ObjectProperty
from kivy.graphics import Color, Ellipse, Line
from kivy.graphics.transformation import Matrix
from kivy.core.window import Window
from simulationLine import SimulationLine
from simulationAngle import SimulationAngle
from simulationSled import SimulationSled
from chainLengthToXY import ChainLengthtoXY
from posToChainLength import PosToChainLength
from kivy.graphics.transformation import Matrix
import re
import math
class SimulationCanvas(FloatLayout):
scatterObject = ObjectProperty(None)
motorLift = 220
motorTranslate = 258.8
bedWidth = 2438.4 #8'
bedHeight = 1219.2 #4'
motorY = bedHeight + motorLift
motor2X = bedWidth + motorTranslate
def initialize(self):
self.startChains()
self.drawFrame()
self.setSpindleLocation(self.bedWidth/2,self.bedHeight/2)
self.setInitialZoom()
self.xPosSlider.bind(value=self.xPosSliderValueChange)
self.yPosSlider.bind(value=self.yPosSliderValueChange)
self.setupAngles()
self.setupSled()
self.lengthToXY.initialize(self.chainA, self.chainB, self.bedWidth+2*self.motorTranslate, self.bedHeight+self.motorLift, self.motorTranslate, self.motorLift)
self.posToLength.initialize(self.sled, self.bedWidth+2*self.motorTranslate, self.bedHeight+self.motorLift, self.motorTranslate, self.motorLift)
def setSpindleLocation(self,x,y):
self.chainA.setEnd(x,y)
self.chainB.setEnd(x,y)
def xPosSliderValueChange(self,callback,value):<|fim▁hole|> self.setSpindleLocation(value,self.chainA.toPos[1])
def yPosSliderValueChange(self,callback,value):
self.setSpindleLocation(self.chainA.toPos[0], value)
def drawFrame(self):
self.frameLeft.initialize()
self.frameTop.initialize()
self.frameRight.initialize()
self.frameBottom.initialize()
self.frameLeft.setStart(0,0)
self.frameLeft.setEnd(0,self.bedHeight)
self.frameLeft.color = (1,0,0)
self.frameTop.setStart(0,self.bedHeight)
self.frameTop.setEnd(self.bedWidth,self.bedHeight)
self.frameTop.color = (1,0,0)
self.frameRight.setStart(self.bedWidth,0)
self.frameRight.setEnd(self.bedWidth,self.bedHeight)
self.frameRight.color = (1,0,0)
self.frameBottom.setStart(0,0)
self.frameBottom.setEnd(self.bedWidth,0)
self.frameBottom.color = (1,0,0)
def setupAngles(self):
self.angleA.initialize(self.chainA, self.lineT, 0)
self.angleB.initialize(self.chainB, self.lineT, 0)
self.angleP.initialize(self.chainA, self.chainB, 1)
def setupSled(self):
self.sled.initialize(self.chainA, self.chainB, 1, self.angleP)
def setInitialZoom(self):
mat = Matrix().scale(.4, .4, 1)
self.scatterInstance.apply_transform(mat, (0,0))
mat = Matrix().translate(200, 100, 0)
self.scatterInstance.apply_transform(mat)
def startChains(self):
self.chainA.initialize()
self.chainB.initialize()
self.lineT.initialize()
self.lineT.color = (0,0,1)
self.chainA.setStart(-self.motorTranslate, self.motorY)
self.chainB.setStart(self.motor2X, self.motorY)
self.lineT.setStart(-self.motorTranslate,self.motorY)
self.lineT.setEnd(self.motor2X,self.motorY)<|fim▁end|> | |
<|file_name|>feature_column.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features in tf.learn Estimator models.
FeatureColumns are the primary way of encoding features for pre-canned
tf.learn Estimators.
When using FeatureColumns with tf.learn models, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
(1) Feature type:
* Continuous features can be represented by `real_valued_column`.
* Categorical features can be represented by any `sparse_column_with_*`
column (`sparse_column_with_keys`, `sparse_column_with_vocabulary_file`,
`sparse_column_with_hash_bucket`, `sparse_column_with_integerized_feature`).
(2) Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = real_valued_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `one_hot_column`. `one_hot_column` is recommended for
features with only a few possible values. For features with many possible
values, `embedding_column` is recommended.
embedded_dept_column = embedding_column(
sparse_column_with_keys("department", ["math", "philosphy", ...]),
dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models.
dept_column = sparse_column_with_keys("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=[department_column, bucketized_age_column],
hash_bucket_size=1000)
Example of building tf.learn model using FeatureColumns:
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_from_feature_columns` within
`feature_column_ops.py`.
Example of building non-tf.learn model using FeatureColumns:
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import six
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class _LinearEmbeddingLookupArguments(
    collections.namedtuple("_LinearEmbeddingLookupArguments",
                           ["input_tensor",
                            "weight_tensor",
                            "vocab_size",
                            "initializer",
                            "combiner"])):
  """Represents the information needed from a column for embedding lookup.

  Used to compute DNN inputs and weighted sum.

  Fields:
    input_tensor: `SparseTensor` of ids to look up (see
      `_SparseColumn.id_tensor`).
    weight_tensor: Optional `SparseTensor` of per-id weights, or None for an
      unweighted column.
    vocab_size: Number of distinct ids, i.e. rows of the weight variable.
    initializer: Initializer for the linear weight variable.
    combiner: Reduction to apply over multiple ids per example
      ("mean", "sqrtn" or "sum").
  """
  pass
class _DeepEmbeddingLookupArguments(
    collections.namedtuple("_DeepEmbeddingLookupArguments",
                           ["input_tensor",
                            "weight_tensor",
                            "vocab_size",
                            "initializer",
                            "combiner",
                            "dimension",
                            "shared_embedding_name",
                            "hash_key",
                            "max_norm",
                            "trainable"])):
  """Represents the information needed from a column for embedding lookup.

  Used to compute DNN inputs and weighted sum.

  Fields:
    input_tensor: `SparseTensor` of ids to look up.
    weight_tensor: Optional `SparseTensor` of per-id weights, or None for an
      unweighted column.
    vocab_size: Number of distinct ids, i.e. rows of the embedding table.
    initializer: Initializer for the embedding variable.
    combiner: Reduction to apply over multiple ids per example
      ("mean", "sqrtn" or "sum").
    dimension: Width of each embedding vector.
    shared_embedding_name: Common name for a shared embedding space, or None.
    hash_key: Optional hash key (set to None by `_EmbeddingColumn`).
    max_norm: Optional l2-norm cap applied to looked-up embeddings.
    trainable: Whether the embedding variable should be trainable.
  """
  pass
class _FeatureColumn(object):
  """Represents a feature column abstraction.

  To distinguish the concept of a feature family and a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. For example "country:US" is a feature which is in "country" feature
  column and has a feature value ("US").
  This class is an abstract class. User should not create one instance of this.
  Following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
  instances.
  """
  # Python 2-style ABC declaration; kept for compatibility with the
  # six-based codebase.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def name(self):
    """Returns the name of column or transformed column."""
    pass

  @abc.abstractproperty
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def config(self):
    """Returns configuration of the base feature for `tf.parse_example`."""
    pass

  @abc.abstractproperty
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    pass

  @abc.abstractmethod
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    raise NotImplementedError("Transform is not implemented for {}.".format(
        self))

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collection=None,
                          trainable=True,
                          output_rank=2):
    """Returns a Tensor as an input to the first layer of neural network."""
    raise ValueError("Calling an abstract method.")

  def _deep_embedding_lookup_arguments(self, input_tensor):
    """Returns arguments to embedding lookup to build an input layer."""
    raise NotImplementedError(
        "No deep embedding lookup arguments for column {}.".format(self))

  # It is expected that classes implement either wide_embedding_lookup_arguments
  # or to_dense_tensor to be used in linear models.
  # pylint: disable=unused-argument
  def _wide_embedding_lookup_arguments(self, input_tensor):
    """Returns arguments to look up embeddings for this column."""
    raise NotImplementedError(
        "No wide embedding lookup arguments for column {}.".format(self))

  # pylint: disable=unused-argument
  def _to_dense_tensor(self, input_tensor):
    """Returns a dense tensor representing this column's values."""
    raise NotImplementedError(
        "No dense tensor representation for column {}.".format(self))

  def _checkpoint_path(self):
    """Returns None, or a (path,tensor_name) to load a checkpoint from."""
    return None

  def _key_without_properties(self, properties):
    """Helper method for self.key() that omits particular properties."""
    fields_values = []
    # pylint: disable=protected-access
    # Subclasses mix in collections.namedtuple, which provides _fields and
    # positional indexing used below.
    for i, k in enumerate(self._fields):
      if k in properties:
        # Excludes a property from the key.
        # For instance, exclude `initializer` from the key of EmbeddingColumn
        # since we don't support users specifying different initializers for
        # the same embedding column. Ditto for `normalizer` and
        # RealValuedColumn.
        # Special treatment is needed since the default str form of a
        # function contains its address, which could introduce non-determinism
        # in sorting.
        continue
      fields_values.append("{}={}".format(k, self[i]))
    # pylint: enable=protected-access
    # This is effectively the same format as str(self), except with our special
    # treatment.
    return "{}({})".format(type(self).__name__, ", ".join(fields_values))
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(_FeatureColumn,
                    collections.namedtuple("_SparseColumn",
                                           ["column_name", "is_integerized",
                                            "bucket_size", "lookup_config",
                                            "combiner", "dtype"])):
  """Represents a sparse feature column also known as categorical features.

  Instances of this class are immutable. A sparse column means features are
  sparse and dictionary returned by InputBuilder contains a
  ("column_name", SparseTensor) pair.
  One and only one of bucket_size or lookup_config should be set. If
  is_integerized is True then bucket_size should be set.

  Attributes:
    column_name: A string defining sparse column name.
    is_integerized: A bool if True means type of feature is an integer.
      Integerized means we can use the feature itself as id.
    bucket_size: An int that is > 0. The number of buckets.
    lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
      configuration
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
      the default. "sqrtn" often achieves good accuracy, in particular with
      bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features, such as `tf.string` or `tf.int64`.

  Raises:
    TypeError: if lookup_config is not a _SparseIdLookupConfig.
    ValueError: if above expectations about input fails.
  """

  def __new__(cls,
              column_name,
              is_integerized=False,
              bucket_size=None,
              lookup_config=None,
              combiner="sum",
              dtype=dtypes.string):
    # Validation happens in __new__ (not __init__) because namedtuple
    # instances are immutable once constructed.
    if is_integerized and bucket_size is None:
      raise ValueError("bucket_size must be set if is_integerized is True. "
                       "column_name: {}".format(column_name))
    if is_integerized and not dtype.is_integer:
      raise ValueError("dtype must be an integer if is_integerized is True. "
                       "dtype: {}, column_name: {}.".format(dtype, column_name))
    if dtype != dtypes.string and not dtype.is_integer:
      raise ValueError("dtype must be string or integer. "
                       "dtype: {}, column_name: {}".format(dtype, column_name))
    # Exactly one of bucket_size / lookup_config must be provided: they are
    # two mutually exclusive ways of defining the id space.
    if bucket_size is None and lookup_config is None:
      raise ValueError("one of bucket_size or lookup_config must be set. "
                       "column_name: {}".format(column_name))
    if bucket_size is not None and lookup_config:
      raise ValueError("one and only one of bucket_size or lookup_config "
                       "must be set. column_name: {}".format(column_name))
    if bucket_size is not None and bucket_size < 1:
      raise ValueError("bucket_size must be at least 1. "
                       "bucket_size: {}, column_name: {}".format(bucket_size,
                                                                 column_name))
    if ((lookup_config) and
        (not isinstance(lookup_config, _SparseIdLookupConfig))):
      raise TypeError(
          "lookup_config must be an instance of _SparseIdLookupConfig. "
          "Given one is in type {} for column_name {}".format(
              type(lookup_config), column_name))
    # A file-backed vocabulary needs an explicit size so `length` below is
    # well-defined.
    if (lookup_config and lookup_config.vocabulary_file and
        lookup_config.vocab_size is None):
      raise ValueError("vocab_size must be defined. "
                       "column_name: {}".format(column_name))
    return super(_SparseColumn, cls).__new__(
        cls,
        column_name,
        is_integerized=is_integerized,
        bucket_size=bucket_size,
        lookup_config=lookup_config,
        combiner=combiner,
        dtype=dtype)

  @property
  def name(self):
    return self.column_name

  @property
  def length(self):
    """Returns vocabulary or hash_bucket size."""
    if self.bucket_size is not None:
      return self.bucket_size
    # Out-of-vocabulary buckets extend the id space past the vocabulary.
    return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets

  @property
  def config(self):
    return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor

  # pylint: disable=unused-argument
  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    # Plain sparse columns are unweighted; _WeightedSparseColumn overrides.
    return None

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    raise ValueError(
        "SparseColumn is not supported in DNN. "
        "Please use embedding_column or one_hot_column. column: {}".format(
            self))

  def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner)

  def _get_input_sparse_tensor(self, columns_to_tensors):
    """Looks up the input tensor for transformation and sparsify it if dense."""
    input_tensor = columns_to_tensors[self.name]
    if not isinstance(input_tensor, sparse_tensor_py.SparseTensor):
      # To avoid making any assumptions about which values are to be ignored,
      # we set ignore_value to -1 for numeric tensors to avoid excluding valid
      # indices.
      if input_tensor.dtype == dtypes.string:
        ignore_value = ""
      else:
        ignore_value = -1
      input_tensor = _reshape_real_valued_tensor(input_tensor, 2, self.name)
      input_tensor = contrib_sparse_ops.dense_to_sparse_tensor(
          input_tensor, ignore_value=ignore_value)
    return input_tensor

  def is_compatible(self, other_column):
    """Check compatability of two sparse columns."""
    if self.lookup_config and other_column.lookup_config:
      return self.lookup_config == other_column.lookup_config
    # Without identical lookup configs, fall back to comparing id-space size
    # and dtype family; this cannot prove the vocabularies match, hence the
    # warning below.
    compatible = (self.length == other_column.length and
                  (self.dtype == other_column.dtype or
                   (self.dtype.is_integer and other_column.dtype.is_integer)))
    if compatible:
      logging.warn("Column {} and {} may not have the same vocabulary.".
                   format(self.name, other_column.name))
    return compatible
class _SparseColumnIntegerized(_SparseColumn):
  """See `sparse_column_with_integerized_feature`."""

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    sparse_input = self._get_input_sparse_tensor(columns_to_tensors)
    # Feature values are already ids; fold them into range via modulo.
    bucketed_values = math_ops.mod(
        sparse_input.values, self.bucket_size, name="mod")
    columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
        sparse_input.indices, bucketed_values, sparse_input.dense_shape)
def sparse_column_with_integerized_feature(column_name,
                                           bucket_size,
                                           combiner="sum",
                                           dtype=dtypes.int64):
  """Creates an integerized _SparseColumn.

  Use this when your features are already pre-integerized into int64 IDs, that
  is, when the set of values to output is already coming in as what's desired
  in the output. Integerized means we can use the feature value itself as id.
  Typically this is used for reading contiguous ranges of integer indexes, but
  it doesn't have to be. The output value is simply copied from the input
  feature (modulo `bucket_size`). Be aware that if you have large gaps of
  unused integers it might affect what you feed those into (for instance, a
  one-hot tensor built from these will contain always-zero entries for the
  unused ids).

  Args:
    column_name: A string defining sparse column name.
    bucket_size: An int that is > 0. The number of buckets. It should be
      bigger than maximum feature value. In other words features in this
      column should be an int64 in range [0, bucket_size).
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features. It should be an integer type. Default value is
      dtypes.int64.

  Returns:
    An integerized _SparseColumn definition.

  Raises:
    ValueError: bucket_size is less than 1.
    ValueError: dtype is not integer.
  """
  return _SparseColumnIntegerized(
      column_name,
      is_integerized=True,
      bucket_size=bucket_size,
      combiner=combiner,
      dtype=dtype)
class _SparseColumnHashed(_SparseColumn):
  """See `sparse_column_with_hash_bucket`."""

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    sparse_input = self._get_input_sparse_tensor(columns_to_tensors)
    # The hashing op consumes strings, so integer features are stringified
    # first; string features pass through untouched.
    if self.dtype.is_integer:
      values_to_hash = string_ops.as_string(sparse_input.values)
    else:
      values_to_hash = sparse_input.values
    hashed_ids = string_ops.string_to_hash_bucket_fast(
        values_to_hash, self.bucket_size, name="lookup")
    columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
        sparse_input.indices, hashed_ids, sparse_input.dense_shape)
def sparse_column_with_hash_bucket(column_name,
                                   hash_bucket_size,
                                   combiner="sum",
                                   dtype=dtypes.string):
  """Creates a _SparseColumn with hashed bucket configuration.

  Use this when your sparse features are in string or integer format, but you
  don't have a vocab file that maps each value to an integer ID.
  output_id = Hash(input_feature_string) % bucket_size

  Args:
    column_name: A string defining sparse column name.
    hash_bucket_size: An int that is > 0. The number of buckets.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
      the default. "sqrtn" often achieves good accuracy, in particular with
      bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A _SparseColumn with hashed bucket configuration

  Raises:
    ValueError: hash_bucket_size is less than 1 (raised by `_SparseColumn`).
    ValueError: dtype is neither string nor integer.
  """
  return _SparseColumnHashed(
      column_name,
      bucket_size=hash_bucket_size,
      combiner=combiner,
      dtype=dtype)
class _SparseColumnKeys(_SparseColumn):
  """See `sparse_column_with_keys`."""

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    sparse_input = self._get_input_sparse_tensor(columns_to_tensors)
    # Map each feature value to its position in the configured key list;
    # out-of-vocabulary values map to the configured default.
    key_table = lookup.index_table_from_tensor(
        mapping=tuple(self.lookup_config.keys),
        default_value=self.lookup_config.default_value,
        dtype=self.dtype,
        name="lookup")
    columns_to_tensors[self] = key_table.lookup(sparse_input)
def sparse_column_with_keys(
    column_name, keys, default_value=-1, combiner="sum", dtype=dtypes.string):
  """Creates a _SparseColumn with keys.

  Look up logic is as follows:
  lookup_id = index_of_feature_in_keys if feature in keys else default_value

  Args:
    column_name: A string defining sparse column name.
    keys: A list or tuple defining vocabulary. Must be castable to `dtype`.
    default_value: The value to use for out-of-vocabulary feature values.
      Default is -1.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features. Only integer and string are supported.

  Returns:
    A _SparseColumnKeys with keys configuration.
  """
  # Freeze the vocabulary so the resulting column is hashable/immutable.
  keys = tuple(keys)
  lookup_config = _SparseIdLookupConfig(
      keys=keys, vocab_size=len(keys), default_value=default_value)
  return _SparseColumnKeys(
      column_name, lookup_config=lookup_config, combiner=combiner, dtype=dtype)
class _SparseColumnVocabulary(_SparseColumn):
  """See `sparse_column_with_vocabulary_file`."""

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    sparse_input = self._get_input_sparse_tensor(columns_to_tensors)
    # The file-backed table keys on strings, so integer features are
    # converted to their string form before lookup.
    if self.dtype.is_integer:
      string_tensor = sparse_tensor_py.SparseTensor(
          sparse_input.indices,
          string_ops.as_string(sparse_input.values),
          sparse_input.dense_shape)
    else:
      string_tensor = sparse_input
    vocab_table = lookup.string_to_index_table_from_file(
        vocabulary_file=self.lookup_config.vocabulary_file,
        num_oov_buckets=self.lookup_config.num_oov_buckets,
        vocab_size=self.lookup_config.vocab_size,
        default_value=self.lookup_config.default_value,
        name=self.name + "_lookup")
    columns_to_tensors[self] = vocab_table.lookup(string_tensor)
def sparse_column_with_vocabulary_file(column_name,
                                       vocabulary_file,
                                       num_oov_buckets=0,
                                       vocab_size=None,
                                       default_value=-1,
                                       combiner="sum",
                                       dtype=dtypes.string):
  """Creates a _SparseColumn with vocabulary file configuration.

  Use this when your sparse features are in string or integer format, and you
  have a vocab file that maps each value to an integer ID.
  output_id = LookupIdFromVocab(input_feature_string)

  Args:
    column_name: A string defining sparse column name.
    vocabulary_file: The vocabulary filename.
    num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out
      of vocabulary features will be ignored.
    vocab_size: Number of the elements in the vocabulary.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A _SparseColumn with vocabulary file configuration.

  Raises:
    ValueError: vocab_size is not defined.
    ValueError: dtype is neither string nor integer.
  """
  # Guard clause: a file-backed vocabulary needs an explicit size.
  if vocab_size is None:
    raise ValueError("vocab_size should be defined. "
                     "column_name: {}".format(column_name))
  lookup_config = _SparseIdLookupConfig(
      vocabulary_file=vocabulary_file,
      num_oov_buckets=num_oov_buckets,
      vocab_size=vocab_size,
      default_value=default_value)
  return _SparseColumnVocabulary(
      column_name, lookup_config=lookup_config, combiner=combiner, dtype=dtype)
class _WeightedSparseColumn(_FeatureColumn, collections.namedtuple(
    "_WeightedSparseColumn",
    ["sparse_id_column", "weight_column_name", "dtype"])):
  """See `weighted_sparse_column`."""

  def __new__(cls, sparse_id_column, weight_column_name, dtype):
    return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column,
                                                     weight_column_name, dtype)

  @property
  def name(self):
    return "{}_weighted_by_{}".format(self.sparse_id_column.name,
                                      self.weight_column_name)

  @property
  def length(self):
    """Returns id size."""
    return self.sparse_id_column.length

  @property
  def config(self):
    # Parse both the underlying id feature and the weight feature.
    config = _get_feature_config(self.sparse_id_column)
    config.update(
        {self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
    return config

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    """Inserts a tuple with the id and weight tensors."""
    if self.sparse_id_column not in columns_to_tensors:
      self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    weight_tensor = columns_to_tensors[self.weight_column_name]
    if not isinstance(weight_tensor, sparse_tensor_py.SparseTensor):
      # The weight tensor can be a regular Tensor. In such case, sparsify it.
      weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor)
    if not self.dtype.is_floating:
      weight_tensor = math_ops.to_float(weight_tensor)
    # The transformed value is an (ids, weights) pair; id_tensor() and
    # weight_tensor() below unpack it.
    columns_to_tensors[self] = tuple([
        columns_to_tensors[self.sparse_id_column],
        weight_tensor
    ])

  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor[0]

  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    return input_tensor[1]

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    raise ValueError(
        "WeightedSparseColumn is not supported in DNN. "
        "Please use embedding_column or one_hot_column. column: {}".format(
            self))

  def _wide_embedding_lookup_arguments(self, input_tensor):
    # Note: combiner comes from the wrapped id column, not this column.
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.sparse_id_column.combiner)
def weighted_sparse_column(sparse_id_column,
                           weight_column_name,
                           dtype=dtypes.float32):
  """Creates a _SparseColumn by combining sparse_id_column with a weight column.

  Example:

  ```python
  sparse_feature = sparse_column_with_hash_bucket(column_name="sparse_col",
                                                  hash_bucket_size=1000)
  weighted_feature = weighted_sparse_column(sparse_id_column=sparse_feature,
                                            weight_column_name="weights_col")
  ```

  This configuration assumes that input dictionary of model contains the
  following two items:
    * (key="sparse_col", value=sparse_tensor) where sparse_tensor is
      a SparseTensor.
    * (key="weights_col", value=weights_tensor) where weights_tensor
      is a SparseTensor.
  Following are assumed to be true:
    * sparse_tensor.indices = weights_tensor.indices
    * sparse_tensor.dense_shape = weights_tensor.dense_shape

  Args:
    sparse_id_column: A `_SparseColumn` which is created by
      `sparse_column_with_*` functions.
    weight_column_name: A string defining a sparse column name which represents
      weight or value of the corresponding sparse id feature.
    dtype: Type of weights, such as `tf.float32`. Only floating and integer
      weights are supported.

  Returns:
    A _WeightedSparseColumn composed of two sparse features: one represents id,
    the other represents weight (value) of the id feature in that example.

  Raises:
    ValueError: if dtype is not convertible to float.
  """
  # Weights must be numeric so they can be cast to float for the lookup.
  if not dtype.is_integer and not dtype.is_floating:
    raise ValueError("dtype is not convertible to float. Given {}".format(
        dtype))
  return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _OneHotColumn(_FeatureColumn,
                    collections.namedtuple("_OneHotColumn",
                                           ["sparse_id_column"])):
  """Represents a one-hot column for use in deep networks.

  Args:
    sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
      function.
  """

  @property
  def name(self):
    return "{}_one_hot".format(self.sparse_id_column.name)

  @property
  def length(self):
    """Returns vocabulary or hash_bucket size."""
    return self.sparse_id_column.length

  @property
  def config(self):
    """Returns the parsing config of the origin column."""
    return _get_feature_config(self.sparse_id_column)

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    """Used by the Transformer to prevent double transformations."""
    if self.sparse_id_column not in columns_to_tensors:
      self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    # Reuse the underlying sparse column's transformed ids unchanged; the
    # one-hot expansion happens lazily in _to_dnn_input_layer.
    columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]

  def _to_dnn_input_layer(self,
                          transformed_input_tensor,
                          unused_weight_collections=None,
                          unused_trainable=False,
                          output_rank=2):
    """Returns a Tensor as an input to the first layer of neural network.

    Args:
      transformed_input_tensor: A tensor that has undergone the transformations
        in `insert_transformed_feature`. Rank should be >= `output_rank`.
      unused_weight_collections: Unused. One hot encodings are not variable.
      unused_trainable: Unused. One hot encodings are not trainable.
      output_rank: the desired rank of the output `Tensor`.

    Returns:
      A multihot Tensor to be fed into the first layer of neural network.

    Raises:
      ValueError: When using one_hot_column with weighted_sparse_column.
        This is not yet supported.
    """
    # Reshape ID column to `output_rank`.
    sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
    # pylint: disable=protected-access
    sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
    weight_tensor = self.sparse_id_column.weight_tensor(
        transformed_input_tensor)
    if weight_tensor is not None:
      # Weighted path: scatter weights into the vocab-sized axis directly.
      weighted_column = sparse_ops.sparse_merge(sp_ids=sparse_id_column,
                                                sp_values=weight_tensor,
                                                vocab_size=self.length)
      return sparse_ops.sparse_tensor_to_dense(weighted_column)
    # Unweighted path: densify ids (missing slots become -1, which one_hot
    # maps to an all-zeros row), expand to one-hot, then sum to multi-hot.
    dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
                                                        default_value=-1)
    # One hot must be float for tf.concat reasons since all other inputs to
    # input_layer are float32.
    one_hot_id_tensor = array_ops.one_hot(
        dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
    # Reduce to get a multi-hot per example.
    return math_ops.reduce_sum(
        one_hot_id_tensor, reduction_indices=[output_rank - 1])
class _EmbeddingColumn(_FeatureColumn, collections.namedtuple(
    "_EmbeddingColumn",
    ["sparse_id_column", "dimension", "combiner", "initializer",
     "ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name",
     "shared_vocab_size", "max_norm", "trainable"])):
  """Represents an embedding column.

  Args:
    sparse_id_column: A `_SparseColumn` which is created by
      `sparse_column_with_*` or `weighted_sparse_column` functions.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
    shared_embedding_name: (Optional). The common name for shared embedding.
    shared_vocab_size: (Optional). The common vocab_size used for shared
      embedding space.
    max_norm: (Optional). If not None, embedding values are l2-normalized to
      the value of max_norm.
    trainable: (Optional). Should the embedding be trainable. Default is True.

  Raises:
    ValueError: if `initializer` is specified and is not callable. Also,
      if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
  """

  def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="mean",
              initializer=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              shared_embedding_name=None,
              shared_vocab_size=None,
              max_norm=None,
              trainable=True):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "Embedding of column_name: {}".format(
                           sparse_id_column.name))
    # ckpt_to_load_from and tensor_name_in_ckpt only make sense together.
    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    if initializer is None:
      logging.warn("The default stddev value of initializer will change from "
                   "\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after "
                   "2017/02/25.")
      # Current default: stddev scales with vocabulary size (see warning).
      stddev = 1 / math.sqrt(sparse_id_column.length)
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer, ckpt_to_load_from,
                                                tensor_name_in_ckpt,
                                                shared_embedding_name,
                                                shared_vocab_size,
                                                max_norm,
                                                trainable)

  @property
  def name(self):
    if self.shared_embedding_name is None:
      return "{}_embedding".format(self.sparse_id_column.name)
    else:
      return "{}_shared_embedding".format(self.sparse_id_column.name)

  @property
  def length(self):
    """Returns id size."""
    # A shared embedding space overrides the wrapped column's vocab size.
    if self.shared_vocab_size is None:
      return self.sparse_id_column.length
    else:
      return self.shared_vocab_size

  @property
  def config(self):
    return _get_feature_config(self.sparse_id_column)

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    # `initializer` is excluded: its default str form contains a memory
    # address, which would make sorting non-deterministic.
    return self._key_without_properties(["initializer"])

  def insert_transformed_feature(self, columns_to_tensors):
    if self.sparse_id_column not in columns_to_tensors:
      self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]

  def _deep_embedding_lookup_arguments(self, input_tensor):
    return _DeepEmbeddingLookupArguments(
        input_tensor=self.sparse_id_column.id_tensor(input_tensor),
        weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
        vocab_size=self.length,
        dimension=self.dimension,
        initializer=self.initializer,
        combiner=self.combiner,
        shared_embedding_name=self.shared_embedding_name,
        hash_key=None,
        max_norm=self.max_norm,
        trainable=self.trainable)

  def _checkpoint_path(self):
    if self.ckpt_to_load_from is not None:
      return self.ckpt_to_load_from, self.tensor_name_in_ckpt
    return None

  # pylint: disable=unused-argument
  def _wide_embedding_lookup_arguments(self, input_tensor):
    raise ValueError("Column {} is not supported in linear models. "
                     "Please use sparse_column.".format(self))
def one_hot_column(sparse_id_column):
  """Creates an `_OneHotColumn` for a one-hot or multi-hot repr in a DNN.

  Args:
    sparse_id_column: A _SparseColumn which is created by
      `sparse_column_with_*` or crossed_column functions. Note that
      `combiner` defined in `sparse_id_column` is ignored.

  Returns:
    An _OneHotColumn.
  """
  return _OneHotColumn(sparse_id_column)
def embedding_column(sparse_id_column,
                     dimension,
                     combiner="mean",
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None,
                     max_norm=None,
                     trainable=True):
  """Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.

  Args:
    sparse_id_column: A `_SparseColumn` which is created by for example
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in `sparse_id_column` is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
    max_norm: (Optional). If not None, embedding values are l2-normalized to
      the value of max_norm.
    trainable: (Optional). Should the embedding be trainable. Default is True

  Returns:
    An `_EmbeddingColumn`.
  """
  return _EmbeddingColumn(
      sparse_id_column,
      dimension,
      combiner,
      initializer,
      ckpt_to_load_from,
      tensor_name_in_ckpt,
      max_norm=max_norm,
      trainable=trainable)
def shared_embedding_columns(sparse_id_columns,
                             dimension,
                             combiner="mean",
                             shared_embedding_name=None,
                             initializer=None,
                             ckpt_to_load_from=None,
                             tensor_name_in_ckpt=None,
                             max_norm=None,
                             trainable=True):
  """Creates a list of `_EmbeddingColumn` sharing the same embedding.

  Args:
    sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in each sparse_id_column is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    shared_embedding_name: (Optional). A string specifying the name of shared
      embedding weights. This will be needed if you want to reference the shared
      embedding separately from the generated `_EmbeddingColumn`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_columns[0].length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
    max_norm: (Optional). If not None, embedding values are l2-normalized to
      the value of max_norm.
    trainable: (Optional). Should the embedding be trainable. Default is True

  Returns:
    A tuple of `_EmbeddingColumn` with shared embedding space.

  Raises:
    ValueError: if sparse_id_columns is empty, or its elements are not
      compatible with each other.
    TypeError: if `sparse_id_columns` is not a sequence or is a string. If at
      least one element of `sparse_id_columns` is not a `SparseTensor`.
  """
  # BUG FIX: `collections.Sequence` is a deprecated alias that was removed in
  # Python 3.10; use the canonical `collections.abc.Sequence`. The local
  # import guarantees the `collections.abc` submodule is loaded.
  import collections.abc
  if (not isinstance(sparse_id_columns, collections.abc.Sequence) or
      isinstance(sparse_id_columns, six.string_types)):
    raise TypeError(
        "sparse_id_columns must be a non-string sequence (ex: list or tuple) "
        "instead of type {}.".format(type(sparse_id_columns)))
  if len(sparse_id_columns) < 1:
    raise ValueError("The input sparse_id_columns should have at least one "
                     "element.")
  for sparse_id_column in sparse_id_columns:
    if not isinstance(sparse_id_column, _SparseColumn):
      raise TypeError("Elements of sparse_id_columns must be _SparseColumn, but"
                      "{} is not.".format(sparse_id_column))

  if len(sparse_id_columns) == 1:
    # A single column needs no compatibility checks or shared vocab size.
    return [
        _EmbeddingColumn(sparse_id_columns[0], dimension, combiner, initializer,
                         ckpt_to_load_from, tensor_name_in_ckpt,
                         shared_embedding_name, max_norm=max_norm,
                         trainable=trainable)]

  # All columns must share a compatible vocabulary to share one embedding;
  # fail fast on the first incompatible column.
  for column in sparse_id_columns[1:]:
    if not column.is_compatible(sparse_id_columns[0]):
      raise ValueError("The input sparse id columns are not compatible.")

  # Construct the shared name and size for shared embedding space.
  if not shared_embedding_name:
    # Sort the columns so that shared_embedding_name will be deterministic
    # even if users pass in unsorted columns from a dict or something.
    sorted_columns = sorted(sparse_id_columns)
    if len(sorted_columns) <= 3:
      shared_embedding_name = "_".join([column.name
                                        for column in sorted_columns])
    else:
      # Cap the name length: use the first three names plus a count suffix.
      shared_embedding_name = "_".join([column.name
                                        for column in sorted_columns[0:3]])
      shared_embedding_name += (
          "_plus_{}_others".format(len(sorted_columns) - 3))
    shared_embedding_name += "_shared_embedding"
  shared_vocab_size = sparse_id_columns[0].length

  embedded_columns = []
  for column in sparse_id_columns:
    embedded_columns.append(
        _EmbeddingColumn(column, dimension, combiner, initializer,
                         ckpt_to_load_from, tensor_name_in_ckpt,
                         shared_embedding_name, shared_vocab_size,
                         max_norm=max_norm, trainable=trainable))
  return tuple(embedded_columns)
class _ScatteredEmbeddingColumn(
    _FeatureColumn,
    collections.namedtuple(
        "_ScatteredEmbeddingColumn",
        ["column_name", "size", "dimension", "hash_key", "combiner",
         "initializer"])):
  """Embedding column whose weights are looked up via parameter hashing.

  See `scattered_embedding_column`: instead of one weight vector per id, the
  i-th component of a value's embedding is fetched from a flat parameter
  vector of `size` entries, indexed by a fingerprint of the (value, i) pair.
  Instances are immutable namedtuples.
  """

  def __new__(cls,
              column_name,
              size,
              dimension,
              hash_key,
              combiner="sqrtn",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "column_name: {}".format(column_name))
    if initializer is None:
      # Historical default (stddev 0.1) kept for backward compatibility;
      # the warning announces the planned change of default.
      logging.warn("The default stddev value of initializer will change from "
                   "\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
      stddev = 0.1
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                         dimension, hash_key,
                                                         combiner,
                                                         initializer)

  @property
  def name(self):
    """Name of this column, derived from the raw feature name."""
    return "{}_scattered_embedding".format(self.column_name)

  @property
  def config(self):
    # The raw feature is parsed as a variable-length list of strings.
    return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    # The initializer callable is excluded from the identity used for sorting.
    return self._key_without_properties(["initializer"])

  def insert_transformed_feature(self, columns_to_tensors):
    # No transformation needed here: the raw tensor is used as-is and the
    # hashing happens inside the embedding lookup itself.
    columns_to_tensors[self] = columns_to_tensors[self.column_name]

  def _deep_embedding_lookup_arguments(self, input_tensor):
    """Packs the arguments needed for a deep (DNN) embedding lookup."""
    return _DeepEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.size,
        initializer=self.initializer,
        combiner=self.combiner,
        dimension=self.dimension,
        shared_embedding_name=None,
        hash_key=self.hash_key,
        max_norm=None,
        trainable=True)
def scattered_embedding_column(column_name,
                               size,
                               dimension,
                               hash_key,
                               combiner="mean",
                               initializer=None):
  """Creates an embedding column of a sparse feature using parameter hashing.

  Instead of allocating one `dimension`-sized vector per id, the i-th
  component of a value v's embedding is fetched from a single flat parameter
  vector of `size` entries, indexed by a fingerprint of the pair (v, i).

  A conventional column such as

      embedding_column(
          sparse_column_with_hash_bucket(column_name, bucket_size),
          dimension)

  can be replaced by

      scattered_embedding_column(
          column_name,
          size=bucket_size * dimension,
          dimension=dimension,
          hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)

  with the same number of embedding parameters. This should reduce the impact
  of hash collisions at the cost of slower training.

  Args:
    column_name: A string defining sparse column name.
    size: An integer specifying the number of parameters in the embedding layer.
    dimension: An integer specifying dimension of the embedding.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp.
    combiner: How to reduce multiple entries in a single row: "mean" (default,
      l1 normalization), "sqrtn" (l2 normalization) or "sum" (no
      normalization). See `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function for the embedding variable.
      Defaults to `tf.truncated_normal_initializer` with mean 0 and standard
      deviation 0.1.

  Returns:
    A _ScatteredEmbeddingColumn.

  Raises:
    ValueError: if dimension or size is not a positive integer; or if combiner
      is not supported.
  """
  # Validate the cheap scalar arguments before building the column object.
  if dimension < 1 or size < 1:
    raise ValueError("Dimension and size must be greater than 0. "
                     "dimension: {}, size: {}, column_name: {}".format(
                         dimension, size, column_name))

  if combiner not in {"mean", "sqrtn", "sum"}:
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
                     "combiner: {}, column_name: {}".format(combiner,
                                                            column_name))

  return _ScatteredEmbeddingColumn(
      column_name=column_name,
      size=size,
      dimension=dimension,
      hash_key=hash_key,
      combiner=combiner,
      initializer=initializer)
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
"""Reshaping logic for dense, numeric `Tensors`.
Follows the following rules:
1. If `output_rank > input_rank + 1` raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
dimension and return
3. If `output_rank == input_rank`, return `input_tensor`.
4. If `output_rank < input_rank`, flatten the inner dimensions of
`input_tensor` and return a `Tensor` with `output_rank`
Args:
input_tensor: a dense `Tensor` to be reshaped.
output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
input_rank = input_tensor.get_shape().ndims
if input_rank is not None:
if output_rank > input_rank + 1:
error_string = ("Rank of input Tensor ({}) should be the same as "
"output_rank ({}). For example, sequence data should "
"typically be 3 dimensional (rank 3) while non-sequence "
"data is typically 2 dimensional (rank 2).".format(
input_rank, output_rank))
if column_name is not None:
error_string = ("Error while processing column {}.".format(column_name)
+ error_string)
raise ValueError(error_string)
if output_rank == input_rank + 1:
logging.warning(
"Rank of input Tensor ({}) should be the same as output_rank ({}) "
"for column. Will attempt to expand dims. It is highly recommended "
"that you resize your input, as this behavior may change.".format(
input_rank, output_rank))
return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
if output_rank == input_rank:
return input_tensor
# Here, either `input_rank` is unknown or it is greater than `output_rank`.
return layers._inner_flatten(input_tensor, output_rank) # pylint: disable=protected-access
class _RealValuedColumn(_FeatureColumn, collections.namedtuple(
    "_RealValuedColumn",
    ["column_name", "dimension", "default_value", "dtype", "normalizer"])):
  """Represents a real valued feature column also known as continuous features.

  Instances of this class are immutable. A real valued column with a specified
  dimension means features are dense, otherwise they're sparse.
  In the dense case, the dictionary returned by InputBuilder contains a
  ("column_name", Tensor) pair with a Tensor shape of (batch_size, dimension).
  In the sparse shape, the dictionary contains a ("column_name", SparseTensor)
  pair instead with shape inferred after parsing.
  """

  def __new__(cls, column_name, dimension, default_value,
              dtype, normalizer):
    # Store default_value as a tuple (not a list) — presumably to keep the
    # namedtuple instance hashable and value-comparable.
    if default_value is not None:
      default_value = tuple(default_value)
    return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
                                                 default_value, dtype,
                                                 normalizer)

  @property
  def name(self):
    """Name of this column (the raw feature name)."""
    return self.column_name

  @property
  def config(self):
    """Parsing spec for this column, keyed by the raw feature name."""
    if self.dimension is None:
      # Variable-length mode: parsed as a SparseTensor.
      return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
    else:
      # FixedLenFeature expects a list (or None), not the stored tuple.
      default_value = self.default_value
      if default_value is not None:
        default_value = list(default_value)
      return {self.column_name: parsing_ops.FixedLenFeature([self.dimension],
                                                            self.dtype,
                                                            default_value)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    # The normalizer callable is excluded from the identity string.
    return self._key_without_properties(["normalizer"])

  @property
  def normalizer_fn(self):
    """Returns the function used to normalize the column."""
    return self.normalizer

  def _normalized_input_tensor(self, input_tensor):
    """Returns the input tensor after custom normalization is applied."""
    return (self.normalizer(input_tensor) if self.normalizer is not None else
            input_tensor)

  def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    # Transform the input tensor according to the normalizer function.
    input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
    columns_to_tensors[self] = math_ops.to_float(input_tensor)

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    """Densifies, casts to float32 if needed, and reshapes to `output_rank`."""
    input_tensor = self._to_dense_tensor(input_tensor)
    if input_tensor.dtype != dtypes.float32:
      input_tensor = math_ops.to_float(input_tensor)
    return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)

  def _to_dense_tensor(self, input_tensor):
    """Converts SparseTensor input to dense; passes dense input through."""
    if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
      # Missing entries are filled with the scalar default value, or 0 when
      # no default was given.
      default_value = (self.default_value[0] if self.default_value is not None
                       else 0)
      return sparse_ops.sparse_tensor_to_dense(
          input_tensor, default_value=default_value)
    return input_tensor
def real_valued_column(column_name,
                       dimension=1,
                       default_value=None,
                       dtype=dtypes.float32,
                       normalizer=None):
  """Creates a `_RealValuedColumn` for dense numeric data.

  Args:
    column_name: A string defining real valued column name.
    dimension: An integer specifying dimension of the real valued column.
      The default is 1. When dimension is not None, the Tensor representing
      the _RealValuedColumn will have the shape of [batch_size, dimension].
      A None dimension means the feature column should be treated as variable
      length and will be parsed as a `SparseTensor`.
    default_value: A single value compatible with dtype or a list of values
      compatible with dtype which the column takes on during tf.Example parsing
      if data is missing. When dimension is not None, a default value of None
      will cause tf.parse_example to fail if an example does not contain this
      column. If a single value is provided, the same value will be applied as
      the default value for every dimension. If a list of values is provided,
      the length of the list should be equal to the value of `dimension`.
      Only scalar default value is supported in case dimension is not specified.
    dtype: defines the type of values. Default value is tf.float32. Must be a
      non-quantized, real integer or floating point type.
    normalizer: If not None, a function that can be used to normalize the value
      of the real valued column after default_value is applied for parsing.
      Normalizer function takes the input tensor as its argument, and returns
      the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
      variable length columns, the normalizer should expect an input_tensor of
      type `SparseTensor`.

  Returns:
    A _RealValuedColumn.

  Raises:
    TypeError: if dimension is not an int
    ValueError: if dimension is not a positive integer
    TypeError: if default_value is a list but its length is not equal to the
      value of `dimension`.
    TypeError: if default_value is not compatible with dtype.
    ValueError: if dtype is not convertible to tf.float32.
  """
  # Validate dimension first: it must be a positive int when specified
  # (None selects the variable-length / SparseTensor mode).
  if dimension is not None:
    if not isinstance(dimension, int):
      raise TypeError("dimension must be an integer. "
                      "dimension: {}, column_name: {}".format(dimension,
                                                              column_name))
    if dimension < 1:
      raise ValueError("dimension must be greater than 0. "
                       "dimension: {}, column_name: {}".format(dimension,
                                                               column_name))

  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError("dtype must be convertible to float. "
                     "dtype: {}, column_name: {}".format(dtype, column_name))

  if default_value is None:
    return _RealValuedColumn(column_name, dimension, default_value, dtype,
                             normalizer)

  # Scalar int default: broadcast across `dimension` entries (a single entry
  # when dimension is None), converting to float for floating dtypes.
  # NOTE(review): isinstance(True, int) is True, so a bool default is accepted
  # here as an integer — presumably unintended; confirm before tightening.
  if isinstance(default_value, int):
    if dtype.is_integer:
      default_value = ([default_value for _ in range(dimension)] if dimension
                       else [default_value])
      return _RealValuedColumn(column_name, dimension, default_value, dtype,
                               normalizer)
    if dtype.is_floating:
      default_value = float(default_value)
      default_value = ([default_value for _ in range(dimension)] if dimension
                       else [default_value])
      return _RealValuedColumn(column_name, dimension, default_value, dtype,
                               normalizer)

  # Scalar float default: only valid for purely floating dtypes.
  if isinstance(default_value, float):
    if dtype.is_floating and (not dtype.is_integer):
      default_value = ([default_value for _ in range(dimension)] if dimension
                       else [default_value])
      return _RealValuedColumn(column_name, dimension, default_value, dtype,
                               normalizer)

  if isinstance(default_value, list):
    # List defaults require an explicit dimension and an exact length match.
    if dimension is None:
      raise ValueError(
          "Only scalar default value is supported when dimension is None. "
          "default_value: {}, column_name: {}".format(
              default_value, column_name))
    if len(default_value) != dimension:
      raise ValueError(
          "The length of default_value must be equal to dimension. "
          "default_value: {}, dimension: {}, column_name: {}".format(
              default_value, dimension, column_name))
    # Check if the values in the list are all integers or are convertible to
    # floats.
    is_list_all_int = True
    is_list_all_float = True
    for v in default_value:
      if not isinstance(v, int):
        is_list_all_int = False
      if not (isinstance(v, float) or isinstance(v, int)):
        is_list_all_float = False
    if is_list_all_int:
      if dtype.is_integer:
        return _RealValuedColumn(column_name, dimension, default_value, dtype,
                                 normalizer)
      elif dtype.is_floating:
        default_value = [float(v) for v in default_value]
        return _RealValuedColumn(column_name, dimension, default_value, dtype,
                                 normalizer)
    if is_list_all_float:
      if dtype.is_floating and (not dtype.is_integer):
        default_value = [float(v) for v in default_value]
        return _RealValuedColumn(column_name, dimension, default_value, dtype,
                                 normalizer)

  # Any default_value that fell through the branches above is incompatible.
  raise TypeError("default_value must be compatible with dtype. "
                  "default_value: {}, dtype: {}, column_name: {}".format(
                      default_value, dtype, column_name))
class _BucketizedColumn(_FeatureColumn, collections.namedtuple(
    "_BucketizedColumn", ["source_column", "boundaries"])):
  """Represents a bucketization transformation also known as binning.

  Instances of this class are immutable. Values in `source_column` will be
  bucketized based on `boundaries`.
  For example, if the inputs are:
      boundaries = [0, 10, 100]
      source_column = [[-5], [150], [10], [0], [4], [19]]
  then the bucketized feature will be:
      output = [[0], [3], [2], [1], [1], [2]]

  Attributes:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A list or tuple of floats specifying the boundaries. It has to
      be sorted. [a, b, c] defines following buckets: (-inf., a), [a, b),
      [b, c), [c, inf.)
  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """

  def __new__(cls, source_column, boundaries):
    # Bucketization is only defined over dense real-valued input.
    if not isinstance(source_column, _RealValuedColumn):
      raise TypeError("source_column must be an instance of _RealValuedColumn. "
                      "source_column: {}".format(source_column))

    if source_column.dimension is None:
      raise ValueError("source_column must have a defined dimension. "
                       "source_column: {}".format(source_column))

    if (not isinstance(boundaries, list) and
        not isinstance(boundaries, tuple)) or not boundaries:
      raise ValueError("boundaries must be a non-empty list or tuple. "
                       "boundaries: {}".format(boundaries))

    # We allow bucket boundaries to be monotonically increasing
    # (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
    # de-duplicate.
    sanitized_boundaries = []
    for i in range(len(boundaries) - 1):
      if boundaries[i] == boundaries[i + 1]:
        continue
      elif boundaries[i] < boundaries[i + 1]:
        sanitized_boundaries.append(boundaries[i])
      else:
        raise ValueError("boundaries must be a sorted list. "
                         "boundaries: {}".format(boundaries))
    # The final boundary always survives de-duplication.
    sanitized_boundaries.append(boundaries[len(boundaries) - 1])

    # Store as a tuple so the namedtuple stays immutable and hashable.
    return super(_BucketizedColumn, cls).__new__(cls, source_column,
                                                 tuple(sanitized_boundaries))

  @property
  def name(self):
    """Name of this column, derived from the source column's name."""
    return "{}_bucketized".format(self.source_column.name)

  @property
  def length(self):
    """Returns total number of buckets."""
    # n boundaries split the real line into n + 1 buckets.
    return len(self.boundaries) + 1

  @property
  def config(self):
    # Parsing config is inherited from the dense source column.
    return self.source_column.config

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    """Bucketizes the source column and caches the result keyed by `self`."""
    # Bucketize the source column.
    if self.source_column not in columns_to_tensors:
      self.source_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = bucketization_op.bucketize(
        columns_to_tensors[self.source_column],
        boundaries=list(self.boundaries),
        name="bucketize")
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,<|fim▁hole|> return array_ops.reshape(
array_ops.one_hot(
math_ops.to_int64(input_tensor),
self.length,
1.,
0.,
name="one_hot"), [-1, self.length * self.source_column.dimension],
name="reshape")
  def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor.

    The result has dense shape [batch_size, dimension]. When the source
    column has more than one dimension, bucket ids are offset per dimension
    (dimension d contributes ids in [d * length, (d + 1) * length)) so ids
    stay unique across dimensions.
    """
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      # Row index per (example, dimension) pair: 0,0,...,0,1,1,...,1,...
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="reshape")
      # Column index cycles 0..dimension-1 once per example.
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    # Assemble [row, col] index pairs and the dense shape as int64.
    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
    sparse_id_values = sparse_tensor_py.SparseTensor(
        indices, bucket_indices, shape)
    return sparse_id_values
  def _wide_embedding_lookup_arguments(self, input_tensor):
    """Packs the lookup arguments for use in a wide/linear model."""
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        # One linear weight per (bucket, source dimension) pair.
        vocab_size=self.length * self.source_column.dimension,
        initializer=init_ops.zeros_initializer(),
        combiner="sum")
def bucketized_column(source_column, boundaries):
  """Builds a `_BucketizedColumn` that discretizes a dense numeric column.

  Args:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A sorted list or tuple of floats; each consecutive pair
      delimits one bucket, with open-ended buckets at both extremes.

  Returns:
    A _BucketizedColumn.

  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """
  # All validation (type, dimension, sortedness) lives in __new__.
  return _BucketizedColumn(source_column=source_column, boundaries=boundaries)
class _CrossedColumn(_FeatureColumn,
                     collections.namedtuple("_CrossedColumn",
                                            ["columns", "hash_bucket_size",
                                             "hash_key",
                                             "combiner", "ckpt_to_load_from",
                                             "tensor_name_in_ckpt"])):
  """Represents a cross transformation also known as conjunction or combination.

  Instances of this class are immutable. It crosses given `columns`. Crossed
  column output will be hashed to hash_bucket_size.
  Conceptually, transformation can be thought as:
    Hash(cartesian product of features in columns) % `hash_bucket_size`

  For example, if the columns are

      SparseTensor referred by first column: shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      SparseTensor referred by second column: : shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

  then crossed feature will look like:

      shape = [2, 2]
      [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
      [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
      [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size

  Attributes:
    columns: An iterable of _FeatureColumn. Items can be an instance of
      _SparseColumn, _CrossedColumn, or _BucketizedColumn.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column::
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.

  Raises:
    TypeError: if all items in columns are not an instance of _SparseColumn,
      _CrossedColumn, or _BucketizedColumn.
    ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
      if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
  """

  @staticmethod
  def _assert_is_crossable(column):
    """Raises TypeError unless `column` is a crossable column type."""
    if isinstance(column, (_SparseColumn, _CrossedColumn, _BucketizedColumn)):
      return
    raise TypeError("columns must be a set of _SparseColumn, "
                    "_CrossedColumn, or _BucketizedColumn instances. "
                    "(column {} is a {})".format(column,
                                                 column.__class__.__name__))

  def __new__(cls,
              columns,
              hash_bucket_size,
              hash_key,
              combiner="sum",
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None):
    for column in columns:
      _CrossedColumn._assert_is_crossable(column)

    if len(columns) < 2:
      raise ValueError("columns must contain at least 2 elements. "
                       "columns: {}".format(columns))

    if hash_bucket_size < 2:
      raise ValueError("hash_bucket_size must be at least 2. "
                       "hash_bucket_size: {}".format(hash_bucket_size))

    # Checkpoint restore needs both the file pattern and the tensor name.
    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")

    # Sort members by name so that crosses over the same columns compare
    # equal regardless of the order the caller listed them in.
    sorted_columns = sorted(
        [column for column in columns], key=lambda column: column.name)
    return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
                                              hash_bucket_size, hash_key,
                                              combiner,
                                              ckpt_to_load_from,
                                              tensor_name_in_ckpt)

  @property
  def name(self):
    """Deterministic name: sorted member names joined by "_X_"."""
    sorted_names = sorted([column.name for column in self.columns])
    return "_X_".join(sorted_names)

  @property
  def config(self):
    """Union of the parsing configs of all member columns."""
    config = {}
    for column in self.columns:
      config.update(_get_feature_config(column))
    return config

  @property
  def length(self):
    """Returns total number of buckets."""
    return self.hash_bucket_size

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor

  # pylint: disable=unused-argument
  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    # Crossed features carry no per-id weights.
    return None

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles cross transformation."""

    def _collect_leaf_level_columns(cross):
      """Collects base columns contained in the cross."""
      leaf_level_columns = []
      for c in cross.columns:
        if isinstance(c, _CrossedColumn):
          leaf_level_columns.extend(_collect_leaf_level_columns(c))
        else:
          leaf_level_columns.append(c)
      return leaf_level_columns

    feature_tensors = []
    for c in _collect_leaf_level_columns(self):
      if isinstance(c, _SparseColumn):
        # Base sparse columns are keyed by their raw feature name.
        feature_tensors.append(columns_to_tensors[c.name])
      else:
        if c not in columns_to_tensors:
          c.insert_transformed_feature(columns_to_tensors)
        if isinstance(c, _BucketizedColumn):
          # Bucketized output must be converted to sparse before crossing.
          feature_tensors.append(c.to_sparse_tensor(columns_to_tensors[c]))
        else:
          feature_tensors.append(columns_to_tensors[c])
    columns_to_tensors[self] = sparse_feature_cross_op.sparse_feature_cross(
        feature_tensors,
        hashed_output=True,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key,
        name="cross")

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    """Crossed columns cannot feed a DNN directly; raises unconditionally."""
    raise ValueError("CrossedColumn is not supported in DNN. "
                     "Please use embedding_column. column: {}".format(self))

  def _checkpoint_path(self):
    """Returns (checkpoint pattern, tensor name) to restore from, or None."""
    if self.ckpt_to_load_from is not None:
      return self.ckpt_to_load_from, self.tensor_name_in_ckpt
    return None

  def _wide_embedding_lookup_arguments(self, input_tensor):
    """Packs the lookup arguments for use in a wide/linear model."""
    return _LinearEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner)
def crossed_column(columns, hash_bucket_size, combiner="sum",
                   ckpt_to_load_from=None,
                   tensor_name_in_ckpt=None,
                   hash_key=None):
  """Builds a `_CrossedColumn` that hashes the cartesian product of columns.

  Args:
    columns: An iterable of _FeatureColumn. Items can be an instance of
      _SparseColumn, _CrossedColumn, or _BucketizedColumn.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: How to reduce multiple entries in a single row: "sum" (default,
      no normalization), "mean" (l1 normalization) or "sqrtn" (l2
      normalization, often accurate for bag-of-words columns). See
      `tf.embedding_lookup_sparse`.
    ckpt_to_load_from: (Optional). Checkpoint name/pattern from which to
      restore the column weights. Required if `tensor_name_in_ckpt` is set.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in that checkpoint
      holding the column weights. Required if `ckpt_to_load_from` is set.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).

  Returns:
    A _CrossedColumn.

  Raises:
    TypeError: if any item in columns is not an instance of _SparseColumn,
      _CrossedColumn, or _BucketizedColumn, or hash_bucket_size is not an int.
    ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1.
  """
  # Validation of columns / bucket count / checkpoint args happens in __new__.
  return _CrossedColumn(
      columns=columns,
      hash_bucket_size=hash_bucket_size,
      hash_key=hash_key,
      combiner=combiner,
      ckpt_to_load_from=ckpt_to_load_from,
      tensor_name_in_ckpt=tensor_name_in_ckpt)
class DataFrameColumn(_FeatureColumn,
                      collections.namedtuple("DataFrameColumn",
                                             ["column_name", "series"])):
  """Represents a feature column produced from a `DataFrame`.

  Instances of this class are immutable. A `DataFrame` column may be dense or
  sparse, and may have any shape, with the constraint that dimension 0 is
  batch_size.

  Args:
    column_name: a name for this column
    series: a `Series` to be wrapped, which has already had its base features
      substituted with `PredefinedSeries`.
  """

  def __new__(cls, column_name, series):
    return super(DataFrameColumn, cls).__new__(cls, column_name, series)

  @property
  def name(self):
    """The name of this column."""
    return self.column_name

  @property
  def config(self):
    # Parsing config comes from the base features the wrapped series needs.
    return self.series.required_base_features()

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return self.name

  def insert_transformed_feature(self, columns_to_tensors):
    """Builds the wrapped series and caches the result keyed by `self`.

    The cache must already contain mappings from the expected base feature
    names to Tensors.

    Passing columns_to_tensors as the cache here means that multiple outputs
    of the transform will be cached, keyed by the repr of their associated
    TransformedSeries.
    The specific requested output ends up in columns_to_tensors twice: once
    keyed by the TransformedSeries repr, and once keyed by this
    DataFrameColumn instance.
    """
    columns_to_tensors[self] = self.series.build(columns_to_tensors)

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    """Casts to float32 (if needed) and reshapes to `output_rank`."""
    if input_tensor.dtype != dtypes.float32:
      input_tensor = math_ops.to_float(input_tensor)
    return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)

  def _to_dense_tensor(self, input_tensor):
    return self._to_dnn_input_layer(input_tensor)

  def __eq__(self, other):
    # BUG FIX: the previous implementation compared `self.__dict__`, which is
    # always empty for namedtuple instances on Python 3, so any two
    # DataFrameColumns compared equal. Compare the field values instead.
    if isinstance(other, self.__class__):
      return tuple(self) == tuple(other)
    return False

  def __ne__(self, other):
    return not self.__eq__(other)

  # Defining __eq__ sets __hash__ to None on Python 3; restore tuple-based
  # hashing (consistent with the field-based __eq__ above) so instances stay
  # usable in sets and as dict keys — insert_transformed_feature keys a dict
  # by `self`.
  __hash__ = tuple.__hash__
def _get_feature_config(feature_column):
"""Returns configuration for the base feature defined in feature_column."""
if not isinstance(feature_column, _FeatureColumn):
raise TypeError(
"feature_columns should only contain instances of _FeatureColumn. "
"Given column is {}".format(feature_column))
if isinstance(feature_column, (_SparseColumn, _WeightedSparseColumn,
_EmbeddingColumn, _RealValuedColumn,
_BucketizedColumn, _CrossedColumn,
_OneHotColumn, _ScatteredEmbeddingColumn)):
return feature_column.config
raise TypeError("Not supported _FeatureColumn type. "
"Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
"""Helper that prepares features config from input feature_columns.
The returned feature config can be used as arg 'features' in tf.parse_example.
Typical usage example:
```python
# Define features and transformations
feature_a = sparse_column_with_vocabulary_file(...)
feature_b = real_valued_column(...)
feature_c_bucketized = bucketized_column(real_valued_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=[feature_a, feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
batch_examples = tf.parse_example(
serialized=serialized_examples,
features=create_feature_spec_for_parsing(feature_columns))
```
For the above example, create_feature_spec_for_parsing would return the dict:
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn, unless
feature_columns is a dict -- in which case, this should be true of all
values in the dict.
Returns:
A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
"""
if isinstance(feature_columns, dict):
feature_columns = feature_columns.values()
features_config = {}
for column in feature_columns:
features_config.update(_get_feature_config(column))
return features_config
def _create_sequence_feature_spec_for_parsing(sequence_feature_columns,
allow_missing_by_default=False):
"""Prepares a feature spec for parsing `tf.SequenceExample`s.
Args:
sequence_feature_columns: an iterable containing all the feature columns.
All items should be instances of classes derived from `_FeatureColumn`.
allow_missing_by_default: whether to set `allow_missing=True` by default for
`FixedLenSequenceFeature`s.
Returns:
A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`.
"""
feature_spec = create_feature_spec_for_parsing(sequence_feature_columns)
sequence_feature_spec = {}
for key, feature in feature_spec.items():
if isinstance(feature, parsing_ops.VarLenFeature):
sequence_feature = feature
elif isinstance(feature, parsing_ops.FixedLenFeature):
default_is_set = feature.default_value is not None
if default_is_set:
logging.warning(
'Found default value {} for feature "{}". Ignoring this value and '
'setting `allow_missing=True` instead.'.
format(feature.default_value, key))
sequence_feature = parsing_ops.FixedLenSequenceFeature(
shape=feature.shape,
dtype=feature.dtype,
allow_missing=(allow_missing_by_default or default_is_set))
else:
raise TypeError(
"Unsupported feature type: {}".format(type(feature).__name__))
sequence_feature_spec[key] = sequence_feature
return sequence_feature_spec
def make_place_holder_tensors_for_base_features(feature_columns):
"""Returns placeholder tensors for inference.
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A dict mapping feature keys to SparseTensors (sparse columns) or
placeholder Tensors (dense columns).
"""
# Get dict mapping features to FixedLenFeature or VarLenFeature values.
dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
placeholders = {}
for column_name, column_type in dict_for_parse_example.items():
if isinstance(column_type, parsing_ops.VarLenFeature):
# Sparse placeholder for sparse tensors.
placeholders[column_name] = array_ops.sparse_placeholder(
column_type.dtype, name="Placeholder_{}".format(column_name))
else:
# Simple placeholder for dense tensors.
placeholders[column_name] = array_ops.placeholder(
column_type.dtype,
shape=(None, column_type.shape[0]),
name="Placeholder_{}".format(column_name))
return placeholders
class _SparseIdLookupConfig(
collections.namedtuple("_SparseIdLookupConfig",
["vocabulary_file", "keys", "num_oov_buckets",
"vocab_size", "default_value"])):
"""Defines lookup configuration for a sparse feature.
An immutable object defines lookup table configuration used by
tf.feature_to_id_v2.
Attributes:
vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
with keys.
keys: A 1-D string iterable that specifies the mapping of strings to
indices. It means a feature in keys will map to it's index in keys.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
"""
def __new__(cls,
vocabulary_file=None,
keys=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1):
return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys,
num_oov_buckets,
vocab_size, default_value)<|fim▁end|> | trainable=True,
output_rank=2):
if output_rank != 2:
raise ValueError("BucketizedColumn currently only supports output_rank=2") |
<|file_name|>Explorer.java<|end_file_name|><|fim▁begin|>/* */ package com.webbuilder.interact;
/* */
/* */ import com.webbuilder.controls.Query;
/* */ import com.webbuilder.utils.CompressUtil;
/* */ import com.webbuilder.utils.DateUtil;
/* */ import com.webbuilder.utils.DbUtil;
/* */ import com.webbuilder.utils.FileUtil;
/* */ import com.webbuilder.utils.StringUtil;
/* */ import com.webbuilder.utils.SysUtil;
/* */ import com.webbuilder.utils.WebUtil;
/* */ import com.webbuilder.utils.XMLParser;
/* */ import java.awt.Color;
/* */ import java.awt.Graphics;
/* */ import java.awt.image.BufferedImage;
/* */ import java.io.File;
/* */ import java.io.FileInputStream;
/* */ import java.io.InputStream;
/* */ import java.io.PrintWriter;
/* */ import java.sql.Connection;
/* */ import java.sql.PreparedStatement;
/* */ import java.sql.Timestamp;
/* */ import java.util.Calendar;
/* */ import java.util.Date;
/* */ import java.util.HashMap;
/* */ import java.util.HashSet;
/* */ import java.util.Iterator;
/* */ import javax.imageio.ImageIO;
/* */ import javax.servlet.http.HttpServletRequest;
/* */ import javax.servlet.http.HttpServletResponse;
/* */ import javax.servlet.http.HttpSession;
/* */ import javax.swing.Icon;
/* */ import javax.swing.filechooser.FileSystemView;
/* */ import org.dom4j.Attribute;
/* */ import org.dom4j.Document;
/* */ import org.dom4j.Element;
/* */ import org.json.JSONArray;
/* */ import org.json.JSONObject;
/* */
/* */ public class Explorer
/* */ {
/* */ public void getRcvFilter(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 42 */ String find = StringUtil.fetchString(request, "findCombo");
/* */
/* 44 */ if (!StringUtil.isEmpty(find)) {
/* 45 */ request.setAttribute("findValue", "%" + find + "%");
/* 46 */ String sql = " and WB_NAME like {?findValue?}";
/* 47 */ request.setAttribute("whereSql", sql);
/* */ } else {
/* 49 */ DbUtil.getDefaultWhere(request, response, "WB_DATE,WB_CODE=b",
/* 50 */ false);
/* */ }
/* */ }
/* */
/* */ public void sendFile(HttpServletRequest request, HttpServletResponse response) throws Exception {
/* 55 */ Connection conn = DbUtil.fetchConnection(request, request.getAttribute(
/* 56 */ "sys.jndi").toString());
/* 57 */ String depts = request.getAttribute("WB_RDEPT").toString();
/* 58 */ String roles = request.getAttribute("WB_RROLE").toString();
/* 59 */ String users = request.getAttribute("WB_RUSER").toString();
/* 60 */ String scope = request.getAttribute("sys.scope").toString();
/* 61 */ String dbType = request.getAttribute("sys.dbType").toString();
/* 62 */ HashSet userList = new HashSet();
/* */
/* 64 */ userList = DbUtil.getUserList(conn, dbType, scope, depts, roles, users);
/* 65 */ conn.setAutoCommit(false);
/* */ try {
/* 67 */ PreparedStatement stm = null;
/* 68 */ int k = 0; int l = userList.size();
/* 69 */ boolean commitAll = false; boolean added = false;
/* 70 */ stm = conn
/* 71 */ .prepareStatement("insert into WB_FILERECEIVE values(?,?,?,?,null)");
/* */ try {
/* 73 */ stm.setString(1, scope);
/* 74 */ stm.setTimestamp(2,
/* 75 */ new Timestamp(DateUtil.stringToStdDate(
/* 75 */ request.getAttribute("sys.now").toString()).getTime()));
/* 76 */ stm.setString(3, request.getAttribute("sys.code").toString());
/* 77 */ while ( userList.iterator().hasNext()) {
String s=userList.iterator().next().toString();
/* 78 */ k++;
/* 79 */ stm.setString(4, s);
/* 80 */ stm.addBatch();
/* 81 */ if (!added)
/* 82 */ added = true;
/* 83 */ if (k % 1000 == 0) {
/* 84 */ if (k == l)
/* 85 */ commitAll = true;
/* 86 */ stm.executeBatch();
/* */ }
/* */ }
/* 89 */ if ((added) && (!commitAll))
/* 90 */ stm.executeBatch();
/* */ } finally {
/* 92 */ DbUtil.closeStatement(stm);
/* */ }
/* 94 */ conn.commit();
/* */ } catch (Exception e) {
/* 96 */ conn.rollback();
/* 97 */ throw new Exception(e);
/* */ } finally {
/* 99 */ conn.setAutoCommit(true);
/* */ }
/* */ }
/* */
/* */ private String createUserDir(String root) throws Exception {
/* 104 */ Date dt = new Date();
/* 105 */ String y = "y" + Integer.toString(DateUtil.yearOf(dt));
/* 106 */ String d = "d" + Integer.toString(DateUtil.dayOfYear(dt));
/* 107 */ String h = "h" + Integer.toString(DateUtil.hourOfDay(dt));
/* 108 */ Calendar cal = Calendar.getInstance();
/* 109 */ cal.setTime(dt);
/* 110 */ int m = cal.get(12);
/* 111 */ h = h + "m" + Integer.toString(m / 10);
/* 112 */ String rel = y + "/" + d + "/" + h + "/";
/* 113 */ File file = FileUtil.getUniqueFile(new File(root + "/" + rel + "s" +
/* 114 */ DateUtil.formatDate(dt, "ssSSS")));
/* 115 */ rel = rel + file.getName();
/* 116 */ File dir = new File(root + "/" + rel);
/* 117 */ if (!dir.mkdirs())
/* 118 */ throw new Exception("不能创建目录。");
/* 119 */ return rel;
/* */ }
/* */
/* */ public void createPubDir(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 124 */ String root = request.getAttribute("sys.path").toString() +
/* 125 */ "WEB-INF/myfile";
/* 126 */ String scope = request.getAttribute("sys.scope").toString();
/* */
/* 128 */ String sysPubDir = FileUtil.fetchPubDir(root, scope);
/* 129 */ request.setAttribute("sysPubDir", sysPubDir);
/* */ }
/* */
/* */ public void createUserDir(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 134 */ String userPath = request.getAttribute("sys.rootpath").toString();
/* */
/* 136 */ if (StringUtil.isEmpty(userPath)) {
/* 137 */ String root = request.getAttribute("sys.path").toString() +
/* 138 */ "WEB-INF/myfile";
/* 139 */ String path = createUserDir(root);
/* 140 */ Query query = new Query();
/* 141 */ query.setRequest(request);
/* 142 */ query.type = "update";
/* 143 */ request.setAttribute("rootPath", path);
/* 144 */ query.sql = "update WB_USER set ROOT_PATH={?rootPath?} where USERNAME={?sys.user?}";
/* 145 */ query.jndi = StringUtil.fetchString(request, "sys.jndi");
/* 146 */ query.setName("query.updateUser");
/* 147 */ query.create();
/* 148 */ request.getSession(false).setAttribute("sys.rootpath",
/* 149 */ root + "/" + path);
/* 150 */ request.setAttribute("sys.rootpath", root + "/" + path);
/* */ } else {
/* 152 */ File dir = new File(userPath);
/* 153 */ if ((!dir.exists()) && (!dir.mkdirs()))
/* 154 */ throw new Exception("不能创建用户目录。");
/* */ }
/* */ }
/* */
/* */ public void setOrder(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 160 */ JSONArray files = new JSONArray(request.getParameter("orderTree"));
/* 161 */ int j = files.length();
/* 162 */ if (j == 0)
/* 163 */ return;
/* 164 */ File dir = new File(request.getParameter("orderDir"));
/* */
/* 168 */ HashMap hashMap = new HashMap();
/* */
/* 170 */ XMLParser mapXml = new XMLParser(FileUtil.getUserIndex(dir, request.getAttribute(
/* 171 */ "sys.scope").toString(), false));
/* 172 */ Element root = mapXml.document.getRootElement();
/* 173 */ Iterator iterator = root.elementIterator();
/* 174 */ while (iterator.hasNext()) {
/* 175 */ Element el = (Element)iterator.next();
/* 176 */ hashMap.put(el.attribute("name").getText(), el);
/* */ }
/* 178 */ for (int i = 0; i < j; i++) {
/* 179 */ String name = new JSONObject(files.getString(i)).getString("filename");
/* 180 */ Element el = (Element)hashMap.get(name);
/* 181 */ if (el != null) {
/* 182 */ root.add(el.createCopy());
/* 183 */ root.remove(el);
/* */ }
/* */ }
/* 186 */ mapXml.save();
/* */ }
/* */
/* */ public void getOrder(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 195 */ StringBuilder buf = new StringBuilder();
/* 196 */ boolean added = false;
/* */
/* 198 */ buf.append("[");
/* 199 */ File file = new File(request.getParameter("dir"));
/* 200 */ File mapFile = FileUtil.getUserIndex(file, request.getAttribute("sys.scope")
/* 201 */ .toString(), false);
/* 202 */ if (mapFile.exists()) {
/* 203 */ XMLParser mapXml = new XMLParser(mapFile);
/* 204 */ Element el = mapXml.document.getRootElement();
/* 205 */ if (el != null) {
/* 206 */ Iterator iterator = el.elementIterator();
/* 207 */ while (iterator.hasNext()) {
/* 208 */ el = (Element)iterator.next();
/* 209 */ if (added)
/* 210 */ buf.append(",");
/* */ else
/* 212 */ added = true;
/* 213 */ buf.append("{text:\"");
/* 214 */ String text = StringUtil.replaceParameters(request, el.attribute(
/* 215 */ "caption").getValue());
/* 216 */ String name = el.attribute("name").getValue();
/* 217 */ if (StringUtil.isEmpty(text))
/* 218 */ text = name;
/* 219 */ buf.append(StringUtil.toExpress(text));
/* 220 */ text = el.attribute("icon").getValue();
/* 221 */ if (!StringUtil.isEmpty(text)) {
/* 222 */ buf.append("\",iconCls:\"");
/* 223 */ buf.append(text);
/* */ }
/* 225 */ buf.append("\",filename:\"");
/* 226 */ buf.append(name);
/* 227 */ buf.append("\",leaf:true}");
/* */ }
/* */ }
/* */ }
/* 231 */ buf.append("]");
/* 232 */ response.getWriter().print(buf);
/* */ }
/* */
/* */ public void getProperty(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 243 */ File file = new File(request.getParameter("fileName"));
/* 244 */ File mapFile = FileUtil.getUserIndex(file.getParentFile(), request
/* 245 */ .getAttribute("sys.scope").toString(), false);
/* 246 */ if (mapFile.exists()) {
/* 247 */ String fileName = file.getName();
/* 248 */ XMLParser mapXml = new XMLParser(mapFile);
/* 249 */ Element el = mapXml.document.getRootElement();
/* 250 */ if (el != null) {
/* 251 */ Iterator iterator = el.elementIterator();
/* 252 */ while (iterator.hasNext()) {
/* 253 */ el = (Element)iterator.next();
/* 254 */ if (!StringUtil.isSame(el.attribute("name").getText(),
/* 255 */ fileName)) continue;
/* 256 */ StringBuilder buf = new StringBuilder();
/* 257 */ buf.append("{fileCaption:\"");
/* 258 */ Attribute attr = el.attribute("caption");
/* 259 */ if (attr != null)
/* 260 */ buf.append(StringUtil.toExpress(attr.getText()));
/* 261 */ buf.append("\",fileRole:\"");
/* 262 */ attr = el.attribute("role");
/* 263 */ if (attr != null)
/* 264 */ buf.append(StringUtil.toExpress(attr.getText()));
/* 265 */ buf.append("\",fileIcon:\"");
/* 266 */ attr = el.attribute("icon");
/* 267 */ if (attr != null)
/* 268 */ buf.append(attr.getText());
/* 269 */ buf.append("\",fileHint:\"");
/* 270 */ attr = el.attribute("hint");
/* 271 */ if (attr != null)
/* 272 */ buf.append(attr.getText());
/* 273 */ buf.append("\",fileHidden:\"");
/* 274 */ attr = el.attribute("hidden");
/* 275 */ if (attr != null)
/* 276 */ buf.append(StringUtil.toExpress(attr.getText()));
/* */ else
/* 278 */ buf.append("0");
/* 279 */ buf.append("\"}");
/* 280 */ response.getWriter().print(buf);
/* 281 */ return;
/* */ }
/* */ }
/* */ }
/* */ }
/* */
/* */ public void setPropertyCopy(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 290 */ innerSetProperty(request, response, true);
/* */ }
/* */
/* */ public void setProperty(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 295 */ innerSetProperty(request, response, false);
/* */ }
/* */
/* */ private void innerSetProperty(HttpServletRequest request, HttpServletResponse response, boolean createCopy)
/* */ throws Exception
/* */ {
/* 304 */ String caption = request.getParameter("fileCaption");
/* 305 */ String role = request.getParameter("fileRole");
/* 306 */ String icon = request.getParameter("fileIcon");
/* 307 */ String hint = request.getParameter("fileHint");
/* 308 */ String hidden = request.getParameter("fileHidden");
/* 309 */ JSONArray files = new JSONArray(request.getParameter("setFile"));
/* 310 */ HashMap map = new HashMap();
/* */
/* 313 */ File file = new File(files.getString(0));
/* 314 */ File dir = file.getParentFile();
/* 315 */ XMLParser mapXml = new XMLParser(FileUtil.getUserIndex(dir, request.getAttribute(
/* 316 */ "sys.scope").toString(), createCopy));
/* 317 */ Element root = mapXml.document.getRootElement();
/* 318 */ if (root != null) {
/* 319 */ Iterator iterator = root.elementIterator();
/* 320 */ while (iterator.hasNext()) {
/* 321 */ Element el = (Element)iterator.next();
/* 322 */ String name = el.attribute("name").getText();
/* 323 */ file = new File(dir, name);
/* 324 */ if ((!file.exists()) || (map.containsKey(name)))
/* 325 */ root.remove(el);
/* */ else
/* 327 */ map.put(name, el);
/* */ }
/* */ } else {
/* 330 */ root = mapXml.document.addElement("map");
/* 331 */ }int j = files.length();
/* 332 */ for (int i = 0; i < j; i++) {
/* 333 */ String name = FileUtil.extractFileName(files.getString(i));
/* 334 */ Element el = (Element)map.get(name);
/* 335 */ if (el == null) {
/* 336 */ el = root.addElement("file");
/* 337 */ el.addAttribute("name", name);
/* 338 */ el.addAttribute("caption", caption);
/* 339 */ el.addAttribute("role", role);
/* 340 */ el.addAttribute("icon", icon);
/* 341 */ el.addAttribute("hint", hint);
/* 342 */ el.addAttribute("hidden", hidden);
/* */ } else {
/* 344 */ el.attribute("name").setText(name);
/* 345 */ el.attribute("caption").setText(caption);
/* 346 */ el.attribute("role").setText(role);
/* 347 */ el.attribute("icon").setText(icon);
/* 348 */ el.attribute("hint").setText(hint);
/* 349 */ el.attribute("hidden").setText(hidden);
/* */ }
/* */ }
/* 352 */ mapXml.save();
/* */ }
/* */
/* */ public void importFile(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 357 */ String importDir = request.getAttribute("importDir").toString();
/* 358 */ FileUtil.checkRight(request, new File(importDir));
/* 359 */ InputStream stream = (InputStream)request.getAttribute("importFile");
/* 360 */ String fn = request.getAttribute("importFile__file").toString();
/* */
/* 362 */ if (StringUtil.isEqual(request.getAttribute("importType").toString(),
/* 363 */ "1")) {
/* 364 */ if (StringUtil.isSame(FileUtil.extractFileExt(fn), "zip"))
/* 365 */ CompressUtil.unzip(stream, new File(importDir),
/* 366 */ (String)request.getAttribute("sys.fileCharset"));
/* */ else
/* 368 */ throw new Exception("请选择一个zip格式的压缩文件。");
/* */ }
/* 370 */ else FileUtil.saveInputStreamToFile(stream, new File(importDir, fn));
/* */ }
/* */
/* */ public void exportFile(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 375 */ String[] list = StringUtil.split(request.getParameter("exportFiles"),
/* 376 */ "|");
/* 378 */ int i = 0; int j = list.length;
/* */
/* 380 */ File[] files = new File[j];
/* */
/* 382 */ for (i = 0; i < j; i++) {
/* 383 */ WebUtil.recordLog(request, "explorer导出:" + list[i], 1);
/* 384 */ files[i] = new File(list[i]);
/* 385 */ FileUtil.checkRight(request, files[i]);
/* */ }
/* */ String fileName;
/* 387 */ if (j == 1) {
/* 388 */ fileName = FileUtil.extractFileNameNoExt(files[0].getName());
/* */ } else {
/* 390 */ File parentFile = files[0].getParentFile();
/* 391 */ fileName = "";
/* 392 */ if (parentFile != null)
/* 393 */ fileName = FileUtil.extractFileNameNoExt(parentFile.getName());
/* 394 */ if (StringUtil.isEmpty(fileName))
/* 395 */ fileName = "data";
/* */ }
/* 397 */ boolean useZip = (StringUtil.isEqual(request.getParameter("exportType"), "1")) ||
/* 398 */ (j > 1) || (files[0].isDirectory());
/* 399 */ response.reset();
/* 400 */ if (!useZip) {
/* 401 */ response.setHeader("content-length", Long.toString(files[0]
/* 402 */ .length()));
/* 403 */ fileName = files[0].getName();
/* */ } else {
/* 405 */ fileName = fileName + ".zip";
/* 406 */ }response.setHeader("content-type", "application/force-download");
/* 407 */ String charset = (String)request.getAttribute("sys.fileCharset");
/* 408 */ response.setHeader("content-disposition", "attachment;filename=" +
/* 409 */ WebUtil.getFileName(fileName, charset));
/* 410 */ if (useZip) {
/* 411 */ CompressUtil.zip(files, response.getOutputStream(),
/* 412 */ (String)request.getAttribute("sys.fileCharset"));
/* */ } else {
/* 414 */ FileInputStream inputStream = new FileInputStream(files[0]);
/* 415 */ SysUtil.inputStreamToOutputStream(inputStream, response
/* 416 */ .getOutputStream());
/* 417 */ inputStream.close();
/* */ }
/* */ }
/* */
/* */ public void exportFile2(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* */ String root = request.getAttribute("sys.path").toString() +
"WEB-INF/myfile.doc";
/* 378 */ int i = 0; int j = 1;
/* */
/* 380 */ File[] files = new File[j];
/* */
/* 382 */
/* 383 */ WebUtil.recordLog(request, "explorer导出:" + root, 1);
/* 384 */ files[i] = new File(root);
/* 385 */ FileUtil.checkRight(request, files[i]);
/* */
/* */ String fileName;
/* 387 */ if (j == 1) {
/* 388 */ fileName = FileUtil.extractFileNameNoExt(files[0].getName());
/* */ } else {
/* 390 */ File parentFile = files[0].getParentFile();
/* 391 */ fileName = "";
/* 392 */ if (parentFile != null)
/* 393 */ fileName = FileUtil.extractFileNameNoExt(parentFile.getName());
/* 394 */ if (StringUtil.isEmpty(fileName))
/* 395 */ fileName = "data";
/* */ }
/* 397 */ boolean useZip = (StringUtil.isEqual(request.getParameter("exportType"), "1")) ||
/* 398 */ (j > 1) || (files[0].isDirectory());
/* 399 */ response.reset();
/* 400 */ if (!useZip) {
/* 401 */ response.setHeader("content-length", Long.toString(files[0]
/* 402 */ .length()));
/* 403 */ fileName = files[0].getName();
/* */ } else {
/* 405 */ fileName = fileName + ".zip";
/* 406 */ }response.setHeader("content-type", "application/force-download");
/* 407 */ String charset = (String)request.getAttribute("sys.fileCharset");
/* 408 */ response.setHeader("content-disposition", "attachment;filename=" +
/* 409 */ WebUtil.getFileName(fileName, charset));
/* 410 */ if (useZip) {
/* 411 */ CompressUtil.zip(files, response.getOutputStream(),
/* 412 */ (String)request.getAttribute("sys.fileCharset"));
/* */ } else {
/* 414 */ FileInputStream inputStream = new FileInputStream(files[0]);
/* 415 */ SysUtil.inputStreamToOutputStream(inputStream, response
/* 416 */ .getOutputStream());
/* 417 */ inputStream.close();
/* */ }
/* */ }
/* */
/* */ public void execute(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 423 */ String fileName = request.getParameter("file");
/* */ try {
/* 425 */ Runtime.getRuntime().exec(fileName);
/* */ } catch (Exception e) {
/* 427 */ throw new Exception("执行 \"" + FileUtil.extractFileName(fileName) +
/* 428 */ "\"错误。");
/* */ }
/* */ }
/* */
/* */ public void openFile(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 434 */ FileUtil.checkRight(request, new File(request.getParameter("file")));
/* 435 */ String charset = request.getParameter("charset");
/* */
/* 437 */ if (StringUtil.isEmpty(charset))
/* 438 */ charset = (String)request.getAttribute("sys.charset");
/* 439 */ response.getWriter().print(
/* 440 */ FileUtil.readText(request.getParameter("file"), charset));
/* */ }
/* */
/* */ public void saveFile(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 445 */ FileUtil.checkRight(request, new File(request.getParameter("file")));
/* 446 */ String charset = request.getParameter("charset");
/* 447 */ if (StringUtil.isEmpty(charset))
/* 448 */ charset = (String)request.getAttribute("sys.charset");
/* 449 */ FileUtil.writeText(request.getParameter("file"), request
/* 450 */ .getParameter("text"), charset);
/* */ }
/* */
/* */ public void deleteFiles(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 457 */ JSONArray files = new JSONArray(request.getParameter("files"));
/* 458 */ int j = files.length();
/* */
/* 460 */ for (int i = 0; i < j; i++) {
/* 461 */ String fileName = files.getString(i);
/* 462 */ File file = new File(fileName);
/* 463 */ FileUtil.checkRight(request, file);
/* 464 */ WebUtil.recordLog(request, "explorer删除:" + fileName, 1);
/* 465 */ if (file.isDirectory())
/* 466 */ FileUtil.deleteFolder(file);
/* 467 */ else if (!file.delete())
/* 468 */ throw new Exception("不能删除文件 \"" + file.getName() + "\"。");
/* */ }
/* */ }
/* */
/* */ public void pasteFiles(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 474 */ String filesParam = request.getParameter("files");
/* 475 */ String dir = request.getParameter("dir") + "/";
/* 476 */ File destFile = new File(dir);
/* 477 */ JSONArray files = new JSONArray(filesParam);
/* 478 */ boolean isCut = StringUtil.getStringBool(request.getParameter("isCut"));
/* 479 */ int j = files.length();
/* */
/* 481 */ for (int i = 0; i < j; i++) {
/* 482 */ File file = new File(files.getString(i));
/* 483 */ File dest = new File(dir + file.getName());
/* 484 */ FileUtil.checkRight(request, file);
/* 485 */ FileUtil.checkRight(request, dest);
/* 486 */ WebUtil.recordLog(request, "explorer贴粘:" + (isCut ? "剪切" : "复制") +
/* 487 */ "," + files.getString(i) + "至" + dir, 1);
/* 488 */ if (file.isDirectory()) {
/* 489 */ if (FileUtil.isSubFolder(file, destFile))
/* 490 */ throw new Exception("不能复制相同的文件夹。");
/* 491 */ FileUtil.copyFolder(file, dest, true, isCut);
/* */ } else {
/* 493 */ FileUtil.copyFile(file, dest, true, isCut);
/* */ }
/* */ }
/* */ }
/* */
/* */ public void rename(HttpServletRequest request, HttpServletResponse response) throws Exception {
/* 499 */ String fileName = request.getParameter("fileName");
/* 500 */ String rename = request.getParameter("fileValue");
/* 501 */ File file = new File(fileName);
/* 502 */ FileUtil.checkRight(request, file);
/* 503 */ if ((rename.indexOf("/") > -1) ||
/* 504 */ (rename.indexOf("\\") > -1) ||
/* 505 */ (!file.renameTo(
/* 506 */ new File(FileUtil.extractFilePath(fileName) +
/* 506 */ rename))))
/* 507 */ throw new Exception("重命名失败。");
/* */ }
/* */
/* */ public void newFile(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 512 */ String name = request.getParameter("fileValue");
/* 513 */ String fileName = request.getParameter("dir") + "/" + name;
/* 514 */ String type = request.getParameter("type");
/* */
/* 517 */ File file = new File(fileName);
/* 518 */ FileUtil.checkRight(request, file);
/* */ boolean flag;
/* */
/* 519 */ if (type.equals("dir"))
/* 520 */ flag = file.mkdir();
/* */ else
/* 522 */ flag = file.createNewFile();
/* 523 */ if (!flag)
/* 524 */ throw new Exception("不能创建\"" + name + "\"");
/* */ }
/* */
/* */ public void getPubDir(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 529 */ String dir = request.getParameter("dir");
/* 530 */ StringBuilder buf = new StringBuilder();
/* 531 */ String root = request.getAttribute("sys.path").toString() +
/* 532 */ "WEB-INF/myfile";
/* 533 */ String scope = request.getAttribute("sys.scope").toString();
/* */
/* 535 */ if (StringUtil.isEmpty(dir)) {
/* 536 */ loadPubDir(FileUtil.fetchPubDir(root, scope), buf);
/* */ } else {
/* 538 */ File fl = new File(dir);
/* 539 */ FileUtil.checkRight(request, fl);
/* 540 */ File[] files = fl.listFiles();
/* 541 */ FileUtil.sortFiles(files);
/* 542 */ loadFilesBuf(files, buf);
/* */ }
/* 544 */ response.getWriter().print(buf);
/* */ }
/* */
/* */ public void getUserDir(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 549 */ String dir = request.getParameter("dir");
/* 550 */ StringBuilder buf = new StringBuilder();
/* */
/* 552 */ if (StringUtil.isEmpty(dir)) {
/* 553 */ loadUserDir((String)request.getAttribute("sys.rootpath"), buf);
/* */ } else {
/* 555 */ File fl = new File(dir);
/* 556 */ FileUtil.checkRight(request, fl);
/* 557 */ File[] files = fl.listFiles();
/* 558 */ FileUtil.sortFiles(files);
/* 559 */ loadFilesBuf(files, buf);
/* */ }
/* 561 */ response.getWriter().print(buf);
/* */ }
/* */
/* */ public void getDir(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 566 */ String dir = request.getParameter("dir");
/* 567 */ boolean appRoot = StringUtil.getStringBool(request
/* 568 */ .getParameter("setAppRoot"));
/* 569 */ StringBuilder buf = new StringBuilder();
/* */
/* 571 */ if (StringUtil.isEmpty(dir)) {
/* 572 */ if ((appRoot) || (!loadFilesBuf(File.listRoots(), buf))) {
/* 573 */ buf = new StringBuilder();
/* 574 */ loadAppDir((String)request.getAttribute("sys.path"), buf);
/* */ }
/* */ } else {
/* 577 */ File[] files = new File(dir).listFiles();
/* 578 */ FileUtil.sortFiles(files);
/* 579 */ loadFilesBuf(files, buf);
/* */ }
/* 581 */ response.getWriter().print(buf);
/* */ }
/* */
/* */ public void getFile(HttpServletRequest request, HttpServletResponse response) throws Exception
/* */ {
/* 586 */ String dir = request.getParameter("dir");
/* 587 */ File dirFile = new File(dir);
/* 588 */ FileUtil.checkRight(request, dirFile);
/* 589 */ File[] files = dirFile.listFiles();
/* 590 */ if (files == null) {
/* 591 */ response.getWriter().print("{total:0,row:[]}");
/* 592 */ return;
/* */ }
/* 594 */ FileUtil.sortFiles(files);
/* 595 */ StringBuilder buf = new StringBuilder();
/* 596 */ FileSystemView fileView = FileSystemView.getFileSystemView();
/* 597 */ boolean isFirst = true;
/* 598 */ String start = request.getParameter("start");
/* 599 */ String limit = request.getParameter("limit");
/* 600 */ int count = 0;
/* */ int startValue;
/* */
/* 602 */ if (start == null)
/* 603 */ startValue = 1;
/* */ else
/* 605 */ startValue = Integer.parseInt(start) + 1;
/* */ int limitValue;
/* */
/* 606 */ if (limit == null)
/* 607 */ limitValue = 2147483647 - startValue;
/* */ else
/* 609 */ limitValue = Integer.parseInt(limit);
/* 610 */ int end = startValue + limitValue - 1;
/* 611 */ buf.append("{total:");
/* 612 */ buf.append(files.length);
/* 613 */ buf.append(",row:[");
/* 614 */ for (File file : files) {
/* 615 */ count++;
/* 616 */ if (count < startValue)
/* */ continue;
/* 618 */ if (count > end)
/* */ break;
/* 620 */ if (isFirst)
/* 621 */ isFirst = false;
/* */ else
/* 623 */ buf.append(",");
/* 624 */ boolean isDir = file.isDirectory();
/* 625 */ buf.append("{filename:\"");
/* 626 */ if (isDir)
/* 627 */ buf.append("0");
/* */ else
/* 629 */ buf.append("1");
/* 630 */ String fileName = file.getName();
/* 631 */ buf.append(fileName);
/* 632 */ buf.append("\",size:");
/* 633 */ if (isDir)
/* 634 */ buf.append(-1);
/* */ else
/* 636 */ buf.append(file.length());
/* 637 */ buf.append(",file:\"");
/* 638 */ buf.append(StringUtil.replace(file.getAbsolutePath(), "\\", "/"));
/* 639 */ buf.append("\",type:\"");
/* 640 */ if (isDir) {
/* 641 */ buf.append("0文件夹"); } else {
/* */ String type;
/* */ try { type = fileView.getSystemTypeDescription(file);
/* */ }
/* */ catch (Exception e)
/* */ {
/* */
/* 646 */ type = null;
/* */ }
/* 648 */ if (type != null) {
/* 649 */ buf.append("1" + StringUtil.toExpress(type));
/* */ } else {
/* 651 */ String ext = FileUtil.extractFileExt(fileName);
/* 652 */ if (StringUtil.isEmpty(ext)) {
/* 653 */ buf.append("1文件");
/* */ } else {
/* 655 */ buf.append("1" + ext);
/* 656 */ buf.append(" 文件");
/* */ }
/* */ }
/* */ }
/* 660 */ buf.append("\",modifyTime:\"");
/* 661 */ buf.append(DateUtil.dateToString(new Date(file.lastModified())));
/* 662 */ buf.append("\"}");
/* */ }
/* 664 */ buf.append("]}");
/* 665 */ response.getWriter().print(buf);
/* */ }
/* */
/* */ public void getIcon(HttpServletRequest request, HttpServletResponse response)
/* */ throws Exception
/* */ {
/* 671 */ File file = new File(new String(request.getParameter("file")
/* 671 */ .getBytes("ISO-8859-1"), "utf-8")); FileSystemView fileView = FileSystemView.getFileSystemView(); Icon icon = fileView.getSystemIcon(file); response.reset(); if (icon == null) { response.setContentType("image/gif"); InputStream is = new FileInputStream(request.getAttribute("sys.path") + "webbuilder/images/file.gif"); SysUtil.inputStreamToOutputStream(is, response.getOutputStream()); is.close(); } else { response.setContentType("image/jpeg"); int width = icon.getIconWidth(); int height = icon.getIconHeight(); BufferedImage image = new BufferedImage(width, height, 1); Graphics graphics = image.getGraphics(); graphics.setColor(Color.white); graphics.fillRect(0, 0, width, height); icon.paintIcon(null, graphics, 0, 0); ImageIO.write(image, "jpeg", response.getOutputStream()); graphics.dispose();
/* */ }
/* */ }
/* */
/* */ private void loadPubDir(String pubDir, StringBuilder buf)
/* */ {
/* 697 */ buf.append("[");
/* 698 */ buf.append("{text:\"公共文件\",dir:\"");
/* 699 */ buf.append(StringUtil.toExpress(pubDir));
/* 700 */ buf.append("\"}");
/* 701 */ buf.append("]");
/* */ }
/* */
/* */ private void loadUserDir(String userDir, StringBuilder buf) {
/* 705 */ buf.append("[");
/* 706 */ buf.append("{text:\"我的文件\",dir:\"");
/* 707 */ buf.append(StringUtil.toExpress(userDir));
/* 708 */ buf.append("\"}");
/* 709 */ buf.append("]");
/* */ }
/* */
/* */ private void loadAppDir(String appDir, StringBuilder buf) {
/* 713 */ String s = FileUtil.extractFileDir(appDir);
/* 714 */ buf.append("[");
/* 715 */ buf.append("{text:\"");
/* 716 */ buf.append(StringUtil.toExpress(s));
/* 717 */ buf.append("\",dir:\"");
/* 718 */ buf.append(StringUtil.toExpress(s));
/* 719 */ buf.append("\"}");
/* 720 */ buf.append("]");
/* */ }
/* */
/* */ private boolean loadFilesBuf(File[] files, StringBuilder buf) {
/* 724 */ boolean isOk = false; boolean isFirst = true;
/* */
/* 727 */ buf.append("[");
/* 728 */ for (File file : files) {
/* 729 */ if (file.isDirectory()) {
/* 730 */ isOk = true;
/* 731 */ if (isFirst)
/* 732 */ isFirst = false;
/* */ else
/* 734 */ buf.append(",");
/* 735 */ buf.append("{text:\"");
/* 736 */ String name = file.getName();
<|fim▁hole|>/* 739 */ name = FileUtil.extractFileDir(dir);
/* 740 */ buf.append(StringUtil.toExpress(name));
/* 741 */ buf.append("\",dir:\"");
/* 742 */ buf.append(StringUtil.replace(dir, "\\", "/"));
/* 743 */ if (FileUtil.hasSubFile(file, true))
/* 744 */ buf.append("\"}");
/* */ else
/* 746 */ buf.append("\",leaf:true,iconCls:\"icon_folder\"}");
/* */ }
/* */ }
/* 749 */ buf.append("]");
/* 750 */ return isOk;
/* */ }
/* */ }
/* Location: Z:\EXT\WebBuilderServer (1)\WEB-INF\lib\webbuilder2.jar
* Qualified Name: com.webbuilder.interact.Explorer
* JD-Core Version: 0.6.0
*/<|fim▁end|> | /* 737 */ String dir = StringUtil.replace(file.getAbsolutePath(), "\\", "/");
/* 738 */ if (StringUtil.isEmpty(name))
|
<|file_name|>cargo_rerast_tests.rs<|end_file_name|><|fim▁begin|>use assert_cmd::prelude::*;
use predicates::prelude::*;
use std::process::Command;
fn cargo_rerast(crate_root: &str) -> Command {
// We can't use Assert.current_dir, because then Assert::cargo_binary doesn't work, instead we
// pass the crate root as an argument and get our binary to change directories once it's
// running.<|fim▁hole|>}
#[test]
fn test_help() {
cargo_rerast(".")
.arg("--help")
.assert()
.success()
.stdout(predicate::str::contains("cargo rerast"));
}
#[test]
fn test_simple_diff() {
cargo_rerast("tests/crates/simple")
// TODO: Remove once #10 is fixed.
.env("RERAST_FULL_CARGO_CLEAN", "1")
.arg("-p")
.arg("p0: i32, p1: i32")
.arg("-s")
.arg("p0 > p1")
.arg("-r")
.arg("p1 < p0")
.arg("--diff")
.arg("--color=never")
.assert()
.stdout(predicate::eq(
r#"--- src/lib.rs
+++ src/lib.rs
@@ -8,7 +8,7 @@
mod tests2 {
#[test]
fn x() {
- if 1 > 42 {
+ if 42 < 1 {
assert!(false);
}
}
@@ -16,7 +16,7 @@
/// A well documented function.
pub fn foo(a: i32, b: i32) -> i32 {
- if a > b {
+ if b < a {
42
} else {
b
@@ -26,7 +26,7 @@
#[cfg(test)]
mod tests {
fn bar(a: i32, b: i32) -> i32 {
- if a > b {
+ if b < a {
42
} else {
b
"#,
));
}
#[test]
fn test_invalid_cargo_toml() {
cargo_rerast("tests/crates/invalid_cargo_toml")
.args(&["-s", "file!()", "-r", "\"foo\""])
.args(&["--diff", "--color=never"])
.assert()
.failure()
.stderr(
predicate::str::contains("cargo metadata failed")
.and(predicate::str::contains("could not parse input as TOML")),
);
}
#[test]
fn test_compilation_error() {
cargo_rerast("tests/crates/compilation_error")
.args(&["-s", "file!()", "-r", "\"foo\""])
.args(&["--diff", "--color=never"])
.assert()
.failure()
.stderr(predicate::str::contains("this is not an i32"));
}<|fim▁end|> | let mut cmd = Command::cargo_bin("cargo-rerast").unwrap();
cmd.arg("rerast").arg("--crate_root").arg(crate_root);
cmd |
<|file_name|>howto-logging.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import time,os,re,csv,sys,uuid,joblib
from datetime import date
import numpy as np
from sklearn import svm
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
def train_model(X,y,saved_model):
"""
function to train model
"""
## Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
## Specify parameters and model
params = {'C':1.0,'kernel':'linear','gamma':0.5}
clf = svm.SVC(**params,probability=True)
## fit model on training data
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test,y_pred))
## retrain using all data
clf.fit(X, y)
print("... saving model: {}".format(saved_model))
joblib.dump(clf,saved_model)
print(y_test[:5])
print(X_test[:5,:])
def _update_predict_log(y_pred,y_proba,query,runtime):
"""
update predict log file
"""
## name the logfile using something that cycles with date (day, month, year)
today = date.today()
logfile = "example-predict-{}-{}.log".format(today.year, today.month)
## write the data to a csv file
header = ['unique_id','timestamp','y_pred','y_proba','x_shape','model_version','runtime']
write_header = False
if not os.path.exists(logfile):
write_header = True
with open(logfile,'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|')
if write_header:
writer.writerow(header)
to_write = map(str,[uuid.uuid4(),time.time(),y_pred,y_proba,query.shape,MODEL_VERSION,runtime])
writer.writerow(to_write)
def predict(query):
"""
generic function for prediction
"""
## start timer for runtime
time_start = time.time()
## ensure the model is loaded
model = joblib.load(saved_model)
## output checking
if len(query.shape) == 1:
query = query.reshape(1, -1)
## make prediction and gather data for log entry
y_pred = model.predict(query)
y_proba = None
if 'predict_proba' in dir(model) and model.probability == True:
y_proba = model.predict_proba(query)
m, s = divmod(time.time()-time_start, 60)
h, m = divmod(m, 60)
runtime = "%03d:%02d:%02d"%(h, m, s)
## update the log file
_update_predict_log(y_pred,y_proba,query,runtime)
<|fim▁hole|> ## import some data to play with
iris = datasets.load_iris()
X = iris.data[:,:2]
y = iris.target
## train the model
MODEL_VERSION = 1.0
saved_model = "example-predict-{}.joblib".format(re.sub("\.","_",str(MODEL_VERSION)))
model = train_model(X,y,saved_model)
## example predict
query = np.array([[6.1,2.8]])
for query in [np.array([[6.1,2.8]]), np.array([[7.7,2.5]]), np.array([[5.8,3.8]])]:
y_pred = predict(query)
print("predicted: {}".format(y_pred))<|fim▁end|> | return(y_pred)
if __name__ == "__main__":
|
<|file_name|>geometry.rs<|end_file_name|><|fim▁begin|>use byteorder::{ReadBytesExt, LittleEndian};
use super::{Section, Struct, Result, Error, ReadExt, Stream};
use super::{Vec3, Uv, Sphere, Rgba};
use super::{Material, MaterialList, Extension};
use std::rc::Rc;
/// Holds a list of `Geometry`s to be passed around.
#[derive(Debug)]
pub struct GeometryList(pub Vec<Rc<Geometry>>);
/// Primary container object for dynamic model data.
///
/// The data itself is stored as lists of `Triangle`s stored in a `MorphTarget`. Each such Triangle
/// object also contains a reference to a `Material` object, which defines that triangle's appearance.
///
/// During scene generation process, `Geometry` data will be used to generate `Mesh` data for
/// rendering. Most of the time the pre-calculated `Mesh`es are already pre-calculated on the
/// Clump RenderWare Stream in the form of `MeshHeader`.
///
/// A Geometry object cannot be directly liked to a Frame as there is no storage for these.
/// Instead, you should create an Atomic with a reference the Geometry, then link that Atomic to a Frame.
#[derive(Debug)]
pub struct Geometry {
/// Render as triangle strips.
pub is_tri_strip: bool,
/// Pre-light colors.
///
/// One element for each vertex.
pub colors: Option<Vec<Rgba>>,
/// Texture coordinate sets.
///
/// One element for each coordinate set (uv0, uv1, ...), and then one element for each vertex.
pub uv_sets: Vec<Vec<Uv>>,
/// List of triangles related to this geometry.
///
/// Each triangle point to a vertex index on the current morph target and the a material index.
pub faces: Vec<Triangle>,
/// Defines vertex positions and normals.
pub targets: Vec<MorphTarget>,
/// Defines triangle's appearance.
pub matlist: MaterialList,
/// List of meshes to be rendered.
///
/// Notice the cached meshes have a different indexing propery of the underlying Geometry,
/// that is the `is_tri_strip` of the `Geometry` must be ignored in favor of the one in the
/// `MeshHeader`.
pub meshlist: MeshHeader,
}
/// Meshes are a caching system designed to speed up rendering.
///
/// To make efficient use of hardware acceleration, model geometry is grouped into Meshes when the
/// Geometry object is loaded and/or unlocked.
///
/// Meshes are generated by sorting the model geometry by Material to reduce repeated uploads of
/// the same texture data and Tristripping is also performed at the this level.
#[derive(Debug)]
pub struct Mesh {
// TODO priv data?
/// Material associated with this mesh triangles.
pub material: Rc<Material>,
/// Indices of triangles making the mesh.
pub indices: Vec<u16>,
}
/// Header for all meshes that constitute a single `Geometry`.
#[derive(Debug)]
pub struct MeshHeader {
/// Render as triangle strips.
pub is_tri_strip: bool,
/// Total triangle index count in all meshes.
pub total_indices: u32,
/// List of meshes.
pub meshes: Vec<Mesh>,
}
/// Represents a triangle in a geometry.
///
/// This is specified by three indices into the geometry's vertex list together with an index in to
/// the geometry's material list.
#[derive(Debug, Copy, Clone)]
pub struct Triangle {
// `Triangle`s are necessary only to calculate `Mesh`es, though those meshes are mostly like
// already precalculated inside our clump streams (dff).
/// Y vertex index.
pub y_id: u16,
/// X vertex index.
pub x_id: u16,
/// Index into material list
pub mat_id: u16,
/// Z vertex index.
pub z_id: u16,
}
/// Keyframe points for interpolation in animations. A single keyframe means a non-keyframe geometry.
#[derive(Debug)]
pub struct MorphTarget {
// Grand Theft Auto does not use keyframe animations, and as such there's always only a
// single morph target in Geometry.
/// Bounding sphere of the vertices.
pub sphere: Sphere,
pub unk1: u32, unk2: u32,
/// Keyframe / Geometry vertex positions.
pub verts: Option<Vec<Vec3>>,
/// Keyframe / Geometry normals.
pub normals: Option<Vec<Vec3>>,
}
impl Section for GeometryList {
fn section_id() -> u32 { 0x001A }
}
impl Section for Geometry {
fn section_id() -> u32 { 0x000F }
}
impl Section for MeshHeader {
fn section_id() -> u32 { 0x050E } // Bin Mesh PLG
}
impl GeometryList {
/// Gets the geometry at the specified index or `None` if out of range.
pub fn get(&self, index: usize) -> Option<Rc<Geometry>> {
self.0.get(index).map(|rcgeo| rcgeo.clone())
}
/// Reads a Geometry List off the RenderWare Stream.
pub fn read<R: ReadExt>(rws: &mut Stream<R>) -> Result<GeometryList> {
let _header = try!(Self::read_header(rws));
let numgeo = try!(Struct::read_up(rws, |rws| {
Ok(try!(rws.read_u32::<LittleEndian>()))
}));
let mut geolist = Vec::with_capacity(numgeo as usize);
for _ in (0..numgeo) {
geolist.push( Rc::new(try!(Geometry::read(rws))) );
}
Ok(GeometryList(geolist))
}
}
impl Geometry {
/// Reads a Geometry off the RenderWare Stream.
pub fn read<R: ReadExt>(rws: &mut Stream<R>) -> Result<Geometry> {
let header = try!(Self::read_header(rws));
let (flags, colors, uv_sets, faces, targets) = try!(Struct::read_up(rws, |rws| {
let flags = try!(rws.read_u16::<LittleEndian>());
let num_uv = try!(rws.read_u8());
let _natflags = try!(rws.read_u8()); // TODO what is this?
let num_tris = try!(rws.read_u32::<LittleEndian>());
let num_verts = try!(rws.read_u32::<LittleEndian>());
let num_morphs = try!(rws.read_u32::<LittleEndian>());
// On 3.4.0.3 and below there are some additional information
let _amb_difu_spec = {
if header.version <= 0x1003FFFF {
Some((
try!(rws.read_f32::<LittleEndian>()),
try!(rws.read_f32::<LittleEndian>()),
try!(rws.read_f32::<LittleEndian>()),
))
} else {
None
}
};
// This geometry has pre-light colors?
let colors = {
if (flags & 8) != 0 {
let mut v = Vec::with_capacity(num_verts as usize);
for _ in (0..num_verts) {
v.push(try!(Rgba::read(rws)));
}
Some(v)
} else {
None
}
};
// Texture coordinates sets.
let uv_sets = {
let mut sets = Vec::with_capacity(num_uv as usize);
for _ in (0..num_uv) {
let mut v = Vec::with_capacity(num_verts as usize);
for _ in (0..num_verts) {
v.push(try!(Uv::read(rws)));
}
sets.push(v)
}
sets
};
// Triangles that make up the model.
let faces = {
let mut v = Vec::with_capacity(num_tris as usize);
for _ in (0..num_tris) {
v.push(Triangle {
y_id: try!(rws.read_u16::<LittleEndian>()),
x_id: try!(rws.read_u16::<LittleEndian>()),
mat_id: try!(rws.read_u16::<LittleEndian>()),
z_id: try!(rws.read_u16::<LittleEndian>()),
});
}
v
};
// Morph targets.
let targets = {
let mut v = Vec::with_capacity(num_morphs as usize);
for _ in (0..num_morphs) {
v.push(MorphTarget {
sphere: try!(Sphere::read(rws)),
unk1: try!(rws.read_u32::<LittleEndian>()),
unk2: try!(rws.read_u32::<LittleEndian>()),
verts: {
// This geometry has positions?
if (flags & 2) != 0 {
let mut verts = Vec::with_capacity(num_verts as usize);
for _ in (0..num_verts) {
verts.push(try!(Vec3::read(rws)));
}
Some(verts)
} else {
None
}
},
normals: {
// This geometry has vertex normals?
if (flags & 16) != 0 {
let mut normz = Vec::with_capacity(num_verts as usize);
for _ in (0..num_verts) {
normz.push(try!(Vec3::read(rws)));
}
Some(normz)
} else {
None
}
},
});
}
v
};
Ok((flags, colors, uv_sets, faces, targets))
}));
let matlist = try!(MaterialList::read(rws));
let meshlist = try!(Extension::read_for(rws, |rws| MeshHeader::read(rws, &matlist)));
<|fim▁hole|> is_tri_strip: (flags & 1) != 0,
colors: colors,
uv_sets: uv_sets,
faces: faces,
targets: targets,
matlist: matlist,
meshlist: meshlist.unwrap_or_else(|| {
unimplemented!() // TODO calculate meshlist ourselves
}),
})
}
}
impl MeshHeader {
/// Reads a Bin Mesh PLG off the RenderWare Stream.
pub fn read<R: ReadExt>(rws: &mut Stream<R>, matlist: &MaterialList) -> Result<MeshHeader> {
let _header = try!(Self::read_header(rws));
let flags = try!(rws.read_u32::<LittleEndian>());
let num_mesh = try!(rws.read_u32::<LittleEndian>());
let total_idx = try!(rws.read_u32::<LittleEndian>());
Ok(MeshHeader {
is_tri_strip: (flags & 1) != 0, // TODO better analyze?
total_indices: total_idx,
meshes: try!((0..num_mesh).map(|_| Mesh::read(rws, matlist)).collect()),
})
}
}
impl Mesh {
/// Reads a single Mesh (from a Bin Mesh PLG) off the RenderWare Stream.
pub fn read<R: ReadExt>(rws: &mut Stream<R>, matlist: &MaterialList) -> Result<Mesh> {
let nidx = try!(rws.read_u32::<LittleEndian>()) as usize;
let matid = try!(rws.read_u32::<LittleEndian>()) as usize;
Ok(Mesh {
material: try!(matlist.get(matid)
.ok_or(Error::Other("Invalid 'Mesh' material id".into()))),
indices: {
let mut v = Vec::with_capacity(nidx);
for _ in (0..nidx) {
v.push(try!(rws.read_u32::<LittleEndian>().map(|x| x as u16)));
}
v
},
})
}
}<|fim▁end|> | Ok(Geometry { |
<|file_name|>DestroyEntitiesMessage.java<|end_file_name|><|fim▁begin|>package net.glowstone.net.message.play.entity;
import com.flowpowered.network.Message;
import java.util.List;
import lombok.Data;
<|fim▁hole|>public final class DestroyEntitiesMessage implements Message {
private final List<Integer> ids;
}<|fim▁end|> | @Data |
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models, transaction, DatabaseError
from django.template.defaultfilters import slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as tznow
from pybb.compat import get_user_model_path, get_username_field, get_atomic_func
from pybb import defaults<|fim▁hole|>
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^annoying\.fields\.JSONField"])
add_introspection_rules([], ["^annoying\.fields\.AutoOneToOneField"])
except ImportError:
pass
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(_('Name'), max_length=80)
position = models.IntegerField(_('Position'), blank=True, default=0)
hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False,
help_text=_('If checked, this category will be visible only for staff'))
slug = models.SlugField(_("Slug"), max_length=100, unique=True)
class Meta(object):
ordering = ['position']
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def __str__(self):
return self.name
def forum_count(self):
return self.forums.all().count()
def get_absolute_url(self):
if defaults.PYBB_NICE_URL:
return reverse('pybb:category', kwargs={'slug': self.slug, })
return reverse('pybb:category', kwargs={'pk': self.id})
@property
def topics(self):
return Topic.objects.filter(forum__category=self).select_related()
@property
def posts(self):
return Post.objects.filter(topic__forum__category=self).select_related()
@python_2_unicode_compatible
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
parent = models.ForeignKey('self', related_name='child_forums', verbose_name=_('Parent forum'),
blank=True, null=True)
name = models.CharField(_('Name'), max_length=80)
position = models.IntegerField(_('Position'), blank=True, default=0)
description = models.TextField(_('Description'), blank=True)
moderators = models.ManyToManyField(get_user_model_path(), blank=True, null=True, verbose_name=_('Moderators'))
updated = models.DateTimeField(_('Updated'), blank=True, null=True)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
topic_count = models.IntegerField(_('Topic count'), blank=True, default=0)
hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False)
readed_by = models.ManyToManyField(get_user_model_path(), through='ForumReadTracker', related_name='readed_forums')
headline = models.TextField(_('Headline'), blank=True, null=True)
slug = models.SlugField(verbose_name=_("Slug"), max_length=100)
class Meta(object):
ordering = ['position']
verbose_name = _('Forum')
verbose_name_plural = _('Forums')
unique_together = ('category', 'slug')
def __str__(self):
return self.name
def update_counters(self):
self.topic_count = Topic.objects.filter(forum=self).count()
if self.topic_count:
posts = Post.objects.filter(topic__forum_id=self.id)
self.post_count = posts.count()
if self.post_count:
try:
last_post = posts.order_by('-created', '-id')[0]
self.updated = last_post.updated or last_post.created
except IndexError:
pass
else:
self.post_count = 0
self.save()
def get_absolute_url(self):
if defaults.PYBB_NICE_URL:
return reverse('pybb:forum', kwargs={'slug': self.slug, 'category_slug': self.category.slug})
return reverse('pybb:forum', kwargs={'pk': self.id})
@property
def posts(self):
return Post.objects.filter(topic__forum=self).select_related()
@cached_property
def last_post(self):
try:
return self.posts.order_by('-created', '-id')[0]
except IndexError:
return None
def get_parents(self):
"""
Used in templates for breadcrumb building
"""
parents = [self.category]
parent = self.parent
while parent is not None:
parents.insert(1, parent)
parent = parent.parent
return parents
@python_2_unicode_compatible
class Topic(models.Model):
POLL_TYPE_NONE = 0
POLL_TYPE_SINGLE = 1
POLL_TYPE_MULTIPLE = 2
POLL_TYPE_CHOICES = (
(POLL_TYPE_NONE, _('None')),
(POLL_TYPE_SINGLE, _('Single answer')),
(POLL_TYPE_MULTIPLE, _('Multiple answers')),
)
forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
name = models.CharField(_('Subject'), max_length=255)
created = models.DateTimeField(_('Created'), null=True)
updated = models.DateTimeField(_('Updated'), null=True)
user = models.ForeignKey(get_user_model_path(), verbose_name=_('User'))
views = models.IntegerField(_('Views count'), blank=True, default=0)
sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
closed = models.BooleanField(_('Closed'), blank=True, default=False)
subscribers = models.ManyToManyField(get_user_model_path(), related_name='subscriptions',
verbose_name=_('Subscribers'), blank=True)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
readed_by = models.ManyToManyField(get_user_model_path(), through='TopicReadTracker', related_name='readed_topics')
on_moderation = models.BooleanField(_('On moderation'), default=False)
poll_type = models.IntegerField(_('Poll type'), choices=POLL_TYPE_CHOICES, default=POLL_TYPE_NONE)
poll_question = models.TextField(_('Poll question'), blank=True, null=True)
slug = models.SlugField(verbose_name=_("Slug"), max_length=100)
class Meta(object):
ordering = ['-created']
verbose_name = _('Topic')
verbose_name_plural = _('Topics')
unique_together = ('forum', 'slug')
def __str__(self):
return self.name
@cached_property
def head(self):
try:
return self.posts.all().order_by('created', 'id')[0]
except IndexError:
return None
@cached_property
def last_post(self):
try:
return self.posts.order_by('-created', '-id').select_related('user')[0]
except IndexError:
return None
def get_absolute_url(self):
if defaults.PYBB_NICE_URL:
return reverse('pybb:topic', kwargs={'slug': self.slug, 'forum_slug': self.forum.slug, 'category_slug': self.forum.category.slug})
return reverse('pybb:topic', kwargs={'pk': self.id})
def save(self, *args, **kwargs):
if self.id is None:
self.created = self.updated = tznow()
forum_changed = False
old_topic = None
if self.id is not None:
old_topic = Topic.objects.get(id=self.id)
if self.forum != old_topic.forum:
forum_changed = True
super(Topic, self).save(*args, **kwargs)
if forum_changed:
old_topic.forum.update_counters()
self.forum.update_counters()
def delete(self, using=None):
super(Topic, self).delete(using)
self.forum.update_counters()
def update_counters(self):
self.post_count = self.posts.count()
# force cache overwrite to get the real latest updated post
if hasattr(self, 'last_post'):
del self.last_post
if self.last_post:
self.updated = self.last_post.updated or self.last_post.created
self.save()
def get_parents(self):
"""
Used in templates for breadcrumb building
"""
parents = self.forum.get_parents()
parents.append(self.forum)
return parents
def poll_votes(self):
if self.poll_type != self.POLL_TYPE_NONE:
return PollAnswerUser.objects.filter(poll_answer__topic=self).count()
else:
return None
class RenderableItem(models.Model):
"""
Base class for models that has markup, body, body_text and body_html fields.
"""
class Meta(object):
abstract = True
body = models.TextField(_('Message'))
body_html = models.TextField(_('HTML version'))
body_text = models.TextField(_('Text version'))
def render(self):
self.body_html = _get_markup_formatter()(self.body)
# Remove tags which was generated with the markup processor
text = strip_tags(self.body_html)
# Unescape entities which was generated with the markup processor
self.body_text = unescape(text)
@python_2_unicode_compatible
class Post(RenderableItem):
topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
user = models.ForeignKey(get_user_model_path(), related_name='posts', verbose_name=_('User'))
created = models.DateTimeField(_('Created'), blank=True, db_index=True)
updated = models.DateTimeField(_('Updated'), blank=True, null=True)
user_ip = models.IPAddressField(_('User IP'), blank=True, default='0.0.0.0')
on_moderation = models.BooleanField(_('On moderation'), default=False)
class Meta(object):
ordering = ['created']
verbose_name = _('Post')
verbose_name_plural = _('Posts')
def summary(self):
limit = 50
tail = len(self.body) > limit and '...' or ''
return self.body[:limit] + tail
def __str__(self):
return self.summary()
def save(self, *args, **kwargs):
created_at = tznow()
if self.created is None:
self.created = created_at
self.render()
new = self.pk is None
topic_changed = False
old_post = None
if not new:
old_post = Post.objects.get(pk=self.pk)
if old_post.topic != self.topic:
topic_changed = True
super(Post, self).save(*args, **kwargs)
# If post is topic head and moderated, moderate topic too
if self.topic.head == self and not self.on_moderation and self.topic.on_moderation:
self.topic.on_moderation = False
self.topic.update_counters()
self.topic.forum.update_counters()
if topic_changed:
old_post.topic.update_counters()
old_post.topic.forum.update_counters()
def get_absolute_url(self):
return reverse('pybb:post', kwargs={'pk': self.id})
def delete(self, *args, **kwargs):
self_id = self.id
head_post_id = self.topic.posts.order_by('created', 'id')[0].id
if self_id == head_post_id:
self.topic.delete()
else:
super(Post, self).delete(*args, **kwargs)
self.topic.update_counters()
self.topic.forum.update_counters()
def get_parents(self):
"""
Used in templates for breadcrumb building
"""
return self.topic.forum.category, self.topic.forum, self.topic,
class Profile(PybbProfile):
"""
Profile class that can be used if you doesn't have
your site profile.
"""
user = AutoOneToOneField(get_user_model_path(), related_name='pybb_profile', verbose_name=_('User'))
class Meta(object):
verbose_name = _('Profile')
verbose_name_plural = _('Profiles')
def get_absolute_url(self):
return reverse('pybb:user', kwargs={'username': getattr(self.user, get_username_field())})
def get_display_name(self):
return self.user.get_username()
class Attachment(models.Model):
class Meta(object):
verbose_name = _('Attachment')
verbose_name_plural = _('Attachments')
post = models.ForeignKey(Post, verbose_name=_('Post'), related_name='attachments')
size = models.IntegerField(_('Size'))
file = models.FileField(_('File'),
upload_to=FilePathGenerator(to=defaults.PYBB_ATTACHMENT_UPLOAD_TO))
def save(self, *args, **kwargs):
self.size = self.file.size
super(Attachment, self).save(*args, **kwargs)
def size_display(self):
size = self.size
if size < 1024:
return '%db' % size
elif size < 1024 * 1024:
return '%dKb' % int(size / 1024)
else:
return '%.2fMb' % (size / float(1024 * 1024))
class TopicReadTrackerManager(models.Manager):
def get_or_create_tracker(self, user, topic):
"""
Correctly create tracker in mysql db on default REPEATABLE READ transaction mode
It's known problem when standrard get_or_create method return can raise exception
with correct data in mysql database.
See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
"""
is_new = True
sid = transaction.savepoint(using=self.db)
try:
with get_atomic_func()():
obj = TopicReadTracker.objects.create(user=user, topic=topic)
transaction.savepoint_commit(sid)
except DatabaseError:
transaction.savepoint_rollback(sid)
obj = TopicReadTracker.objects.get(user=user, topic=topic)
is_new = False
return obj, is_new
class TopicReadTracker(models.Model):
"""
Save per user topic read tracking
"""
user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
topic = models.ForeignKey(Topic, blank=True, null=True)
time_stamp = models.DateTimeField(auto_now=True)
objects = TopicReadTrackerManager()
class Meta(object):
verbose_name = _('Topic read tracker')
verbose_name_plural = _('Topic read trackers')
unique_together = ('user', 'topic')
class ForumReadTrackerManager(models.Manager):
def get_or_create_tracker(self, user, forum):
"""
Correctly create tracker in mysql db on default REPEATABLE READ transaction mode
It's known problem when standrard get_or_create method return can raise exception
with correct data in mysql database.
See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
"""
is_new = True
sid = transaction.savepoint(using=self.db)
try:
with get_atomic_func()():
obj = ForumReadTracker.objects.create(user=user, forum=forum)
transaction.savepoint_commit(sid)
except DatabaseError:
transaction.savepoint_rollback(sid)
is_new = False
obj = ForumReadTracker.objects.get(user=user, forum=forum)
return obj, is_new
class ForumReadTracker(models.Model):
"""
Save per user forum read tracking
"""
user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
forum = models.ForeignKey(Forum, blank=True, null=True)
time_stamp = models.DateTimeField(auto_now=True)
objects = ForumReadTrackerManager()
class Meta(object):
verbose_name = _('Forum read tracker')
verbose_name_plural = _('Forum read trackers')
unique_together = ('user', 'forum')
@python_2_unicode_compatible
class PollAnswer(models.Model):
topic = models.ForeignKey(Topic, related_name='poll_answers', verbose_name=_('Topic'))
text = models.CharField(max_length=255, verbose_name=_('Text'))
class Meta:
verbose_name = _('Poll answer')
verbose_name_plural = _('Polls answers')
def __str__(self):
return self.text
def votes(self):
return self.users.count()
def votes_percent(self):
topic_votes = self.topic.poll_votes()
if topic_votes > 0:
return 1.0 * self.votes() / topic_votes * 100
else:
return 0
@python_2_unicode_compatible
class PollAnswerUser(models.Model):
poll_answer = models.ForeignKey(PollAnswer, related_name='users', verbose_name=_('Poll answer'))
user = models.ForeignKey(get_user_model_path(), related_name='poll_answers', verbose_name=_('User'))
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = _('Poll answer user')
verbose_name_plural = _('Polls answers users')
unique_together = (('poll_answer', 'user', ), )
def __str__(self):
return '%s - %s' % (self.poll_answer.topic, self.user)
def create_or_check_slug(instance, model, **extra_filters):
"""
returns a unique slug
:param instance : target instance
:param model: needed as instance._meta.model is available since django 1.6
:param extra_filters: filters needed for Forum and Topic for their unique_together field
"""
if not instance.slug:
instance.slug = slugify(instance.name)
slug = instance.slug
filters = {'slug__startswith': slug, }
if extra_filters:
filters.update(extra_filters)
count = 0
objs = model.objects.filter(**filters).exclude(pk=instance.pk)
slug_list = [obj.slug for obj in objs]
while slug in slug_list:
count += 1
slug = '%s-%d' % (instance.slug, count)
return slug<|fim▁end|> | from pybb.profiles import PybbProfile
from pybb.util import unescape, FilePathGenerator, _get_markup_formatter
from annoying.fields import AutoOneToOneField |
<|file_name|>packet.rs<|end_file_name|><|fim▁begin|>//! SIMD intersection result
use crate::geometry::{f32xN, m32xN, V3DxN};
use crate::intersection::Isect;
/// Intersection result
#[derive(Copy, Clone, Debug)]
pub struct IsectxN {
pub t: f32xN,
pub p: V3DxN,
pub n: V3DxN,
pub hit: m32xN,
}
<|fim▁hole|> t: f32xN::splat(1e17),
hit: m32xN::splat(false),
p: V3DxN::default(),
n: V3DxN::default(),
}
}
}
impl IsectxN {
pub fn get(&self, idx: usize) -> Isect {
Isect {
t: self.t.extract(idx),
p: self.p.get(idx),
n: self.n.get(idx),
hit: self.hit.extract(idx),
}
}
}<|fim▁end|> | impl Default for IsectxN {
#[inline]
fn default() -> Self {
Self { |
<|file_name|>VBoxGuest-win.cpp<|end_file_name|><|fim▁begin|>/* $Id: VBoxGuest-win.cpp $ */
/** @file
* VBoxGuest - Windows specifics.
*/
/*
* Copyright (C) 2010-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_SUP_DRV
#include "VBoxGuest-win.h"
#include "VBoxGuestInternal.h"
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <VBox/log.h>
#include <VBox/VBoxGuestLib.h>
#include <iprt/string.h>
/*
* XP DDK #defines ExFreePool to ExFreePoolWithTag. The latter does not exist
* on NT4, so... The same for ExAllocatePool.
*/
#ifdef TARGET_NT4
# undef ExAllocatePool
# undef ExFreePool
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
RT_C_DECLS_BEGIN
static NTSTATUS vbgdNtAddDevice(PDRIVER_OBJECT pDrvObj, PDEVICE_OBJECT pDevObj);
static void vbgdNtUnload(PDRIVER_OBJECT pDrvObj);
static NTSTATUS vbgdNtCreate(PDEVICE_OBJECT pDevObj, PIRP pIrp);
static NTSTATUS vbgdNtClose(PDEVICE_OBJECT pDevObj, PIRP pIrp);
static NTSTATUS vbgdNtIOCtl(PDEVICE_OBJECT pDevObj, PIRP pIrp);
static NTSTATUS vbgdNtInternalIOCtl(PDEVICE_OBJECT pDevObj, PIRP pIrp);
static NTSTATUS vbgdNtRegistryReadDWORD(ULONG ulRoot, PCWSTR pwszPath, PWSTR pwszName, PULONG puValue);
static NTSTATUS vbgdNtSystemControl(PDEVICE_OBJECT pDevObj, PIRP pIrp);
static NTSTATUS vbgdNtShutdown(PDEVICE_OBJECT pDevObj, PIRP pIrp);
static NTSTATUS vbgdNtNotSupportedStub(PDEVICE_OBJECT pDevObj, PIRP pIrp);
#ifdef DEBUG
static void vbgdNtDoTests(void);
#endif
RT_C_DECLS_END
/*******************************************************************************
* Exported Functions *
*******************************************************************************/
RT_C_DECLS_BEGIN
ULONG DriverEntry(PDRIVER_OBJECT pDrvObj, PUNICODE_STRING pRegPath);
RT_C_DECLS_END
#ifdef ALLOC_PRAGMA
# pragma alloc_text(INIT, DriverEntry)
# pragma alloc_text(PAGE, vbgdNtAddDevice)
# pragma alloc_text(PAGE, vbgdNtUnload)
# pragma alloc_text(PAGE, vbgdNtCreate)
# pragma alloc_text(PAGE, vbgdNtClose)
# pragma alloc_text(PAGE, vbgdNtShutdown)
# pragma alloc_text(PAGE, vbgdNtNotSupportedStub)
# pragma alloc_text(PAGE, vbgdNtScanPCIResourceList)
#endif
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** The detected NT (windows) version. */
VBGDNTVER g_enmVbgdNtVer = VBGDNTVER_INVALID;
/**
 * Driver entry point.
 *
 * Detects the Windows version (initializing g_enmVbgdNtVer in the process)
 * and installs the driver dispatch table.  On NT4 the device object is
 * created right here; on PnP-capable versions that is deferred to the
 * AddDevice callback.
 *
 * @returns appropriate status code.
 * @param pDrvObj Pointer to driver object.
 * @param pRegPath Registry base path.
 */
ULONG DriverEntry(PDRIVER_OBJECT pDrvObj, PUNICODE_STRING pRegPath)
{
    NTSTATUS rc = STATUS_SUCCESS;
    LogFunc(("Driver built: %s %s\n", __DATE__, __TIME__));
    /*
     * Check if the the NT version is supported and initializing
     * g_enmVbgdNtVer in the process.
     */
    ULONG ulMajorVer;
    ULONG ulMinorVer;
    ULONG ulBuildNo;
    BOOLEAN fCheckedBuild = PsGetVersion(&ulMajorVer, &ulMinorVer, &ulBuildNo, NULL);
    /* Use RTLogBackdoorPrintf to make sure that this goes to VBox.log */
    RTLogBackdoorPrintf("VBoxGuest: Windows version %u.%u, build %u\n", ulMajorVer, ulMinorVer, ulBuildNo);
    if (fCheckedBuild)
        RTLogBackdoorPrintf("VBoxGuest: Windows checked build\n");
#ifdef DEBUG
    /* Run the self tests (asserts on failure) before anything else. */
    vbgdNtDoTests();
#endif
    /* Map major.minor to the VBGDNTVER enumeration. */
    switch (ulMajorVer)
    {
        case 10:
            switch (ulMinorVer)
            {
                case 0:
                    /* Windows 10 Preview builds starting with 9926. */
                default:
                    /* Also everything newer. */
                    g_enmVbgdNtVer = VBGDNTVER_WIN10;
                    break;
            }
            break;
        case 6: /* Windows Vista or Windows 7 (based on minor ver) */
            switch (ulMinorVer)
            {
                case 0: /* Note: Also could be Windows 2008 Server! */
                    g_enmVbgdNtVer = VBGDNTVER_WINVISTA;
                    break;
                case 1: /* Note: Also could be Windows 2008 Server R2! */
                    g_enmVbgdNtVer = VBGDNTVER_WIN7;
                    break;
                case 2:
                    g_enmVbgdNtVer = VBGDNTVER_WIN8;
                    break;
                case 3:
                    g_enmVbgdNtVer = VBGDNTVER_WIN81;
                    break;
                case 4:
                    /* Windows 10 Preview builds. */
                default:
                    /* Also everything newer. */
                    g_enmVbgdNtVer = VBGDNTVER_WIN10;
                    break;
            }
            break;
        case 5:
            switch (ulMinorVer)
            {
                default:
                case 2:
                    g_enmVbgdNtVer = VBGDNTVER_WIN2K3;
                    break;
                case 1:
                    g_enmVbgdNtVer = VBGDNTVER_WINXP;
                    break;
                case 0:
                    g_enmVbgdNtVer = VBGDNTVER_WIN2K;
                    break;
            }
            break;
        case 4:
            g_enmVbgdNtVer = VBGDNTVER_WINNT4;
            break;
        default:
            if (ulMajorVer > 6)
            {
                /* "Windows 10 mode" for Windows 8.1+. */
                g_enmVbgdNtVer = VBGDNTVER_WIN10;
            }
            else
            {
                /* Anything below NT4 (or unrecognized) is refused. */
                if (ulMajorVer < 4)
                    LogRelFunc(("At least Windows NT4 required! (%u.%u)\n", ulMajorVer, ulMinorVer));
                else
                    LogRelFunc(("Unknown version %u.%u!\n", ulMajorVer, ulMinorVer));
                rc = STATUS_DRIVER_UNABLE_TO_LOAD;
            }
            break;
    }
    if (NT_SUCCESS(rc))
    {
        /*
         * Setup the driver entry points in pDrvObj.
         */
        pDrvObj->DriverUnload = vbgdNtUnload;
        pDrvObj->MajorFunction[IRP_MJ_CREATE] = vbgdNtCreate;
        pDrvObj->MajorFunction[IRP_MJ_CLOSE] = vbgdNtClose;
        pDrvObj->MajorFunction[IRP_MJ_DEVICE_CONTROL] = vbgdNtIOCtl;
        pDrvObj->MajorFunction[IRP_MJ_INTERNAL_DEVICE_CONTROL] = vbgdNtInternalIOCtl;
        pDrvObj->MajorFunction[IRP_MJ_SHUTDOWN] = vbgdNtShutdown;
        pDrvObj->MajorFunction[IRP_MJ_READ] = vbgdNtNotSupportedStub;
        pDrvObj->MajorFunction[IRP_MJ_WRITE] = vbgdNtNotSupportedStub;
#ifdef TARGET_NT4
        /* NT4 has no PnP manager: create the device object immediately. */
        rc = vbgdNt4CreateDevice(pDrvObj, NULL /* pDevObj */, pRegPath);
#else
        pDrvObj->MajorFunction[IRP_MJ_PNP] = vbgdNtPnP;
        pDrvObj->MajorFunction[IRP_MJ_POWER] = vbgdNtPower;
        pDrvObj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = vbgdNtSystemControl;
        pDrvObj->DriverExtension->AddDevice = (PDRIVER_ADD_DEVICE)vbgdNtAddDevice;
#endif
    }
    LogFlowFunc(("Returning %#x\n", rc));
    return rc;
}
#ifndef TARGET_NT4
/**
 * AddDevice callback, invoked by the PnP manager for each VBoxGuest device
 * instance.  Creates the functional device object (FDO), the DOS device
 * symbolic link, initializes the device extension and attaches the FDO to
 * the device stack above the supplied device object.
 *
 * @returns NT status code
 * @param pDrvObj Driver object
 * @param pDevObj Device object to attach our FDO on top of (supplied by the
 *                PnP manager).
 */
static NTSTATUS vbgdNtAddDevice(PDRIVER_OBJECT pDrvObj, PDEVICE_OBJECT pDevObj)
{
    NTSTATUS rc;
    LogFlowFuncEnter();
    /*
     * Create device.
     */
    UNICODE_STRING DevName;
    RtlInitUnicodeString(&DevName, VBOXGUEST_DEVICE_NAME_NT);
    PDEVICE_OBJECT pDeviceObject = NULL;
    rc = IoCreateDevice(pDrvObj, sizeof(VBOXGUESTDEVEXTWIN), &DevName, FILE_DEVICE_UNKNOWN, 0, FALSE, &pDeviceObject);
    if (NT_SUCCESS(rc))
    {
        /*
         * Create symbolic link (DOS devices).
         */
        UNICODE_STRING DosName;
        RtlInitUnicodeString(&DosName, VBOXGUEST_DEVICE_NAME_DOS);
        rc = IoCreateSymbolicLink(&DosName, &DevName);
        if (NT_SUCCESS(rc))
        {
            /*
             * Setup the device extension.
             */
            PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDeviceObject->DeviceExtension;
            RT_ZERO(*pDevExt);
            KeInitializeSpinLock(&pDevExt->MouseEventAccessLock);
            pDevExt->pDeviceObject = pDeviceObject;
            pDevExt->prevDevState = STOPPED;
            pDevExt->devState = STOPPED;
            pDevExt->pNextLowerDriver = IoAttachDeviceToDeviceStack(pDeviceObject, pDevObj);
            if (pDevExt->pNextLowerDriver != NULL)
            {
                /*
                 * If we reached this point we're fine with the basic driver setup,
                 * so continue to init our own things.
                 */
#ifdef VBOX_WITH_GUEST_BUGCHECK_DETECTION
                vbgdNtBugCheckCallback(pDevExt); /* Ignore failure! */
#endif
                /* rc is still the IoCreateSymbolicLink success status here; the
                   check exists so a future rc-modifying step above can fail out. */
                if (NT_SUCCESS(rc))
                {
                    /* VBoxGuestPower is pageable; ensure we are not called at elevated IRQL */
                    pDeviceObject->Flags |= DO_POWER_PAGABLE;
                    /* Driver is ready now. */
                    pDeviceObject->Flags &= ~DO_DEVICE_INITIALIZING;
                    LogFlowFunc(("Returning with rc=0x%x (success)\n", rc));
                    return rc;
                }
                IoDetachDevice(pDevExt->pNextLowerDriver);
            }
            else
            {
                LogFunc(("IoAttachDeviceToDeviceStack did not give a nextLowerDriver!\n"));
                rc = STATUS_DEVICE_NOT_CONNECTED;
            }
            /* bail out */
            IoDeleteSymbolicLink(&DosName);
        }
        else
            LogFunc(("IoCreateSymbolicLink failed with rc=%#x!\n", rc));
        IoDeleteDevice(pDeviceObject);
    }
    else
        LogFunc(("IoCreateDevice failed with rc=%#x!\n", rc));
    LogFunc(("Returning with rc=0x%x\n", rc));
    return rc;
}
#endif
/**
 * Debug helper to dump a device resource list.
 *
 * Compiles to an empty function unless LOG_ENABLED is defined.
 *
 * @param pResourceList list of device resources.
 */
static void vbgdNtShowDeviceResources(PCM_PARTIAL_RESOURCE_LIST pResourceList)
{
#ifdef LOG_ENABLED
    PCM_PARTIAL_RESOURCE_DESCRIPTOR pResource = pResourceList->PartialDescriptors;
    ULONG cResources = pResourceList->Count;
    for (ULONG i = 0; i < cResources; ++i, ++pResource)
    {
        ULONG uType = pResource->Type;
        /* Names indexed by the CmResourceType* values. */
        static char const * const s_apszName[] =
        {
            "CmResourceTypeNull",
            "CmResourceTypePort",
            "CmResourceTypeInterrupt",
            "CmResourceTypeMemory",
            "CmResourceTypeDma",
            "CmResourceTypeDeviceSpecific",
            "CmResourceTypeBusNumber",
            "CmResourceTypeDevicePrivate",
            "CmResourceTypeAssignedResource",
            "CmResourceTypeSubAllocateFrom",
        };
        LogFunc(("Type=%s", uType < RT_ELEMENTS(s_apszName) ? s_apszName[uType] : "Unknown"));
        /* Append the type specific details (no newline was printed above). */
        switch (uType)
        {
            case CmResourceTypePort:
            case CmResourceTypeMemory:
                LogFunc(("Start %8X%8.8lX, length=%X\n",
                         pResource->u.Port.Start.HighPart, pResource->u.Port.Start.LowPart,
                         pResource->u.Port.Length));
                break;
            case CmResourceTypeInterrupt:
                LogFunc(("Level=%X, vector=%X, affinity=%X\n",
                         pResource->u.Interrupt.Level, pResource->u.Interrupt.Vector,
                         pResource->u.Interrupt.Affinity));
                break;
            case CmResourceTypeDma:
                LogFunc(("Channel %d, Port %X\n",
                         pResource->u.Dma.Channel, pResource->u.Dma.Port));
                break;
            default:
                LogFunc(("\n"));
                break;
        }
    }
#endif
}
/**
 * Global initialisation stuff (PnP + NT4 legacy).
 *
 * Scans the PCI resources assigned to the device, maps the VMMDev memory,
 * initializes the common device extension, pre-allocates the power state
 * request buffer, and registers the DPC and ISR handlers.
 *
 * @returns NT status code.
 * @param pDevObj Device object.
 * @param pIrp Request packet (PnP build: the IRP_MN_START_DEVICE request).
 *             The NT4 build instead takes the driver object, device object
 *             and registry path.
 */
#ifndef TARGET_NT4
NTSTATUS vbgdNtInit(PDEVICE_OBJECT pDevObj, PIRP pIrp)
#else
NTSTATUS vbgdNtInit(PDRIVER_OBJECT pDrvObj, PDEVICE_OBJECT pDevObj, PUNICODE_STRING pRegPath)
#endif
{
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
#ifndef TARGET_NT4
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
#endif
    LogFlowFuncEnter();
    int rc = STATUS_SUCCESS; /** @todo r=bird: s/rc/rcNt/ and s/int/NTSTATUS/. gee. */
#ifdef TARGET_NT4
    /*
     * Let's have a look at what our PCI adapter offers.
     */
    LogFlowFunc(("Starting to scan PCI resources of VBoxGuest ...\n"));
    /* Assign the PCI resources. */
    PCM_RESOURCE_LIST pResourceList = NULL;
    UNICODE_STRING classNameString;
    RtlInitUnicodeString(&classNameString, L"VBoxGuestAdapter");
    rc = HalAssignSlotResources(pRegPath, &classNameString,
                                pDrvObj, pDevObj,
                                PCIBus, pDevExt->busNumber, pDevExt->slotNumber,
                                &pResourceList);
    if (pResourceList && pResourceList->Count > 0)
        vbgdNtShowDeviceResources(&pResourceList->List[0].PartialResourceList);
    if (NT_SUCCESS(rc))
        rc = vbgdNtScanPCIResourceList(pResourceList, pDevExt);
#else
    /* On PnP builds the PnP manager already assigned the resources; they come
       with the start-device IRP. */
    if (pStack->Parameters.StartDevice.AllocatedResources->Count > 0)
        vbgdNtShowDeviceResources(&pStack->Parameters.StartDevice.AllocatedResources->List[0].PartialResourceList);
    if (NT_SUCCESS(rc))
        rc = vbgdNtScanPCIResourceList(pStack->Parameters.StartDevice.AllocatedResourcesTranslated, pDevExt);
#endif
    if (NT_SUCCESS(rc))
    {
        /*
         * Map physical address of VMMDev memory into MMIO region
         * and init the common device extension bits.
         */
        void *pvMMIOBase = NULL;
        uint32_t cbMMIO = 0;
        rc = vbgdNtMapVMMDevMemory(pDevExt,
                                   pDevExt->vmmDevPhysMemoryAddress,
                                   pDevExt->vmmDevPhysMemoryLength,
                                   &pvMMIOBase,
                                   &cbMMIO);
        if (NT_SUCCESS(rc))
        {
            pDevExt->Core.pVMMDevMemory = (VMMDevMemory *)pvMMIOBase;
            LogFunc(("pvMMIOBase=0x%p, pDevExt=0x%p, pDevExt->Core.pVMMDevMemory=0x%p\n",
                     pvMMIOBase, pDevExt, pDevExt ? pDevExt->Core.pVMMDevMemory : NULL));
            int vrc = VbgdCommonInitDevExt(&pDevExt->Core,
                                           pDevExt->Core.IOPortBase,
                                           pvMMIOBase, cbMMIO,
                                           vbgdNtVersionToOSType(g_enmVbgdNtVer),
                                           VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
            if (RT_FAILURE(vrc))
            {
                LogFunc(("Could not init device extension, rc=%Rrc\n", vrc));
                rc = STATUS_DEVICE_CONFIGURATION_ERROR;
            }
        }
        else
            LogFunc(("Could not map physical address of VMMDev, rc=0x%x\n", rc));
    }
    if (NT_SUCCESS(rc))
    {
        /* Pre-allocate the power state request so vbgdNtShutdown does not
           have to allocate at shutdown time. */
        int vrc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pPowerStateRequest,
                              sizeof(VMMDevPowerStateRequest), VMMDevReq_SetPowerStatus);
        if (RT_FAILURE(vrc))
        {
            LogFunc(("Alloc for pPowerStateRequest failed, rc=%Rrc\n", vrc));
            rc = STATUS_UNSUCCESSFUL;
        }
    }
    if (NT_SUCCESS(rc))
    {
        /*
         * Register DPC and ISR.
         */
        LogFlowFunc(("Initializing DPC/ISR ...\n"));
        IoInitializeDpcRequest(pDevExt->pDeviceObject, vbgdNtDpcHandler);
#ifdef TARGET_NT4
        ULONG uInterruptVector;
        KIRQL irqLevel;
        /* Get an interrupt vector. */
        /* Only proceed if the device provides an interrupt. */
        if (   pDevExt->interruptLevel
            || pDevExt->interruptVector)
        {
            LogFlowFunc(("Getting interrupt vector (HAL): Bus=%u, IRQL=%u, Vector=%u\n",
                         pDevExt->busNumber, pDevExt->interruptLevel, pDevExt->interruptVector));
            uInterruptVector = HalGetInterruptVector(PCIBus,
                                                     pDevExt->busNumber,
                                                     pDevExt->interruptLevel,
                                                     pDevExt->interruptVector,
                                                     &irqLevel,
                                                     &pDevExt->interruptAffinity);
            LogFlowFunc(("HalGetInterruptVector returns vector=%u\n", uInterruptVector));
            if (uInterruptVector == 0)
                LogFunc(("No interrupt vector found!\n"));
        }
        else
            LogFunc(("Device does not provide an interrupt!\n"));
#endif
        if (pDevExt->interruptVector)
        {
            LogFlowFunc(("Connecting interrupt ...\n"));
            rc = IoConnectInterrupt(&pDevExt->pInterruptObject,                 /* Out: interrupt object. */
                                    (PKSERVICE_ROUTINE)vbgdNtIsrHandler,        /* Our ISR handler. */
                                    pDevExt,                                    /* Device context. */
                                    NULL,                                       /* Optional spinlock. */
#ifdef TARGET_NT4
                                    uInterruptVector,                           /* Interrupt vector. */
                                    irqLevel,                                   /* Interrupt level. */
                                    irqLevel,                                   /* Interrupt level. */
#else
                                    pDevExt->interruptVector,                   /* Interrupt vector. */
                                    (KIRQL)pDevExt->interruptLevel,             /* Interrupt level. */
                                    (KIRQL)pDevExt->interruptLevel,             /* Interrupt level. */
#endif
                                    pDevExt->interruptMode,                     /* LevelSensitive or Latched. */
                                    TRUE,                                       /* Shareable interrupt. */
                                    pDevExt->interruptAffinity,                 /* CPU affinity. */
                                    FALSE);                                     /* Don't save FPU stack. */
            if (NT_ERROR(rc))
                LogFunc(("Could not connect interrupt, rc=0x%x\n", rc));
        }
        else
            LogFunc(("No interrupt vector found!\n"));
    }
#ifdef VBOX_WITH_HGCM
    LogFunc(("Allocating kernel session data ...\n"));
    int vrc = VbgdCommonCreateKernelSession(&pDevExt->Core, &pDevExt->pKernelSession);
    if (RT_FAILURE(vrc))
    {
        /* Fix: log the failing IPRT status (vrc); the original logged the
           unrelated NT status 'rc' here. */
        LogFunc(("Failed to allocate kernel session data, rc=%Rrc\n", vrc));
        rc = STATUS_UNSUCCESSFUL;
    }
#endif
    /* Note: rc is an NTSTATUS, so test it with NT_SUCCESS; the original used
       RT_SUCCESS which only worked because both treat negative as failure. */
    if (NT_SUCCESS(rc))
    {
        ULONG ulValue = 0;
        NTSTATUS rcNt = vbgdNtRegistryReadDWORD(RTL_REGISTRY_SERVICES,
                                                L"VBoxGuest", L"LoggingEnabled", &ulValue);
        if (NT_SUCCESS(rcNt))
        {
            /* Values >= 0xFF turn on relaying of the log to the host. */
            pDevExt->Core.fLoggingEnabled = ulValue >= 0xFF;
            if (pDevExt->Core.fLoggingEnabled)
                LogRelFunc(("Logging to host log enabled (0x%x)", ulValue));
        }
        /* Ready to rumble! */
        LogRelFunc(("Device is ready!\n"));
        VBOXGUEST_UPDATE_DEVSTATE(pDevExt, WORKING);
    }
    else
        pDevExt->pInterruptObject = NULL;
    /** @todo r=bird: The error cleanup here is completely missing. We'll leak a
     *        whole bunch of things... */
    LogFunc(("Returned with rc=0x%x\n", rc));
    return rc;
}
/**
 * Cleans up hardware resources.
 * Do not delete DevExt here.
 *
 * Disconnects the interrupt, deregisters the bugcheck callback (when
 * enabled) and unmaps the VMMDev memory.
 *
 * @returns STATUS_SUCCESS (always).
 * @param pDevObj Device object whose resources to clean up.
 */
NTSTATUS vbgdNtCleanup(PDEVICE_OBJECT pDevObj)
{
    LogFlowFuncEnter();
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    if (pDevExt)
    {
#if 0 /* @todo: test & enable cleaning global session data */
#ifdef VBOX_WITH_HGCM
        if (pDevExt->pKernelSession)
        {
            VbgdCommonCloseSession(pDevExt, pDevExt->pKernelSession);
            pDevExt->pKernelSession = NULL;
        }
#endif
#endif
        if (pDevExt->pInterruptObject)
        {
            IoDisconnectInterrupt(pDevExt->pInterruptObject);
            pDevExt->pInterruptObject = NULL;
        }
        /** @todo: cleanup the rest stuff */
#ifdef VBOX_WITH_GUEST_BUGCHECK_DETECTION
        hlpDeregisterBugCheckCallback(pDevExt); /* ignore failure! */
#endif
        /* According to MSDN we have to unmap previously mapped memory. */
        vbgdNtUnmapVMMDevMemory(pDevExt);
    }
    return STATUS_SUCCESS;
}
/**
 * Unload the driver.
 *
 * On NT4 this performs the full teardown (cleanup, device extension
 * deletion, symbolic link and device object removal).  On PnP builds the
 * teardown already happened during IRP_MN_REMOVE_DEVICE, so this is a no-op.
 *
 * @param pDrvObj Driver object.
 */
static void vbgdNtUnload(PDRIVER_OBJECT pDrvObj)
{
    LogFlowFuncEnter();
#ifdef TARGET_NT4
    vbgdNtCleanup(pDrvObj->DeviceObject);
    /* Destroy device extension and clean up everything else. */
    if (pDrvObj->DeviceObject && pDrvObj->DeviceObject->DeviceExtension)
        VbgdCommonDeleteDevExt((PVBOXGUESTDEVEXT)pDrvObj->DeviceObject->DeviceExtension);
    /*
     * I don't think it's possible to unload a driver which processes have
     * opened, at least we'll blindly assume that here.
     */
    UNICODE_STRING DosName;
    RtlInitUnicodeString(&DosName, VBOXGUEST_DEVICE_NAME_DOS);
    NTSTATUS rc = IoDeleteSymbolicLink(&DosName);
    IoDeleteDevice(pDrvObj->DeviceObject);
#else /* !TARGET_NT4 */
    /* On a PnP driver this routine will be called after
     * IRP_MN_REMOVE_DEVICE (where we already did the cleanup),
     * so don't do anything here (yet). */
#endif /* !TARGET_NT4 */
    LogFlowFunc(("Returning\n"));
}
/**
 * Create (i.e. Open) file entry point.
 *
 * Creates a session for the caller (an R3 user session when invoked via a
 * file object, an R0 kernel session otherwise) and stores it in the file
 * object's FsContext.
 *
 * @returns NT status code.
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
static NTSTATUS vbgdNtCreate(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    /** @todo AssertPtrReturn(pIrp); */
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
    /** @todo AssertPtrReturn(pStack); */
    PFILE_OBJECT pFileObj = pStack->FileObject;
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    NTSTATUS rc = STATUS_SUCCESS;
    if (pDevExt->devState != WORKING)
    {
        /* Refuse opens until vbgdNtInit has completed successfully. */
        LogFunc(("Device is not working currently, state=%d\n", pDevExt->devState));
        rc = STATUS_UNSUCCESSFUL;
    }
    else if (pStack->Parameters.Create.Options & FILE_DIRECTORY_FILE)
    {
        /*
         * We are not remotely similar to a directory...
         * (But this is possible.)
         */
        LogFlowFunc(("Uhm, we're not a directory!\n"));
        rc = STATUS_NOT_A_DIRECTORY;
    }
    else
    {
#ifdef VBOX_WITH_HGCM
        if (pFileObj)
        {
            LogFlowFunc(("File object type=%d\n", pFileObj->Type));
            int vrc;
            PVBOXGUESTSESSION pSession;
            if (pFileObj->Type == 5 /* File Object */)
            {
                /*
                 * Create a session object if we have a valid file object. This session object
                 * exists for every R3 process.
                 */
                vrc = VbgdCommonCreateUserSession(&pDevExt->Core, &pSession);
            }
            else
            {
                /* ... otherwise we've been called from R0! */
                vrc = VbgdCommonCreateKernelSession(&pDevExt->Core, &pSession);
            }
            if (RT_SUCCESS(vrc))
                pFileObj->FsContext = pSession;
            else
            {
                /* Fix: the original ignored vrc and returned success with a
                   NULL FsContext; fail the open instead so the caller does
                   not end up on the kernel-session fallback in the IOCtl path. */
                LogFunc(("Error creating session, vrc=%Rrc\n", vrc));
                rc = STATUS_UNSUCCESSFUL;
            }
        }
#endif
    }
    /* Complete the request! */
    pIrp->IoStatus.Information = 0;
    pIrp->IoStatus.Status = rc;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    LogFlowFunc(("Returning rc=0x%x\n", rc));
    return rc;
}
/**
 * Close file entry point (IRP_MJ_CLOSE).
 *
 * Tears down the session stored in the file object's FsContext (both R0 and
 * R3 sessions) and completes the IRP with success.
 *
 * @returns STATUS_SUCCESS (always).
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
static NTSTATUS vbgdNtClose(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    PVBOXGUESTDEVEXTWIN pDevExt   = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    PIO_STACK_LOCATION  pStackLoc = IoGetCurrentIrpStackLocation(pIrp);
    PFILE_OBJECT        pFileObj  = pStackLoc->FileObject;

    LogFlowFunc(("pDevExt=0x%p, pFileObj=0x%p, FsContext=0x%p\n",
                 pDevExt, pFileObj, pFileObj->FsContext));

#ifdef VBOX_WITH_HGCM
    /* Close both, R0 and R3 sessions. */
    PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)pFileObj->FsContext;
    if (pSession)
        VbgdCommonCloseSession(&pDevExt->Core, pSession);
#endif
    pFileObj->FsContext = NULL;

    /* Complete the request. */
    pIrp->IoStatus.Information = 0;
    pIrp->IoStatus.Status      = STATUS_SUCCESS;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    return STATUS_SUCCESS;
}
/**
 * Device I/O Control entry point.
 *
 * Forwards buffered (METHOD_BUFFERED) requests to the common IOCtl handler
 * and translates its IPRT status into an NTSTATUS.  Non-buffered transfer
 * methods are rejected.
 *
 * @returns NT status code.
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
static NTSTATUS vbgdNtIOCtl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    NTSTATUS Status = STATUS_SUCCESS;
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
    unsigned int uCmd = (unsigned int)pStack->Parameters.DeviceIoControl.IoControlCode;
    char *pBuf = (char *)pIrp->AssociatedIrp.SystemBuffer; /* All requests are buffered. */
    size_t cbData = pStack->Parameters.DeviceIoControl.InputBufferLength;
    size_t cbOut = 0;
    /* Do we have a file object associated?*/
    PFILE_OBJECT pFileObj = pStack->FileObject;
    PVBOXGUESTSESSION pSession = NULL;
    if (pFileObj) /* ... then we might have a session object as well! */
        pSession = (PVBOXGUESTSESSION)pFileObj->FsContext;
    LogFlowFunc(("uCmd=%u, pDevExt=0x%p, pSession=0x%p\n",
                 uCmd, pDevExt, pSession));
    /* We don't have a session associated with the file object? So this seems
     * to be a kernel call then. */
    /** @todo r=bird: What on earth is this supposed to be? Each kernel session
     *        shall have its own context of course, no hacks, pleeease. */
    if (pSession == NULL)
    {
        LogFunc(("XXX: BUGBUG: FIXME: Using ugly kernel session data hack ...\n"));
#ifdef DEBUG_andy
        RTLogBackdoorPrintf("XXX: BUGBUG: FIXME: Using ugly kernel session data hack ... Please don't forget to fix this one, Andy!\n");
#endif
        pSession = pDevExt->pKernelSession;
    }
    /* Verify that it's a buffered CTL. */
    if ((pStack->Parameters.DeviceIoControl.IoControlCode & 0x3) == METHOD_BUFFERED)
    {
        /*
         * Process the common IOCtls.
         */
        size_t cbDataReturned;
        int vrc = VbgdCommonIoCtl(uCmd, &pDevExt->Core, pSession, pBuf, cbData, &cbDataReturned);
        LogFlowFunc(("rc=%Rrc, pBuf=0x%p, cbData=%u, cbDataReturned=%u\n",
                     vrc, pBuf, cbData, cbDataReturned));
        if (RT_SUCCESS(vrc))
        {
            /* Clamp the returned size so we never report more than the
               caller's buffers can hold. */
            if (RT_UNLIKELY(   cbDataReturned > cbData
                            || cbDataReturned > pStack->Parameters.DeviceIoControl.OutputBufferLength))
            {
                LogFunc(("Too much output data %u - expected %u!\n", cbDataReturned, cbData));
                cbDataReturned = cbData;
                Status = STATUS_BUFFER_TOO_SMALL;
            }
            if (cbDataReturned > 0)
                cbOut = cbDataReturned;
        }
        else
        {
            /* Map the IPRT failure to an NTSTATUS. */
            if (   vrc == VERR_NOT_SUPPORTED
                || vrc == VERR_INVALID_PARAMETER)
                Status = STATUS_INVALID_PARAMETER;
            else if (vrc == VERR_OUT_OF_RANGE)
                Status = STATUS_INVALID_BUFFER_SIZE;
            else
                Status = STATUS_UNSUCCESSFUL;
        }
    }
    else
    {
        LogFunc(("Not buffered request (%#x) - not supported\n", pStack->Parameters.DeviceIoControl.IoControlCode));
        Status = STATUS_NOT_SUPPORTED;
    }
    pIrp->IoStatus.Status = Status;
    pIrp->IoStatus.Information = cbOut;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    //LogFlowFunc(("Returned cbOut=%d rc=%#x\n", cbOut, Status));
    return Status;
}
/**
 * Internal Device I/O Control entry point.
 *
 * Handles the few kernel-mode-only control codes directly (currently only
 * VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK) and defers everything else to
 * the regular vbgdNtIOCtl path.
 *
 * @returns NT status code.
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
static NTSTATUS vbgdNtInternalIOCtl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    NTSTATUS Status = STATUS_SUCCESS;
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
    unsigned int uCmd = (unsigned int)pStack->Parameters.DeviceIoControl.IoControlCode;
    bool fProcessed = false;
    unsigned Info = 0;
    /*
     * Override common behavior of some operations.
     */
    /** @todo r=bird: Better to add dedicated worker functions for this! */
    switch (uCmd)
    {
        case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
        {
            /* Internal IOCtls pass their buffer via Parameters.Others, not
               the buffered SystemBuffer. */
            PVOID pvBuf = pStack->Parameters.Others.Argument1;
            size_t cbData = (size_t)pStack->Parameters.Others.Argument2;
            fProcessed = true;
            if (cbData != sizeof(VBoxGuestMouseSetNotifyCallback))
            {
                AssertFailed();
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            VBoxGuestMouseSetNotifyCallback *pInfo = (VBoxGuestMouseSetNotifyCallback*)pvBuf;
            /* we need a lock here to avoid concurrency with the set event functionality */
            KIRQL OldIrql;
            KeAcquireSpinLock(&pDevExt->MouseEventAccessLock, &OldIrql);
            pDevExt->Core.MouseNotifyCallback = *pInfo;
            KeReleaseSpinLock(&pDevExt->MouseEventAccessLock, OldIrql);
            Status = STATUS_SUCCESS;
            break;
        }
        default:
            break;
    }
    if (fProcessed)
    {
        /* Handled above; complete the IRP ourselves. */
        pIrp->IoStatus.Status = Status;
        pIrp->IoStatus.Information = Info;
        IoCompleteRequest(pIrp, IO_NO_INCREMENT);
        return Status;
    }
    /*
     * No override, go to common code.
     */
    return vbgdNtIOCtl(pDevObj, pIrp);
}
/**
 * IRP_MJ_SYSTEM_CONTROL handler.
 *
 * WMI requests are none of our business, so every system-control IRP is
 * passed straight down to the next driver in the stack.
 *
 * @returns NT status code from the lower driver.
 * @param pDevObj Device object.
 * @param pIrp IRP.
 */
NTSTATUS vbgdNtSystemControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    PVBOXGUESTDEVEXTWIN pExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    LogFlowFuncEnter();

    /* Always pass it on to the next driver. */
    IoSkipCurrentIrpStackLocation(pIrp);
    return IoCallDriver(pExt->pNextLowerDriver, pIrp);
}
/**
 * IRP_MJ_SHUTDOWN handler.
 *
 * Notifies the host that the guest is powering off, using the power state
 * request pre-allocated in vbgdNtInit.
 *
 * @returns STATUS_SUCCESS (always).
 * @param pDevObj Device object.
 * @param pIrp IRP.
 */
NTSTATUS vbgdNtShutdown(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    PVBOXGUESTDEVEXTWIN pExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    LogFlowFuncEnter();

    /* Nothing to tell the host if the request buffer was never allocated. */
    VMMDevPowerStateRequest *pReq = pExt->pPowerStateRequest;
    if (!pReq)
        return STATUS_SUCCESS;

    pReq->header.requestType = VMMDevReq_SetPowerStatus;
    pReq->powerState         = VMMDevPowerState_PowerOff;

    int rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc))
        LogFunc(("Error performing request to VMMDev, rc=%Rrc\n", rc));
    return STATUS_SUCCESS;
}
/**
 * Stub dispatch routine for major functions we do not implement
 * (IRP_MJ_READ and IRP_MJ_WRITE).
 *
 * @returns STATUS_NOT_SUPPORTED (always).
 * @param pDevObj Device object.
 * @param pIrp IRP.
 */
NTSTATUS vbgdNtNotSupportedStub(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    LogFlowFuncEnter();

    /* Fail the request without transferring any data. */
    pIrp->IoStatus.Information = 0;
    pIrp->IoStatus.Status      = STATUS_NOT_SUPPORTED;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);

    return STATUS_NOT_SUPPORTED;
}
/**
 * DPC handler.
 *
 * Queued from the ISR; runs at DISPATCH_LEVEL.  Fires the mouse notify
 * callback when the mouse position changed and wakes up common-code waiters.
 *
 * @param pDPC DPC descriptor.
 * @param pDevObj Device object.
 * @param pIrp Interrupt request packet.
 * @param pContext Context specific pointer.
 */
void vbgdNtDpcHandler(PKDPC pDPC, PDEVICE_OBJECT pDevObj, PIRP pIrp, PVOID pContext)
{
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pDevObj->DeviceExtension;
    Log3Func(("pDevExt=0x%p\n", pDevExt));
    /* Test & reset the counter. */
    if (ASMAtomicXchgU32(&pDevExt->Core.u32MousePosChangedSeq, 0))
    {
        /* we need a lock here to avoid concurrency with the set event ioctl handler thread,
         * i.e. to prevent the event from destroyed while we're using it */
        Assert(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireSpinLockAtDpcLevel(&pDevExt->MouseEventAccessLock);
        if (pDevExt->Core.MouseNotifyCallback.pfnNotify)
            pDevExt->Core.MouseNotifyCallback.pfnNotify(pDevExt->Core.MouseNotifyCallback.pvUser);
        KeReleaseSpinLockFromDpcLevel(&pDevExt->MouseEventAccessLock);
    }
    /* Process the wake-up list we were asked by the scheduling a DPC
     * in vbgdNtIsrHandler(). */
    VbgdCommonWaitDoWakeUps(&pDevExt->Core);
}
/**
 * ISR handler.
 *
 * Runs at device IRQL; defers all event signalling to the DPC because
 * KeSetEvent must not be called at this level.
 *
 * @return BOOLEAN Indicates whether the IRQ came from us (TRUE) or not (FALSE).
 * @param pInterrupt Interrupt that was triggered.
 * @param pServiceContext Context specific pointer (our device extension).
 */
BOOLEAN vbgdNtIsrHandler(PKINTERRUPT pInterrupt, PVOID pServiceContext)
{
    PVBOXGUESTDEVEXTWIN pDevExt = (PVBOXGUESTDEVEXTWIN)pServiceContext;
    if (pDevExt == NULL)
        return FALSE;
    /*Log3Func(("pDevExt=0x%p, pVMMDevMemory=0x%p\n", pDevExt, pDevExt ? pDevExt->pVMMDevMemory : NULL));*/
    /* Enter the common ISR routine and do the actual work. */
    BOOLEAN fIRQTaken = VbgdCommonISR(&pDevExt->Core);
    /* If we need to wake up some events we do that in a DPC to make
     * sure we're called at the right IRQL. */
    if (fIRQTaken)
    {
        Log3Func(("IRQ was taken! pInterrupt=0x%p, pDevExt=0x%p\n", pInterrupt, pDevExt));
        /* Only queue the DPC when there is actually work for it to do. */
        if (   ASMAtomicUoReadU32( &pDevExt->Core.u32MousePosChangedSeq)
            || !RTListIsEmpty(&pDevExt->Core.WakeUpList))
        {
            Log3Func(("Requesting DPC ...\n"));
            IoRequestDpc(pDevExt->pDeviceObject, pDevExt->pCurrentIrp, NULL);
        }
    }
    return fIRQTaken;
}
/**
 * Overridden routine for mouse polling events.
 *
 * Intentionally a no-op on Windows: KeSetEvent cannot be called at ISR
 * (device) IRQL, so signalling is deferred to the DPC instead.
 *
 * @param pDevExt Device extension structure.
 */
void VbgdNativeISRMousePollEvent(PVBOXGUESTDEVEXT pDevExt)
{
    NOREF(pDevExt);
    /* nothing to do here - i.e. since we can not KeSetEvent from ISR level,
     * we rely on the pDevExt->u32MousePosChangedSeq to be set to a non-zero value on a mouse event
     * and queue the DPC in our ISR routine in that case doing KeSetEvent from the DPC routine */
}
/**
 * Queries (gets) a DWORD value from the registry.
 *
 * @return NTSTATUS
 * @param ulRoot Relative path root. See RTL_REGISTRY_SERVICES or RTL_REGISTRY_ABSOLUTE.
 * @param pwszPath Path inside path root.
 * @param pwszName Actual value name to look up.
 * @param puValue On input this can specify the default value (if RTL_REGISTRY_OPTIONAL is
 *                not specified in ulRoot), on output this will retrieve the looked up
 *                registry value if found.
 */
NTSTATUS vbgdNtRegistryReadDWORD(ULONG ulRoot, PCWSTR pwszPath, PWSTR pwszName, PULONG puValue)
{
    /* Validate input. */
    if (!pwszPath || !pwszName || !puValue)
        return STATUS_INVALID_PARAMETER;

    /* The caller-supplied value doubles as the default. */
    ULONG ulDefault = *puValue;

    /* Entry [1] stays zeroed and terminates the query table. */
    RTL_QUERY_REGISTRY_TABLE aQuery[2];
    RtlZeroMemory(aQuery, sizeof(aQuery));
    /** @todo Add RTL_QUERY_REGISTRY_TYPECHECK! */
    aQuery[0].Flags         = RTL_QUERY_REGISTRY_DIRECT;
    aQuery[0].Name          = pwszName;
    aQuery[0].EntryContext  = puValue;
    aQuery[0].DefaultType   = REG_DWORD;
    aQuery[0].DefaultData   = &ulDefault;
    aQuery[0].DefaultLength = sizeof(ULONG);

    return RtlQueryRegistryValues(ulRoot, pwszPath, &aQuery[0],
                                  NULL /* Context */, NULL /* Environment */);
}
/**
 * Helper to scan the PCI resource list and remember stuff.
 *
 * Records the I/O port base, interrupt parameters and the first read/write
 * MMIO range (the VMMDev memory) in the device extension.
 *
 * @returns STATUS_SUCCESS (rc is never changed in the loop).
 * @param pResList Resource list
 * @param pDevExt Device extension
 */
NTSTATUS vbgdNtScanPCIResourceList(PCM_RESOURCE_LIST pResList, PVBOXGUESTDEVEXTWIN pDevExt)
{
    /* Enumerate the resource list. */
    LogFlowFunc(("Found %d resources\n",
                 pResList->List->PartialResourceList.Count));
    NTSTATUS rc = STATUS_SUCCESS;
    PCM_PARTIAL_RESOURCE_DESCRIPTOR pPartialData = NULL;
    ULONG rangeCount = 0;
    ULONG cMMIORange = 0;
    /* pBaseAddress walks the pciBaseAddress array in the device extension. */
    PVBOXGUESTWINBASEADDRESS pBaseAddress = pDevExt->pciBaseAddress;
    for (ULONG i = 0; i < pResList->List->PartialResourceList.Count; i++)
    {
        pPartialData = &pResList->List->PartialResourceList.PartialDescriptors[i];
        switch (pPartialData->Type)
        {
            case CmResourceTypePort:
            {
                /* Overflow protection. */
                if (rangeCount < PCI_TYPE0_ADDRESSES)
                {
                    LogFlowFunc(("I/O range: Base=%08x:%08x, length=%08x\n",
                                 pPartialData->u.Port.Start.HighPart,
                                 pPartialData->u.Port.Start.LowPart,
                                 pPartialData->u.Port.Length));
                    /* Save the IO port base. */
                    /** @todo Not so good.
                     * Update/bird: What is not so good? That we just consider the last range? */
                    pDevExt->Core.IOPortBase = (RTIOPORT)pPartialData->u.Port.Start.LowPart;
                    /* Save resource information. */
                    pBaseAddress->RangeStart = pPartialData->u.Port.Start;
                    pBaseAddress->RangeLength = pPartialData->u.Port.Length;
                    pBaseAddress->RangeInMemory = FALSE;
                    pBaseAddress->ResourceMapped = FALSE;
                    LogFunc(("I/O range for VMMDev found! Base=%08x:%08x, length=%08x\n",
                             pPartialData->u.Port.Start.HighPart,
                             pPartialData->u.Port.Start.LowPart,
                             pPartialData->u.Port.Length));
                    /* Next item ... */
                    rangeCount++; pBaseAddress++;
                }
                break;
            }
            case CmResourceTypeInterrupt:
            {
                LogFunc(("Interrupt: Level=%x, vector=%x, mode=%x\n",
                         pPartialData->u.Interrupt.Level,
                         pPartialData->u.Interrupt.Vector,
                         pPartialData->Flags));
                /* Save information. */
                pDevExt->interruptLevel = pPartialData->u.Interrupt.Level;
                pDevExt->interruptVector = pPartialData->u.Interrupt.Vector;
                pDevExt->interruptAffinity = pPartialData->u.Interrupt.Affinity;
                /* Check interrupt mode. */
                if (pPartialData->Flags & CM_RESOURCE_INTERRUPT_LATCHED)
                    pDevExt->interruptMode = Latched;
                else
                    pDevExt->interruptMode = LevelSensitive;
                break;
            }
            case CmResourceTypeMemory:
            {
                /* Overflow protection. */
                if (rangeCount < PCI_TYPE0_ADDRESSES)
                {
                    LogFlowFunc(("Memory range: Base=%08x:%08x, length=%08x\n",
                                 pPartialData->u.Memory.Start.HighPart,
                                 pPartialData->u.Memory.Start.LowPart,
                                 pPartialData->u.Memory.Length));
                    /* We only care about read/write memory. */
                    /** @todo Reconsider memory type. */
                    if (   cMMIORange == 0 /* Only care about the first MMIO range (!!!). */
                        && (pPartialData->Flags & VBOX_CM_PRE_VISTA_MASK) == CM_RESOURCE_MEMORY_READ_WRITE)
                    {
                        /* Save physical MMIO base + length for VMMDev. */
                        pDevExt->vmmDevPhysMemoryAddress = pPartialData->u.Memory.Start;
                        pDevExt->vmmDevPhysMemoryLength = (ULONG)pPartialData->u.Memory.Length;
                        /* Save resource information. */
                        pBaseAddress->RangeStart = pPartialData->u.Memory.Start;
                        pBaseAddress->RangeLength = pPartialData->u.Memory.Length;
                        pBaseAddress->RangeInMemory = TRUE;
                        pBaseAddress->ResourceMapped = FALSE;
                        LogFunc(("Memory range for VMMDev found! Base = %08x:%08x, Length = %08x\n",
                                 pPartialData->u.Memory.Start.HighPart,
                                 pPartialData->u.Memory.Start.LowPart,
                                 pPartialData->u.Memory.Length));
                        /* Next item ... */
                        rangeCount++; pBaseAddress++; cMMIORange++;
                    }
                    else
                        LogFunc(("Ignoring memory: Flags=%08x\n", pPartialData->Flags));
                }
                break;
            }
            default:
            {
                LogFunc(("Unhandled resource found, type=%d\n", pPartialData->Type));
                break;
            }
        }
    }
    /* Memorize the number of resources found. */
    pDevExt->pciAddressCount = rangeCount;
    return rc;
}
/**
 * Maps the I/O space from VMMDev to virtual kernel address space.
 *
 * @return NTSTATUS
 *
 * @param pDevExt The device extension.
 * @param PhysAddr Physical address to map.
 * @param cbToMap Number of bytes to map.
 * @param ppvMMIOBase Pointer of mapped I/O base.
 * @param pcbMMIO Length of mapped I/O base.
 */
NTSTATUS vbgdNtMapVMMDevMemory(PVBOXGUESTDEVEXTWIN pDevExt, PHYSICAL_ADDRESS PhysAddr, ULONG cbToMap,
                               void **ppvMMIOBase, uint32_t *pcbMMIO)
{
    /* NOTE(review): these asserts return IPRT VERR_* values from a function
     * declared NTSTATUS - mixed status spaces; confirm intended. */
    AssertPtrReturn(pDevExt, VERR_INVALID_POINTER);
    AssertPtrReturn(ppvMMIOBase, VERR_INVALID_POINTER);
    /* pcbMMIO is optional. */
    NTSTATUS rc = STATUS_SUCCESS;
    /* NOTE(review): LowPart > 0 does not strictly mean "below 4GB"; an address
     * with LowPart == 0 (including a 4GB-aligned one) skips mapping and
     * returns success with *ppvMMIOBase untouched - confirm callers expect this. */
    if (PhysAddr.LowPart > 0) /* We're mapping below 4GB. */
    {
        VMMDevMemory *pVMMDevMemory = (VMMDevMemory *)MmMapIoSpace(PhysAddr, cbToMap, MmNonCached);
        LogFlowFunc(("pVMMDevMemory = 0x%x\n", pVMMDevMemory));
        if (pVMMDevMemory)
        {
            LogFunc(("VMMDevMemory: Version = 0x%x, Size = %d\n", pVMMDevMemory->u32Version, pVMMDevMemory->u32Size));
            /* Check version of the structure; do we have the right memory version? */
            if (pVMMDevMemory->u32Version == VMMDEV_MEMORY_VERSION)
            {
                /* Save results. */
                *ppvMMIOBase = pVMMDevMemory;
                if (pcbMMIO) /* Optional. */
                    *pcbMMIO = pVMMDevMemory->u32Size;
                LogFlowFunc(("VMMDevMemory found and mapped! pvMMIOBase = 0x%p\n", *ppvMMIOBase));
            }
            else
            {
                /* Not our version, refuse operation and unmap the memory. */
                LogFunc(("Wrong version (%u), refusing operation!\n", pVMMDevMemory->u32Version));
                vbgdNtUnmapVMMDevMemory(pDevExt);
                rc = STATUS_UNSUCCESSFUL;
            }
        }
        else
            rc = STATUS_UNSUCCESSFUL;
    }
    return rc;
}
/**
 * Unmaps the VMMDev I/O range from kernel space.
 *
 * Safe to call when nothing is mapped; also resets the remembered physical
 * address and length.
 *
 * @param pDevExt The device extension.
 */
void vbgdNtUnmapVMMDevMemory(PVBOXGUESTDEVEXTWIN pDevExt)
{
    LogFlowFunc(("pVMMDevMemory = 0x%x\n", pDevExt->Core.pVMMDevMemory));

    if (pDevExt->Core.pVMMDevMemory != NULL)
    {
        MmUnmapIoSpace((void*)pDevExt->Core.pVMMDevMemory, pDevExt->vmmDevPhysMemoryLength);
        pDevExt->Core.pVMMDevMemory = NULL;
    }

    /* Forget the physical range as well. */
    pDevExt->vmmDevPhysMemoryAddress.QuadPart = 0;
    pDevExt->vmmDevPhysMemoryLength           = 0;
}
/**
 * Translates the detected NT version into the corresponding VBOXOSTYPE,
 * taking the build architecture into account.
 *
 * @returns The matching VBOXOSTYPE, or VBOXOSTYPE_WinNT for unknown versions.
 * @param enmNtVer The detected NT version.
 */
VBOXOSTYPE vbgdNtVersionToOSType(VBGDNTVER enmNtVer)
{
    switch (enmNtVer)
    {
        case VBGDNTVER_WINNT4:
            return VBOXOSTYPE_WinNT4;
        case VBGDNTVER_WIN2K:
            return VBOXOSTYPE_Win2k;
#if ARCH_BITS == 64
        case VBGDNTVER_WINXP:
            return VBOXOSTYPE_WinXP_x64;
        case VBGDNTVER_WIN2K3:
            return VBOXOSTYPE_Win2k3_x64;
        case VBGDNTVER_WINVISTA:
            return VBOXOSTYPE_WinVista_x64;
        case VBGDNTVER_WIN7:
            return VBOXOSTYPE_Win7_x64;
        case VBGDNTVER_WIN8:
            return VBOXOSTYPE_Win8_x64;
        case VBGDNTVER_WIN81:
            return VBOXOSTYPE_Win81_x64;
        case VBGDNTVER_WIN10:
            return VBOXOSTYPE_Win10_x64;
#else
        case VBGDNTVER_WINXP:
            return VBOXOSTYPE_WinXP;
        case VBGDNTVER_WIN2K3:
            return VBOXOSTYPE_Win2k3;
        case VBGDNTVER_WINVISTA:
            return VBOXOSTYPE_WinVista;
        case VBGDNTVER_WIN7:
            return VBOXOSTYPE_Win7;
        case VBGDNTVER_WIN8:
            return VBOXOSTYPE_Win8;
        case VBGDNTVER_WIN81:
            return VBOXOSTYPE_Win81;
        case VBGDNTVER_WIN10:
            return VBOXOSTYPE_Win10;
#endif
        default:
            /* We don't know, therefore NT family. */
            return VBOXOSTYPE_WinNT;
    }
}
#ifdef DEBUG
/**
* A quick implementation of AtomicTestAndClear for uint32_t and multiple bits.
*/
static uint32_t vboxugestwinAtomicBitsTestAndClear(void *pu32Bits, uint32_t u32Mask)
{
AssertPtrReturn(pu32Bits, 0);
LogFlowFunc(("*pu32Bits=0x%x, u32Mask=0x%x\n", *(uint32_t *)pu32Bits, u32Mask));
uint32_t u32Result = 0;
uint32_t u32WorkingMask = u32Mask;
int iBitOffset = ASMBitFirstSetU32 (u32WorkingMask);
while (iBitOffset > 0)
{
bool fSet = ASMAtomicBitTestAndClear(pu32Bits, iBitOffset - 1);
if (fSet)
u32Result |= 1 << (iBitOffset - 1);
u32WorkingMask &= ~(1 << (iBitOffset - 1));
iBitOffset = ASMBitFirstSetU32 (u32WorkingMask);
}
LogFlowFunc(("Returning 0x%x\n", u32Result));
return u32Result;
}
static void vbgdNtTestAtomicTestAndClearBitsU32(uint32_t u32Mask, uint32_t u32Bits, uint32_t u32Exp)
{
ULONG u32Bits2 = u32Bits;
uint32_t u32Result = vboxugestwinAtomicBitsTestAndClear(&u32Bits2, u32Mask);
if ( u32Result != u32Exp
|| (u32Bits2 & u32Mask)
|| (u32Bits2 & u32Result)
|| ((u32Bits2 | u32Result) != u32Bits)
)
AssertLogRelMsgFailed(("%s: TEST FAILED: u32Mask=0x%x, u32Bits (before)=0x%x, u32Bits (after)=0x%x, u32Result=0x%x, u32Exp=ox%x\n",
__PRETTY_FUNCTION__, u32Mask, u32Bits, u32Bits2,
u32Result));
}
static void vbgdNtDoTests(void)
{
vbgdNtTestAtomicTestAndClearBitsU32(0x00, 0x23, 0);
vbgdNtTestAtomicTestAndClearBitsU32(0x11, 0, 0);
vbgdNtTestAtomicTestAndClearBitsU32(0x11, 0x22, 0);
vbgdNtTestAtomicTestAndClearBitsU32(0x11, 0x23, 0x1);
vbgdNtTestAtomicTestAndClearBitsU32(0x11, 0x32, 0x10);
vbgdNtTestAtomicTestAndClearBitsU32(0x22, 0x23, 0x22);
}
#endif /* DEBUG */
#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
/*
* DPC latency checker.
*/<|fim▁hole|> */
typedef struct DPCSAMPLE
{
LARGE_INTEGER PerfDelta;
LARGE_INTEGER PerfCounter;
LARGE_INTEGER PerfFrequency;
uint64_t u64TSC;
} DPCSAMPLE;
AssertCompileSize(DPCSAMPLE, 4*8);
/**
* The DPC latency measurement workset.
*/
typedef struct DPCDATA
{
KDPC Dpc;
KTIMER Timer;
KSPIN_LOCK SpinLock;
ULONG ulTimerRes;
bool volatile fFinished;
/** The timer interval (relative). */
LARGE_INTEGER DueTime;
LARGE_INTEGER PerfCounterPrev;
/** Align the sample array on a 64 byte boundrary just for the off chance
* that we'll get cache line aligned memory backing this structure. */
uint32_t auPadding[ARCH_BITS == 32 ? 5 : 7];
int cSamples;
DPCSAMPLE aSamples[8192];
} DPCDATA;
AssertCompileMemberAlignment(DPCDATA, aSamples, 64);
# define VBOXGUEST_DPC_TAG 'DPCS'
/**
* DPC callback routine for the DPC latency measurement code.
*
* @param pDpc The DPC, not used.
* @param pvDeferredContext Pointer to the DPCDATA.
* @param SystemArgument1 System use, ignored.
* @param SystemArgument2 System use, ignored.
*/
static VOID vbgdNtDpcLatencyCallback(PKDPC pDpc, PVOID pvDeferredContext, PVOID SystemArgument1, PVOID SystemArgument2)
{
DPCDATA *pData = (DPCDATA *)pvDeferredContext;
KeAcquireSpinLockAtDpcLevel(&pData->SpinLock);
if (pData->cSamples >= RT_ELEMENTS(pData->aSamples))
pData->fFinished = true;
else
{
DPCSAMPLE *pSample = &pData->aSamples[pData->cSamples++];
pSample->u64TSC = ASMReadTSC();
pSample->PerfCounter = KeQueryPerformanceCounter(&pSample->PerfFrequency);
pSample->PerfDelta.QuadPart = pSample->PerfCounter.QuadPart - pData->PerfCounterPrev.QuadPart;
pData->PerfCounterPrev.QuadPart = pSample->PerfCounter.QuadPart;
KeSetTimer(&pData->Timer, pData->DueTime, &pData->Dpc);
}
KeReleaseSpinLockFromDpcLevel(&pData->SpinLock);
}
/**
* Handles the DPC latency checker request.
*
* @returns VBox status code.
*/
int VbgdNtIOCtl_DpcLatencyChecker(void)
{
/*
* Allocate a block of non paged memory for samples and related data.
*/
DPCDATA *pData = (DPCDATA *)ExAllocatePoolWithTag(NonPagedPool, sizeof(DPCDATA), VBOXGUEST_DPC_TAG);
if (!pData)
{
RTLogBackdoorPrintf("VBoxGuest: DPC: DPCDATA allocation failed.\n");
return VERR_NO_MEMORY;
}
/*
* Initialize the data.
*/
KeInitializeDpc(&pData->Dpc, vbgdNtDpcLatencyCallback, pData);
KeInitializeTimer(&pData->Timer);
KeInitializeSpinLock(&pData->SpinLock);
pData->fFinished = false;
pData->cSamples = 0;
pData->PerfCounterPrev.QuadPart = 0;
pData->ulTimerRes = ExSetTimerResolution(1000 * 10, 1);
pData->DueTime.QuadPart = -(int64_t)pData->ulTimerRes / 10;
/*
* Start the DPC measurements and wait for a full set.
*/
KeSetTimer(&pData->Timer, pData->DueTime, &pData->Dpc);
while (!pData->fFinished)
{
LARGE_INTEGER Interval;
Interval.QuadPart = -100 * 1000 * 10;
KeDelayExecutionThread(KernelMode, TRUE, &Interval);
}
ExSetTimerResolution(0, 0);
/*
* Log everything to the host.
*/
RTLogBackdoorPrintf("DPC: ulTimerRes = %d\n", pData->ulTimerRes);
for (int i = 0; i < pData->cSamples; i++)
{
DPCSAMPLE *pSample = &pData->aSamples[i];
RTLogBackdoorPrintf("[%d] pd %lld pc %lld pf %lld t %lld\n",
i,
pSample->PerfDelta.QuadPart,
pSample->PerfCounter.QuadPart,
pSample->PerfFrequency.QuadPart,
pSample->u64TSC);
}
ExFreePoolWithTag(pData, VBOXGUEST_DPC_TAG);
return VINF_SUCCESS;
}
#endif /* VBOX_WITH_DPC_LATENCY_CHECKER */<|fim▁end|> |
/**
* One DPC latency sample. |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>angular.module('editor', [
'ui.router',
'editor.conf.persistence',
'editor.conf.loadingbar',
'editor.views.recipes',
'editor.views.recipes.recipe',
'editor.views.recipes.sheet',
'editor.views.ingredients',
'editor.views.equipments',
'editor.views.equipments.equipment'
])
.config(['$urlRouterProvider', '$urlMatcherFactoryProvider',
function($urlRouterProvider, $urlMatcherFactoryProvider) {
$urlRouterProvider.when('', '/recipes');<|fim▁hole|><|fim▁end|> | $urlMatcherFactoryProvider.strictMode(false);
}]); |
<|file_name|>userController.js<|end_file_name|><|fim▁begin|>var express = require('express');
var userRouter = express.Router();
var passport = require('passport');
var Model = require('../models/user');
var authenticate = require('./auth');
<|fim▁hole|>
/* GET all the users */
exports.getAll = function(req, res, next) {
Model.Users.forge()
.fetch({ columns: ['_id', 'username', 'admin'] })
.then(function(user) {
res.json(user);
}).catch(function(err) {
console.log(err);
});
};
/* GET a user given its id */
exports.getUser = function(req, res, next) {
var userID = req.params.userID;
Model.User.forge({'_id':userID})
.fetch({columns:['_id', 'username', 'admin']})
.then(function (user) {
res.json(user);
});
};
/* PUT update a user given its id */
exports.updateUser = function(req, res, next) {
var userID = req.params.userID;
Model.User.forge({'_id':userID})
.fetch({columns:['_id', 'username', 'admin']})
.then(function (user) {
res.json(user);
});
};
/* GET logout */
exports.signOut = function(req, res){
req.logout();
res.status(200).json({message: 'Login user out'});
};
/* POST login. */
exports.signIn = function(req, res, next) {
passport.authenticate('local', function(err, user, info) {
if (err) {
return next(err);
}
if (!user) {
return res.status(401).json({
err: info
});
}
return req.logIn(user, function(err) {
if (err) {
console.log(err);
return res.status(500).json({
error: 'Could not log in user'
});
}
var token = authenticate.getToken(user);
res.status(200).json({
status: 'Login successful',
succes: true,
token: token
});
});
})(req, res, next);
};
/* POST Register. */
exports.signUp = function(req, res, next) {
var userData = req.body;
Model.User.forge({
username: userData.username
}).fetch() //see if user already exists
.then(function(user) {
if (user) {
return res.status(400).json({
title: 'signup',
errorMessage: 'That username already exists'
});
} else {
//User does not existe, lets add it
var signUpUser = Model.User.forge({
username: userData.username,
password: userData.password,
admin: false
});
signUpUser.save()
.then(function(user) {
var result = 'User ' + user.get('username') + ' created.';
return res.status(200).json({
message: result,
user: {
id: user.get('id'),
username: user.get('username'),
}
});
});
}
});
};<|fim▁end|> | |
<|file_name|>test_api.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def test_api_endpoint_existence(todolist_app):
with todolist_app.test_client() as client:
resp = client.get('/tasks')
assert resp.status_code == 200
def test_task_creation(todolist_app):
with todolist_app.test_client() as client:
resp = client.jpost(
'/tasks', {
"title": "First task"
}
)
assert resp['status'] == 'success'
assert 'id' in resp['result']
assert resp['result']['id'] == 1
def test_task_updation(todolist_app):
with todolist_app.test_client() as client:
modified_title = "First task - modified"
resp = client.jput(
'/tasks/1', {
"title": "First task - modified"
}
)
assert resp['status'] == 'success'
assert 'id' in resp['result']
assert resp['result']['title'] == modified_title<|fim▁end|> | import requests
import json
|
<|file_name|>prev-button.js<|end_file_name|><|fim▁begin|>import React from 'react';
import cn from 'classnames';
import keyboardJS from 'keyboardjs';
import ReactTooltip from 'react-tooltip';
import sendToAddon from '../client-lib/send-to-addon';
export default class PrevButton extends React.Component {
constructor(props) {
super(props);
this.state = {historyIndex: 0};
}
componentDidMount() {
// previous track
keyboardJS.bind('<', () => this.prevTrack());
}
prevTrack() {
let index;
// if clicked more than once within
// 5 seconds increment the index so
// the user can get to further back<|fim▁hole|> if (this.searchingHistory) {
if (this.props.history.length > this.state.historyIndex + 1) {
this.setState({historyIndex: this.state.historyIndex + 1});
}
index = this.state.historyIndex;
} else {
index = 0;
this.searchingHistory = true;
setTimeout(() => {
this.searchingHistory = false;
this.setState({historyIndex: 0});
}, 5000);
}
sendToAddon({
action: 'track-added-from-history',
index
});
}
render() {
return (
<div className={cn('prev-wrapper', {hidden: (!this.props.hovered && !this.props.minimized) || this.props.confirm || !this.props.history.length})}>
<a onClick={this.prevTrack.bind(this)}
className='prev' data-tip data-for='prev' />
<ReactTooltip id='prev' effect='solid' place='right'>{this.props.strings.ttPrev}</ReactTooltip>
</div>
);
}
}<|fim▁end|> | // in history. Resets when timeout wears out. |
<|file_name|>TestProject.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright (C) 2007, Robin Rosenberg <robin.rosenberg@dewire.com>
* Copyright (C) 2006, Shawn O. Pearce <spearce@spearce.org>
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*******************************************************************************/
package org.eclipse.egit.core.test;
import java.net.URL;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IProjectDescription;
import org.eclipse.core.resources.IWorkspaceRoot;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.Path;
import org.eclipse.core.runtime.Platform;
import org.eclipse.jdt.core.IClasspathEntry;
import org.eclipse.jdt.core.ICompilationUnit;
import org.eclipse.jdt.core.IJavaProject;
import org.eclipse.jdt.core.IPackageFragment;
import org.eclipse.jdt.core.IPackageFragmentRoot;
import org.eclipse.jdt.core.IType;
import org.eclipse.jdt.core.JavaCore;
import org.eclipse.jdt.core.JavaModelException;
import org.eclipse.jdt.launching.JavaRuntime;
import org.osgi.framework.Bundle;
public class TestProject {
public IProject project;
public IJavaProject javaProject;
private IPackageFragmentRoot sourceFolder;
/**
* @throws CoreException
* If project already exists
*/
public TestProject() throws CoreException {
this(false);
}
/**
* @param remove
* should project be removed if already exists
* @throws CoreException
*/
public TestProject(final boolean remove) throws CoreException {
IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
project = root.getProject("Project-1");
if (remove)
project.delete(true, null);
project.create(null);
project.open(null);
javaProject = JavaCore.create(project);
IFolder binFolder = createBinFolder();
setJavaNature();
javaProject.setRawClasspath(new IClasspathEntry[0], null);
createOutputFolder(binFolder);
addSystemLibraries();
}
public IProject getProject() {
return project;
}
public IJavaProject getJavaProject() {
return javaProject;
}
public void addJar(String plugin, String jar) throws JavaModelException {
Path result = findFileInPlugin(plugin, jar);
IClasspathEntry[] oldEntries = javaProject.getRawClasspath();
IClasspathEntry[] newEntries = new IClasspathEntry[oldEntries.length + 1];
System.arraycopy(oldEntries, 0, newEntries, 0, oldEntries.length);
newEntries[oldEntries.length] = JavaCore.newLibraryEntry(result, null,
null);
javaProject.setRawClasspath(newEntries, null);
}
public IPackageFragment createPackage(String name) throws CoreException {
if (sourceFolder == null)
sourceFolder = createSourceFolder();
return sourceFolder.createPackageFragment(name, false, null);
}
public IType createType(IPackageFragment pack, String cuName, String source)
throws JavaModelException {
StringBuffer buf = new StringBuffer();
buf.append("package " + pack.getElementName() + ";\n");
buf.append("\n");
buf.append(source);
ICompilationUnit cu = pack.createCompilationUnit(cuName,<|fim▁hole|> return cu.getTypes()[0];
}
public void dispose() throws CoreException {
waitForIndexer();
project.delete(true, true, null);
}
private IFolder createBinFolder() throws CoreException {
IFolder binFolder = project.getFolder("bin");
binFolder.create(false, true, null);
return binFolder;
}
private void setJavaNature() throws CoreException {
IProjectDescription description = project.getDescription();
description.setNatureIds(new String[] { JavaCore.NATURE_ID });
project.setDescription(description, null);
}
private void createOutputFolder(IFolder binFolder)
throws JavaModelException {
IPath outputLocation = binFolder.getFullPath();
javaProject.setOutputLocation(outputLocation, null);
}
public IPackageFragmentRoot createSourceFolder() throws CoreException {
IFolder folder = project.getFolder("src");
folder.create(false, true, null);
IPackageFragmentRoot root = javaProject.getPackageFragmentRoot(folder);
IClasspathEntry[] oldEntries = javaProject.getRawClasspath();
IClasspathEntry[] newEntries = new IClasspathEntry[oldEntries.length + 1];
System.arraycopy(oldEntries, 0, newEntries, 0, oldEntries.length);
newEntries[oldEntries.length] = JavaCore.newSourceEntry(root.getPath());
javaProject.setRawClasspath(newEntries, null);
return root;
}
private void addSystemLibraries() throws JavaModelException {
IClasspathEntry[] oldEntries = javaProject.getRawClasspath();
IClasspathEntry[] newEntries = new IClasspathEntry[oldEntries.length + 1];
System.arraycopy(oldEntries, 0, newEntries, 0, oldEntries.length);
newEntries[oldEntries.length] = JavaRuntime
.getDefaultJREContainerEntry();
javaProject.setRawClasspath(newEntries, null);
}
private Path findFileInPlugin(String plugin, String file) {
Bundle bundle = Platform.getBundle(plugin);
URL resource = bundle.getResource(file);
return new Path(resource.getPath());
}
public void waitForIndexer() {
// new SearchEngine().searchAllTypeNames(ResourcesPlugin.getWorkspace(),
// null, null, IJavaSearchConstants.EXACT_MATCH,
// IJavaSearchConstants.CASE_SENSITIVE,
// IJavaSearchConstants.CLASS, SearchEngine
// .createJavaSearchScope(new IJavaElement[0]),
// new ITypeNameRequestor() {
// public void acceptClass(char[] packageName,
// char[] simpleTypeName, char[][] enclosingTypeNames,
// String path) {
// }
// public void acceptInterface(char[] packageName,
// char[] simpleTypeName, char[][] enclosingTypeNames,
// String path) {
// }
// }, IJavaSearchConstants.WAIT_UNTIL_READY_TO_SEARCH, null);
}
/**
* @return Returns the sourceFolder.
*/
public IPackageFragmentRoot getSourceFolder() {
return sourceFolder;
}
/**
* @param sourceFolder The sourceFolder to set.
*/
public void setSourceFolder(IPackageFragmentRoot sourceFolder) {
this.sourceFolder = sourceFolder;
}
}<|fim▁end|> | buf.toString(), false, null); |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | CONGRESS_API_KEY = "" |
<|file_name|>0079_convert_species_null_values_to_empty_strings.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for field in ('species', 'cultivar', 'other', 'gender', 'bloom_period',
'fruit_period', 'fact_sheet', 'plant_guide'):
orm.Species.objects.filter(**{field + '__isnull': True}) \
.update(**{field: ''})
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {<|fim▁hole|> 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_index': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'previous_value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.benefitcurrencyconversion': {
'Meta': {'object_name': 'BenefitCurrencyConversion'},
'co2_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'electricity_kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
'h20_gal_to_currency': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'natural_gas_kbtu_to_currency': ('django.db.models.fields.FloatField', [], {}),
'nox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'o3_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'pm10_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'sox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'voc_lb_to_currency': ('django.db.models.fields.FloatField', [], {})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'unique_together': "((u'model_name', u'field_name', u'role', u'instance'),)", 'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'adjuncts_timestamp': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'center_override': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User']", 'null': 'True', 'through': u"orm['treemap.InstanceUser']", 'blank': 'True'})
},
u'treemap.instanceuser': {
'Meta': {'unique_together': "((u'instance', u'user'),)", 'object_name': 'InstanceUser'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.itreecodeoverride': {
'Meta': {'unique_together': "((u'instance_species', u'region'),)", 'object_name': 'ITreeCodeOverride'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']"}),
'itree_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ITreeRegion']"})
},
u'treemap.itreeregion': {
'Meta': {'object_name': 'ITreeRegion'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'treemap.mapfeature': {
'Meta': {'object_name': 'MapFeature'},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'udfs': (u'treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.mapfeaturephoto': {
'Meta': {'object_name': 'MapFeaturePhoto'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'map_feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.MapFeature']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
u'treemap.plot': {
'Meta': {'object_name': 'Plot', '_ormbases': [u'treemap.MapFeature']},
'length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'mapfeature_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['treemap.MapFeature']", 'unique': 'True', 'primary_key': 'True'}),
'owner_orig_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.reputationmetric': {
'Meta': {'object_name': 'ReputationMetric'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'approval_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'denial_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'direct_write_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'treemap.role': {
'Meta': {'object_name': 'Role'},
'default_permission': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rep_thresh': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.species': {
'Meta': {'object_name': 'Species'},
'bloom_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fact_sheet': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fall_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'flower_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fruit_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'max_dbh': ('django.db.models.fields.IntegerField', [], {'default': '200'}),
'max_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'native_status': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'otm_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'palatable_human': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'plant_guide': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'udfs': (u'treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'}),
'wildlife_value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.tree': {
'Meta': {'object_name': 'Tree'},
'canopy_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_planted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_removed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diameter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'plot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Plot']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']", 'null': 'True', 'blank': 'True'}),
'udfs': (u'treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.treefavorite': {
'Meta': {'unique_together': "((u'user', u'tree'),)", 'object_name': 'TreeFavorite'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.treephoto': {
'Meta': {'object_name': 'TreePhoto', '_ormbases': [u'treemap.MapFeaturePhoto']},
u'mapfeaturephoto_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['treemap.MapFeaturePhoto']", 'unique': 'True', 'primary_key': 'True'}),
'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"})
},
u'treemap.user': {
'Meta': {'object_name': 'User'},
'allow_email_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'make_info_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'treemap.userdefinedcollectionvalue': {
'Meta': {'object_name': 'UserDefinedCollectionValue'},
'data': (u'django_hstore.fields.DictionaryField', [], {}),
'field_definition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.UserDefinedFieldDefinition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.userdefinedfielddefinition': {
'Meta': {'object_name': 'UserDefinedFieldDefinition'},
'datatype': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'iscollection': ('django.db.models.fields.BooleanField', [], {}),
'model_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['treemap']
symmetrical = True<|fim▁end|> | 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, |
<|file_name|>AssetLoader.java<|end_file_name|><|fim▁begin|>package com.slicer.utils;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.audio.Sound;
import com.badlogic.gdx.files.FileHandle;
import com.badlogic.gdx.graphics.Texture;
public class AssetLoader {
public static Texture bgGame, bgGameOver, bgFinish, score, soundOn, soundOff;
public static Sound gameOver, victory, slice, click;
public static FileHandle levelsFile;
public static boolean sound = true;
public static void load() {<|fim▁hole|> bgGameOver = new Texture(Gdx.files.internal("data/images/bg3.png"));
bgFinish = new Texture(Gdx.files.internal("data/images/bg2.png"));
soundOn = new Texture(Gdx.files.internal("data/images/on.png"));
soundOff = new Texture(Gdx.files.internal("data/images/off.png"));
score = new Texture(Gdx.files.internal("data/images/score.png"));
gameOver = Gdx.audio.newSound(Gdx.files.internal("data/sound/gameOver.wav"));
victory = Gdx.audio.newSound(Gdx.files.internal("data/sound/victory.wav"));
slice = Gdx.audio.newSound(Gdx.files.internal("data/sound/slice.wav"));
click = Gdx.audio.newSound(Gdx.files.internal("data/sound/click.wav"));
levelsFile = Gdx.files.internal("data/levels/levels.json");
}
public static void play(Sound s) {
if (sound)
s.play();
}
public static void dispose() {
bgGame.dispose();
bgGameOver.dispose();
bgFinish.dispose();
gameOver.dispose();
victory.dispose();
slice.dispose();
}
}<|fim▁end|> |
bgGame = new Texture(Gdx.files.internal("data/images/bg1.jpeg")); |
<|file_name|>swarmingserver_bot_fake.py<|end_file_name|><|fim▁begin|># Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import base64
import copy
import json
import os
import sys
import threading
BOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(
0,
os.path.join(os.path.dirname(BOT_DIR), '..', '..', '..', 'client', 'tests'))
import httpserver
sys.path.pop(0)
sys.path.insert(0, os.path.join(os.path.dirname(BOT_DIR), 'server'))
import bot_archive
sys.path.pop(0)
def gen_zip(url):
"""Returns swarming_bot.zip content."""
with open(os.path.join(BOT_DIR, 'config', 'bot_config.py'), 'rb') as f:
bot_config_content = f.read()
return bot_archive.get_swarming_bot_zip(
BOT_DIR, url, '1', {'config/bot_config.py': bot_config_content}, None)
def flatten_task_updates(updates):<|fim▁hole|> """
out = {}
for update in updates:
if out.get('output') and update.get('output'):
# Accumulate output.
update = update.copy()
out['output'] = base64.b64encode(
base64.b64decode(out['output']) +
base64.b64decode(update.pop('output')))
update.pop('output_chunk_start')
out.update(update)
return out
class Handler(httpserver.Handler):
"""Minimal Swarming bot server fake implementation."""
def do_GET(self):
if self.path == '/swarming/api/v1/bot/server_ping':
self.send_response(200)
self.end_headers()
return None
if self.path == '/auth/api/v1/server/oauth_config':
return self.send_json({
'client_id': 'id',
'client_not_so_secret': 'hunter2',
'primary_url': self.server.url,
})
raise NotImplementedError(self.path)
def do_POST(self):
data = json.loads(self.read_body())
if self.path == '/auth/api/v1/accounts/self/xsrf_token':
return self.send_json({'xsrf_token': 'a'})
if self.path == '/swarming/api/v1/bot/event':
self.server.parent._add_bot_event(data)
return self.send_json({})
if self.path == '/swarming/api/v1/bot/handshake':
return self.send_json({'xsrf_token': 'fine'})
if self.path == '/swarming/api/v1/bot/poll':
self.server.parent.has_polled.set()
return self.send_json({'cmd': 'sleep', 'duration': 60})
if self.path.startswith('/swarming/api/v1/bot/task_update/'):
task_id = self.path[len('/swarming/api/v1/bot/task_update/'):]
must_stop = self.server.parent._on_task_update(task_id, data)
return self.send_json({'ok': True, 'must_stop': must_stop})
if self.path.startswith('/swarming/api/v1/bot/task_error'):
task_id = self.path[len('/swarming/api/v1/bot/task_error/'):]
self.server.parent._add_task_error(task_id, data)
return self.send_json({'resp': 1})
raise NotImplementedError(self.path)
def do_PUT(self):
raise NotImplementedError(self.path)
class Server(httpserver.Server):
"""Fake a Swarming bot API server for local testing."""
_HANDLER_CLS = Handler
def __init__(self):
super(Server, self).__init__()
self._lock = threading.Lock()
# Accumulated bot events.
self._bot_events = []
# Running tasks.
self._tasks = {}
# Bot reported task errors.
self._task_errors = {}
self.has_polled = threading.Event()
self.has_updated_task = threading.Event()
self.must_stop = False
def get_bot_events(self):
"""Returns the events reported by the bots."""
with self._lock:
return self._bot_events[:]
def get_tasks(self):
"""Returns the tasks run by the bots."""
with self._lock:
return copy.deepcopy(self._tasks)
def get_task_errors(self):
"""Returns the task errors reported by the bots."""
with self._lock:
return self._task_errors.copy()
def _add_bot_event(self, data):
# Used by the handler.
with self._lock:
self._bot_events.append(data)
def _on_task_update(self, task_id, data):
with self._lock:
self._tasks.setdefault(task_id, []).append(data)
must_stop = self.must_stop
self.has_updated_task.set()
return must_stop
def _add_task_error(self, task_id, data):
# Used by the handler.
with self._lock:
self._task_errors.setdefault(task_id, []).append(data)<|fim▁end|> | """Flatten a list of task updates into a single result.
This is more or less the equivalent of what task_scheduler.bot_update_task()
would do after all the bot API calls. |
<|file_name|>events.py<|end_file_name|><|fim▁begin|>from flask import g
from flask.ext.restplus import Namespace, reqparse, marshal
from app.api.attendees import TICKET
from app.api.microlocations import MICROLOCATION
from app.api.sessions import SESSION
from app.api.speakers import SPEAKER
from app.api.sponsors import SPONSOR
from app.api.tracks import TRACK
from app.helpers.data import save_to_db, record_activity
from app.models.call_for_papers import CallForPaper as EventCFS
from app.models.event import Event as EventModel
from app.models.event_copyright import EventCopyright
from app.models.role import Role
from app.models.social_link import SocialLink as SocialLinkModel
from app.models.user import ORGANIZER
from app.models.users_events_roles import UsersEventsRoles
from helpers.special_fields import EventTypeField, EventTopicField, \
EventPrivacyField, EventSubTopicField, EventStateField
from app.api.helpers import custom_fields as fields
from app.api.helpers.helpers import requires_auth, parse_args, \
can_access, fake_marshal_with, fake_marshal_list_with, erase_from_dict
from app.api.helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \
PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO
from app.api.helpers.utils import Resource, ETAG_HEADER_DEFN
api = Namespace('events', description='Events')
EVENT_CREATOR = api.model('EventCreator', {
'id': fields.Integer(),
'email': fields.Email()
})
EVENT_COPYRIGHT = api.model('EventCopyright', {
'holder': fields.String(),
'holder_url': fields.Uri(),
'licence': fields.String(),
'licence_url': fields.Uri(),
'year': fields.Integer(),
'logo': fields.String()
})
EVENT_CFS = api.model('EventCFS', {
'announcement': fields.String(),
'start_date': fields.DateTime(),
'end_date': fields.DateTime(),
'timezone': fields.String(),
'privacy': EventPrivacyField() # [public, private]
})
EVENT_VERSION = api.model('EventVersion', {
'event_ver': fields.Integer(),
'sessions_ver': fields.Integer(),
'speakers_ver': fields.Integer(),
'tracks_ver': fields.Integer(),
'sponsors_ver': fields.Integer(),
'microlocations_ver': fields.Integer()
})
SOCIAL_LINK = api.model('SocialLink', {
'id': fields.Integer(),
'name': fields.String(required=True),
'link': fields.String(required=True)
})
SOCIAL_LINK_POST = api.clone('SocialLinkPost', SOCIAL_LINK)
del SOCIAL_LINK_POST['id']
EVENT = api.model('Event', {
'id': fields.Integer(required=True),
'identifier': fields.String(),
'name': fields.String(required=True),
'event_url': fields.Uri(),
'email': fields.Email(),
'logo': fields.Upload(),
'start_time': fields.DateTime(required=True),
'end_time': fields.DateTime(required=True),
'timezone': fields.String(),
'latitude': fields.Float(),
'longitude': fields.Float(),
'background_image': fields.Upload(attribute='background_url'),
'description': fields.String(),
'location_name': fields.String(),
'searchable_location_name': fields.String(),
'organizer_name': fields.String(),
'organizer_description': fields.String(),
'state': EventStateField(default='Draft'),
'type': EventTypeField(),
'topic': EventTopicField(),
'sub_topic': EventSubTopicField(),
'privacy': EventPrivacyField(),
'ticket_url': fields.Uri(),
'creator': fields.Nested(EVENT_CREATOR, allow_null=True),
'copyright': fields.Nested(EVENT_COPYRIGHT, allow_null=True),
'schedule_published_on': fields.DateTime(),
'code_of_conduct': fields.String(),
'social_links': fields.List(fields.Nested(SOCIAL_LINK), attribute='social_link'),
'call_for_papers': fields.Nested(EVENT_CFS, allow_null=True),
'version': fields.Nested(EVENT_VERSION),
'has_session_speakers': fields.Boolean(default=False),
'thumbnail': fields.Uri(),
'large': fields.Uri()
})
EVENT_COMPLETE = api.clone('EventComplete', EVENT, {
'sessions': fields.List(fields.Nested(SESSION), attribute='session'),
'microlocations': fields.List(fields.Nested(MICROLOCATION), attribute='microlocation'),
'tracks': fields.List(fields.Nested(TRACK), attribute='track'),
'sponsors': fields.List(fields.Nested(SPONSOR), attribute='sponsor'),
'speakers': fields.List(fields.Nested(SPEAKER), attribute='speaker'),
'tickets': fields.List(fields.Nested(TICKET), attribute='tickets'),
})
EVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {
'results': fields.List(fields.Nested(EVENT))
})
EVENT_POST = api.clone('EventPost', EVENT)
del EVENT_POST['id']
del EVENT_POST['creator']
del EVENT_POST['social_links']
del EVENT_POST['version']
# ###################
# Data Access Objects
# ###################
class SocialLinkDAO(ServiceDAO):
"""
Social Link DAO
"""
version_key = 'event_ver'
class EventDAO(BaseDAO):
"""
Event DAO
"""
version_key = 'event_ver'
def fix_payload(self, data):
"""
Fixes the payload data.
Here converts string time from datetime obj
"""
datetime_fields = ['start_time', 'end_time', 'schedule_published_on']
for f in datetime_fields:
if f in data:
data[f] = EVENT_POST[f].from_str(data.get(f))
# cfs datetimes
if data.get('call_for_papers'):
for _ in ['start_date', 'end_date']:
if _ in data['call_for_papers']:
data['call_for_papers'][_] = EVENT_CFS[_].from_str(
data['call_for_papers'].get(_))
return data
def create(self, data, url):
data = self.validate(data)
payload = self.fix_payload(data)
# save copyright info
payload['copyright'] = CopyrightDAO.create(payload.get('copyright', {}), validate=False)
# save cfs info
if payload.get('call_for_papers'): # don't create if call_for_papers==null
payload['call_for_papers'] = CFSDAO.create(payload['call_for_papers'], validate=False)
# save event
new_event = self.model(**payload)
new_event.creator = g.user
save_to_db(new_event, "Event saved")
# set organizer
role = Role.query.filter_by(name=ORGANIZER).first()
uer = UsersEventsRoles(g.user, new_event, role)
save_to_db(uer, 'UER saved')
# Return created resource with a 201 status code and its Location
# (url) in the header.
resource_location = url + '/' + str(new_event.id)
return self.get(new_event.id), 201, {'Location': resource_location}
def update(self, event_id, data):
data = self.validate_put(data)
payload = self.fix_payload(data)
# get event
event = self.get(event_id)
# update copyright if key exists
if 'copyright' in payload:
CopyrightDAO.update(event.copyright.id, payload['copyright']
if payload['copyright'] else {})
payload.pop('copyright')
# update cfs
if 'call_for_papers' in payload:
cfs_data = payload.get('call_for_papers')
if event.call_for_papers:
if cfs_data: # update existing
CFSDAO.update(event.call_for_papers.id, cfs_data)
else: # delete if null
CFSDAO.delete(event.call_for_papers.id)
elif cfs_data: # create new (only if data exists)
CFSDAO.create(cfs_data, validate=False)
payload.pop('call_for_papers')
# master update<|fim▁hole|>DAO = EventDAO(EventModel, EVENT_POST)
CopyrightDAO = BaseDAO(EventCopyright, EVENT_COPYRIGHT)
CFSDAO = BaseDAO(EventCFS, EVENT_CFS) # CFS = Call For Speakers
# DEFINE PARAMS
EVENT_PARAMS = {
'location': {},
'contains': {
'description': 'Contains the string in name and description'
},
'state': {},
'privacy': {},
'type': {},
'topic': {},
'sub_topic': {},
'start_time_gt': {},
'start_time_lt': {},
'end_time_gt': {},
'end_time_lt': {},
'time_period': {},
'include': {
'description': 'Comma separated list of additional fields to load. '
'Supported: sessions,tracks,microlocations,speakers,sponsors)'
},
}
SINGLE_EVENT_PARAMS = {
'include': {
'description': 'Comma separated list of additional fields to load. '
'Supported: sessions,tracks,microlocations,speakers,sponsors,tickets)'
},
}
def get_extended_event_model(includes=None):
if includes is None:
includes = []
included_fields = {}
if 'sessions' in includes:
included_fields['sessions'] = fields.List(fields.Nested(SESSION), attribute='session')
if 'tracks' in includes:
included_fields['tracks'] = fields.List(fields.Nested(TRACK), attribute='track')
if 'microlocations' in includes:
included_fields['microlocations'] = fields.List(fields.Nested(MICROLOCATION), attribute='microlocation')
if 'sponsors' in includes:
included_fields['sponsors'] = fields.List(fields.Nested(SPONSOR), attribute='sponsor')
if 'speakers' in includes:
included_fields['speakers'] = fields.List(fields.Nested(SPEAKER), attribute='speaker')
if 'tickets' in includes:
included_fields['tickets'] = fields.List(fields.Nested(TICKET), attribute='tickets')
return EVENT.extend('ExtendedEvent', included_fields)
# DEFINE RESOURCES
class EventResource():
"""
Event Resource Base class
"""
event_parser = reqparse.RequestParser()
event_parser.add_argument('location', type=unicode, dest='__event_search_location')
event_parser.add_argument('contains', type=unicode, dest='__event_contains')
event_parser.add_argument('state', type=str)
event_parser.add_argument('privacy', type=str)
event_parser.add_argument('type', type=str)
event_parser.add_argument('topic', type=str)
event_parser.add_argument('sub_topic', type=str)
event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')
event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')
event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')
event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')
event_parser.add_argument('time_period', type=str, dest='__event_time_period')
event_parser.add_argument('include', type=str)
class SingleEventResource():
event_parser = reqparse.RequestParser()
event_parser.add_argument('include', type=str)
@api.route('/<int:event_id>')
@api.param('event_id')
@api.response(404, 'Event not found')
class Event(Resource, SingleEventResource):
@api.doc('get_event', params=SINGLE_EVENT_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@fake_marshal_with(EVENT_COMPLETE) # Fake marshal decorator to add response model to swagger doc
def get(self, event_id):
"""Fetch an event given its id"""
includes = parse_args(self.event_parser).get('include', '').split(',')
return marshal(DAO.get(event_id), get_extended_event_model(includes))
@requires_auth
@can_access
@api.doc('delete_event')
@api.marshal_with(EVENT)
def delete(self, event_id):
"""Delete an event given its id"""
event = DAO.delete(event_id)
record_activity('delete_event', event_id=event_id)
return event
@requires_auth
@can_access
@api.doc('update_event', responses=PUT_RESPONSES)
@api.marshal_with(EVENT)
@api.expect(EVENT_POST)
def put(self, event_id):
"""Update an event given its id"""
event = DAO.update(event_id, self.api.payload)
record_activity('update_event', event_id=event_id)
return event
@api.route('/<int:event_id>/event')
@api.param('event_id')
@api.response(404, 'Event not found')
class EventWebapp(Resource, SingleEventResource):
@api.doc('get_event_for_webapp')
@api.header(*ETAG_HEADER_DEFN)
@fake_marshal_with(EVENT_COMPLETE) # Fake marshal decorator to add response model to swagger doc
def get(self, event_id):
"""Fetch an event given its id.
Alternate endpoint for fetching an event.
"""
includes = parse_args(self.event_parser).get('include', '').split(',')
return marshal(DAO.get(event_id), get_extended_event_model(includes))
@api.route('')
class EventList(Resource, EventResource):
@api.doc('list_events', params=EVENT_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@fake_marshal_list_with(EVENT_COMPLETE)
def get(self):
"""List all events"""
parsed_args = parse_args(self.event_parser)
includes = parsed_args.get('include', '').split(',')
erase_from_dict(parsed_args, 'include')
return marshal(DAO.list(**parsed_args), get_extended_event_model(includes))
@requires_auth
@api.doc('create_event', responses=POST_RESPONSES)
@api.marshal_with(EVENT)
@api.expect(EVENT_POST)
def post(self):
"""Create an event"""
item = DAO.create(self.api.payload, self.api.url_for(self))
record_activity('create_event', event_id=item[0].id)
return item
@api.route('/page')
class EventListPaginated(Resource, PaginatedResourceBase, EventResource):
@api.doc('list_events_paginated', params=PAGE_PARAMS)
@api.doc(params=EVENT_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_with(EVENT_PAGINATED)
def get(self):
"""List events in a paginated manner"""
args = self.parser.parse_args()
return DAO.paginated_list(args=args, **parse_args(self.event_parser))
@api.route('/<int:event_id>/links')
@api.param('event_id')
class SocialLinkList(Resource):
@api.doc('list_social_links')
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_list_with(SOCIAL_LINK)
def get(self, event_id):
"""List all social links"""
return LinkDAO.list(event_id)
@requires_auth
@can_access
@api.doc('create_social_link', responses=POST_RESPONSES)
@api.marshal_with(SOCIAL_LINK)
@api.expect(SOCIAL_LINK_POST)
def post(self, event_id):
"""Create a social link"""
return LinkDAO.create(
event_id,
self.api.payload,
self.api.url_for(self, event_id=event_id)
)
@api.route('/<int:event_id>/links/<int:link_id>')
class SocialLink(Resource):
@requires_auth
@can_access
@api.doc('delete_social_link')
@api.marshal_with(SOCIAL_LINK)
def delete(self, event_id, link_id):
"""Delete a social link given its id"""
return LinkDAO.delete(event_id, link_id)
@requires_auth
@can_access
@api.doc('update_social_link', responses=PUT_RESPONSES)
@api.marshal_with(SOCIAL_LINK)
@api.expect(SOCIAL_LINK_POST)
def put(self, event_id, link_id):
"""Update a social link given its id"""
return LinkDAO.update(event_id, link_id, self.api.payload)
@api.hide
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_with(SOCIAL_LINK)
def get(self, event_id, link_id):
"""Fetch a social link given its id"""
return LinkDAO.get(event_id, link_id)<|fim▁end|> | return BaseDAO.update(self, event_id, payload, validate=False)
LinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST) |
<|file_name|>bitcoin_et.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="et" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="14"/>
<source>About Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/aboutdialog.ui" line="53"/>
<source><b>Bitcoin</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/aboutdialog.ui" line="97"/>
<source>Copyright © 2009-2012 Bitcoin Developers
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file license.txt or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young (eay@cryptsoft.com) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="20"/>
<source>These are your Bitcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="36"/>
<source>Double-click to edit address or label</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="63"/>
<source>Create a new address</source>
<translation>Loo uus aadress</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="77"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="66"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="80"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="91"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="102"/>
<source>Sign a message to prove you own this address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="105"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="116"/>
<source>Delete the currently selected address from the list. Only sending addresses can be deleted.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="119"/>
<source>&Delete</source>
<translation>&Kustuta</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="63"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../addressbookpage.cpp" line="65"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../addressbookpage.cpp" line="292"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../addressbookpage.cpp" line="293"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../addressbookpage.cpp" line="306"/>
<source>Error exporting</source>
<translation>Viga eksportimisel</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="306"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="142"/>
<source>Label</source>
<translation>Silt</translation>
</message>
<message>
<location filename="../addresstablemodel.cpp" line="142"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location filename="../addresstablemodel.cpp" line="178"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="47"/>
<source>Enter passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="61"/>
<source>New passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="75"/>
<source>Repeat new passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="34"/>
<source>Encrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="37"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="42"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="45"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="50"/>
<source>Decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="53"/>
<source>Change passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="54"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="100"/>
<source>Confirm wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="101"/>
<source>WARNING: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BITCOINS</b>!
Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="110"/>
<location filename="../askpassphrasedialog.cpp" line="159"/>
<source>Wallet encrypted</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="111"/>
<source>Bitcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="207"/>
<location filename="../askpassphrasedialog.cpp" line="231"/>
<source>Warning: The Caps Lock key is on.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="116"/>
<location filename="../askpassphrasedialog.cpp" line="123"/>
<location filename="../askpassphrasedialog.cpp" line="165"/>
<location filename="../askpassphrasedialog.cpp" line="171"/>
<source>Wallet encryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="117"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="124"/>
<location filename="../askpassphrasedialog.cpp" line="172"/>
<source>The supplied passphrases do not match.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="135"/>
<source>Wallet unlock failed</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="136"/>
<location filename="../askpassphrasedialog.cpp" line="147"/>
<location filename="../askpassphrasedialog.cpp" line="166"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="146"/>
<source>Wallet decryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="160"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="73"/>
<source>Bitcoin Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="215"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="248"/>
<source>Show/Hide &Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="515"/>
<source>Synchronizing with network...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="185"/>
<source>&Overview</source>
<translation>&Ülevaade</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="186"/>
<source>Show general overview of wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="191"/>
<source>&Transactions</source>
<translation>&Tehingud</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="192"/>
<source>Browse transaction history</source>
<translation>Sirvi tehingute ajalugu</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="197"/>
<source>&Address Book</source>
<translation>&Aadressiraamat</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="198"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="203"/>
<source>&Receive coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="204"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="209"/>
<source>&Send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="216"/>
<source>Prove you control an address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="235"/>
<source>E&xit</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="236"/>
<source>Quit application</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="239"/>
<source>&About %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="240"/>
<source>Show information about Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="242"/>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="243"/>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="245"/>
<source>&Options...</source>
<translation>&Valikud...</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="252"/>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="255"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="257"/>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="517"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="528"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="250"/>
<source>&Export...</source>
<translation>&Ekspordi...</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="210"/>
<source>Send coins to a Bitcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="246"/>
<source>Modify configuration options for Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="249"/>
<source>Show or hide the Bitcoin window</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="251"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="253"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="256"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="258"/>
<source>Change the passphrase used for wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="259"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="260"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="261"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="262"/>
<source>Verify a message signature</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="286"/>
<source>&File</source>
<translation>&Fail</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="296"/>
<source>&Settings</source>
<translation>&Seaded</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="302"/>
<source>&Help</source>
<translation>&Abiinfo</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="311"/>
<source>Tabs toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="322"/>
<source>Actions toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="334"/>
<location filename="../bitcoingui.cpp" line="343"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="343"/>
<location filename="../bitcoingui.cpp" line="399"/>
<source>Bitcoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="492"/>
<source>%n active connection(s) to Bitcoin network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="540"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="555"/>
<source>%n second(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="559"/>
<source>%n minute(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="563"/>
<source>%n hour(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="567"/>
<source>%n day(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="573"/>
<source>Up to date</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="580"/>
<source>Catching up...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="590"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="649"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="654"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="681"/>
<source>Sent transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="682"/>
<source>Incoming transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="683"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="804"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="812"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="835"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="835"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="838"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoingui.cpp" line="838"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="112"/>
<source>A fatal error occurred. Bitcoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="84"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>DisplayOptionsPage</name>
<message>
<location filename="../optionsdialog.cpp" line="246"/>
<source>Display</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="257"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="263"/>
<source>The user interface language can be set here. This setting will only take effect after restarting Bitcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="252"/>
<source>User Interface &Language:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="273"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="277"/>
<source>Choose the default subdivision unit to show in the interface, and when sending coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="284"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="285"/>
<source>Whether to show Bitcoin addresses in the transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="303"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="303"/>
<source>This setting will take effect after restarting Bitcoin.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="14"/>
<source>Edit Address</source>
<translation>Muuda aadressi</translation>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="25"/>
<source>&Label</source>
<translation>Si&lt</translation>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="35"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="42"/>
<source>&Address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="52"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="20"/>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="24"/>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="27"/>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="31"/>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="91"/>
<source>The entered address "%1" is already in the address book.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="96"/>
<source>The entered address "%1" is not a valid Bitcoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="101"/>
<source>Could not unlock wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="106"/>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>HelpMessageBox</name>
<message>
<location filename="../bitcoin.cpp" line="133"/>
<location filename="../bitcoin.cpp" line="143"/>
<source>Bitcoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="133"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="135"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="136"/>
<source>options</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="138"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="139"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="140"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="141"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>MainOptionsPage</name>
<message>
<location filename="../optionsdialog.cpp" line="227"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="212"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="204"/>
<source>Main</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="206"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="222"/>
<source>&Start Bitcoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="223"/>
<source>Automatically start Bitcoin after logging in to the system</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="226"/>
<source>&Detach databases at shutdown</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>MessagePage</name>
<message>
<location filename="../forms/messagepage.ui" line="14"/>
<source>Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="20"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="38"/>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="48"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="58"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="71"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="81"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="93"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="128"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="131"/>
<source>&Copy Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="142"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="145"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="31"/>
<source>Click "Sign Message" to get signature</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="114"/>
<source>Sign a message to prove you own this address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/messagepage.ui" line="117"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="30"/>
<source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="83"/>
<location filename="../messagepage.cpp" line="90"/>
<location filename="../messagepage.cpp" line="105"/>
<location filename="../messagepage.cpp" line="117"/>
<source>Error signing</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="83"/>
<source>%1 is not a valid address.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="90"/>
<source>%1 does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="105"/>
<source>Private key for %1 is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../messagepage.cpp" line="117"/>
<source>Sign failed</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>NetworkOptionsPage</name>
<message>
<location filename="../optionsdialog.cpp" line="345"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="347"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="348"/>
<source>Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="351"/>
<source>&Connect through SOCKS4 proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="352"/>
<source>Connect to the Bitcoin network through a SOCKS4 proxy (e.g. when connecting through Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="357"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="366"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="363"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="372"/>
<source>Port of the proxy (e.g. 1234)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../optionsdialog.cpp" line="135"/>
<source>Options</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="47"/>
<location filename="../forms/overviewpage.ui" line="204"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="89"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="147"/>
<source>Number of transactions:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="118"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="40"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="197"/>
<source><b>Recent transactions</b></source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="105"/>
<source>Your current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="134"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="154"/>
<source>Total number of transactions in wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="110"/>
<location filename="../overviewpage.cpp" line="111"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="32"/>
<source>QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="55"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="70"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="105"/>
<source>NOBL</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="121"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="144"/>
<source>Message:</source>
<translation>Sõnum:</translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="186"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="45"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="63"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="120"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="120"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="14"/>
<source>Bitcoin debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="56"/>
<location filename="../forms/rpcconsole.ui" line="79"/>
<location filename="../forms/rpcconsole.ui" line="102"/>
<location filename="../forms/rpcconsole.ui" line="125"/>
<location filename="../forms/rpcconsole.ui" line="161"/>
<location filename="../forms/rpcconsole.ui" line="214"/>
<location filename="../forms/rpcconsole.ui" line="237"/>
<location filename="../forms/rpcconsole.ui" line="260"/>
<location filename="../rpcconsole.cpp" line="245"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="69"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="24"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="39"/>
<source>Client</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="115"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="144"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="151"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="174"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="197"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="204"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="227"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="250"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="292"/>
<source>Debug logfile</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="299"/>
<source>Open the Bitcoin debug logfile from the current data directory. This can take a few seconds for large logfiles.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="302"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="323"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="92"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="372"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="212"/>
<source>Welcome to the Bitcoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="213"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="214"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="14"/>
<location filename="../sendcoinsdialog.cpp" line="122"/>
<location filename="../sendcoinsdialog.cpp" line="127"/>
<location filename="../sendcoinsdialog.cpp" line="132"/>
<location filename="../sendcoinsdialog.cpp" line="137"/>
<location filename="../sendcoinsdialog.cpp" line="143"/>
<location filename="../sendcoinsdialog.cpp" line="148"/>
<location filename="../sendcoinsdialog.cpp" line="153"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="64"/>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="67"/>
<source>&Add Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="84"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="87"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="106"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="113"/>
<source>123.456 NOBL</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="144"/>
<source>Confirm the send action</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="147"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="94"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="99"/>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="100"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="100"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="123"/>
<source>The recepient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="128"/>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="133"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="138"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="144"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="149"/>
<source>Error: Transaction creation failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="154"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="29"/>
<source>A&mount:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="42"/>
<source>Pay &To:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="66"/>
<location filename="../sendcoinsentry.cpp" line="25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="75"/>
<source>&Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="93"/>
<source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="103"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="113"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="120"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="130"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="137"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="26"/>
<source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="21"/>
<source>Open for %1 blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="23"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="29"/>
<source>%1/offline?</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="31"/>
<source>%1/unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="33"/>
<source>%1 confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="51"/>
<source><b>Status:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="56"/>
<source>, has not been successfully broadcast yet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="58"/>
<source>, broadcast through %1 node</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="60"/>
<source>, broadcast through %1 nodes</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="64"/>
<source><b>Date:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="71"/>
<source><b>Source:</b> Generated<br></source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="77"/>
<location filename="../transactiondesc.cpp" line="94"/>
<source><b>From:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="94"/>
<source>unknown</source>
<translation>tundmatu</translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="95"/>
<location filename="../transactiondesc.cpp" line="118"/>
<location filename="../transactiondesc.cpp" line="178"/>
<source><b>To:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="98"/>
<source> (yours, label: </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="100"/>
<source> (yours)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="136"/>
<location filename="../transactiondesc.cpp" line="150"/>
<location filename="../transactiondesc.cpp" line="195"/>
<location filename="../transactiondesc.cpp" line="212"/>
<source><b>Credit:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="138"/>
<source>(%1 matures in %2 more blocks)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="142"/>
<source>(not accepted)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="186"/>
<location filename="../transactiondesc.cpp" line="194"/>
<location filename="../transactiondesc.cpp" line="209"/>
<source><b>Debit:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="200"/>
<source><b>Transaction fee:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="216"/>
<source><b>Net amount:</b> </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="222"/>
<source>Message:</source>
<translation>Sõnum:</translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="224"/>
<source>Comment:</source>
<translation>Kommentaar:</translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="226"/>
<source>Transaction ID:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiondesc.cpp" line="229"/>
<source>Generated coins must wait 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, it will change to "not accepted" and not be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="14"/>
<source>Transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/transactiondescdialog.ui" line="20"/>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="226"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="226"/>
<source>Type</source>
<translation>Tüüp</translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="226"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="226"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message numerus="yes">
<location filename="../transactiontablemodel.cpp" line="281"/>
<source>Open for %n block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="284"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="287"/>
<source>Offline (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="290"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="293"/>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location filename="../transactiontablemodel.cpp" line="301"/>
<source>Mined balance will be available in %n more blocks</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="307"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="310"/>
<source>Generated but not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="353"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="355"/>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="358"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="360"/>
<source>Payment to yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="362"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="400"/>
<source>(n/a)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="599"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="601"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="603"/>
<source>Type of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="605"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="607"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="55"/>
<location filename="../transactionview.cpp" line="71"/>
<source>All</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="56"/>
<source>Today</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="57"/>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="58"/>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="59"/>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="60"/>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="61"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="72"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="74"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="76"/>
<source>To yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="77"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="78"/>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="85"/>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="92"/>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="126"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="127"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="128"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="129"/>
<source>Edit label</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="130"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="270"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="271"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="279"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="280"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="281"/>
<source>Type</source>
<translation>Tüüp</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="282"/>
<source>Label</source>
<translation>Silt</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="283"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="284"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="285"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="289"/>
<source>Error exporting</source>
<translation>Viga eksportimisel</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="289"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="384"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../transactionview.cpp" line="392"/>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>VerifyMessageDialog</name>
<message>
<location filename="../forms/verifymessagedialog.ui" line="14"/>
<source>Verify Signed Message</source>
<translation type="unfinished"/>
</message>
<message><|fim▁hole|> <translation type="unfinished"/>
</message>
<message>
<location filename="../forms/verifymessagedialog.ui" line="62"/>
<source>Verify a message and obtain the Bitcoin address used to sign the message</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/verifymessagedialog.ui" line="65"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/verifymessagedialog.ui" line="79"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/verifymessagedialog.ui" line="82"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/verifymessagedialog.ui" line="93"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/verifymessagedialog.ui" line="96"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="28"/>
<source>Enter Bitcoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="29"/>
<source>Click "Verify Message" to obtain address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="55"/>
<location filename="../verifymessagedialog.cpp" line="62"/>
<source>Invalid Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="55"/>
<source>The signature could not be decoded. Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="62"/>
<source>The signature did not match the message digest. Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="72"/>
<source>Address not found in address book.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../verifymessagedialog.cpp" line="72"/>
<source>Address found in address book: %1</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="158"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WindowOptionsPage</name>
<message>
<location filename="../optionsdialog.cpp" line="313"/>
<source>Window</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="316"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="317"/>
<source>Show only a tray icon after minimizing the window</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="320"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="321"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="43"/>
<source>Bitcoin version</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="44"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="45"/>
<source>Send command to -server or bitcoind</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="46"/>
<source>List commands</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="47"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="49"/>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="50"/>
<source>Specify configuration file (default: bitcoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="51"/>
<source>Specify pid file (default: bitcoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="52"/>
<source>Generate coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="53"/>
<source>Don't generate coins</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="54"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="55"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="56"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="57"/>
<source>Specify connection timeout (in milliseconds)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="63"/>
<source>Listen for connections on <port> (default: 8333 or testnet: 18333)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="64"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="66"/>
<source>Connect only to the specified node</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="67"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="68"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="69"/>
<source>Only connect to nodes in network <net> (IPv4 or IPv6)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="70"/>
<source>Try to discover public IP address (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="73"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="75"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="76"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="79"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 10000)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="80"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 10000)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="83"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="86"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="87"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="88"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="89"/>
<source>Output extra debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="90"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="91"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="92"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="93"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="94"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="95"/>
<source>Listen for JSON-RPC connections on <port> (default: 8332)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="96"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="97"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="98"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="101"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="102"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="103"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="104"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="105"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="106"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="108"/>
<source>
SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="111"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="112"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="113"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="114"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="145"/>
<source>Warning: Disk space is low</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="107"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="121"/>
<source>Cannot obtain a lock on data directory %s. Bitcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="48"/>
<source>Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="30"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="58"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="59"/>
<source>Select the version of socks proxy to use (4 or 5, 5 is default)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="60"/>
<source>Do not use proxy for connections to network <net> (IPv4 or IPv6)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="61"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="62"/>
<source>Pass DNS requests to (SOCKS5) proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="142"/>
<source>Loading addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="132"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="134"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="135"/>
<source>Error loading wallet.dat: Wallet requires newer version of Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="136"/>
<source>Wallet needed to be rewritten: restart Bitcoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="137"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="124"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="125"/>
<source>Unknown network specified in -noproxy: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="127"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="126"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="128"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="129"/>
<source>Not listening on any port</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="130"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="117"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="143"/>
<source>Error: could not start node</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="31"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="32"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="35"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="36"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="37"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="41"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="42"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="131"/>
<source>Loading block index...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="65"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="28"/>
<source>Unable to bind to %s on this computer. Bitcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="71"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="72"/>
<source>Accept connections from outside (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="74"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="81"/>
<source>Use Universal Plug and Play to map the listening port (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="82"/>
<source>Use Universal Plug and Play to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="85"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="118"/>
<source>Warning: -paytxfee is set very high. This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="133"/>
<source>Loading wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="138"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="139"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="140"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="141"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="144"/>
<source>Done loading</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="8"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="9"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=bitcoinrpc
rpcpassword=%s
(you do not need to remember this password)
If the file does not exist, create it with owner-readable-only file permissions.
</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="18"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="19"/>
<source>An error occured while setting up the RPC port %i for listening: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="20"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="25"/>
<source>Warning: Please check that your computer's date and time are correct. If your clock is wrong Bitcoin will not work properly.</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|> | <location filename="../forms/verifymessagedialog.ui" line="20"/>
<source>Enter the message and signature below (be careful to correctly copy newlines, spaces, tabs and other invisible characters) to obtain the Bitcoin address used to sign the message.</source> |
<|file_name|>test_setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""
from ade25.assetmanager.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
"""Test installation of ade25.assetmanager into Plone."""
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if ade25.assetmanager is installed with portal_quickinstaller."""
self.assertTrue(self.installer.isProductInstalled('ade25.assetmanager'))
def test_uninstall(self):
"""Test if ade25.assetmanager is cleanly uninstalled."""
self.installer.uninstallProducts(['ade25.assetmanager'])
self.assertFalse(self.installer.isProductInstalled('ade25.assetmanager'))
<|fim▁hole|> def test_browserlayer(self):
"""Test that IAde25AssetmanagerLayer is registered."""
from ade25.assetmanager.interfaces import IAde25AssetmanagerLayer
from plone.browserlayer import utils
self.failUnless(IAde25AssetmanagerLayer in utils.registered_layers())<|fim▁end|> | # browserlayer.xml |
<|file_name|>vec3.rs<|end_file_name|><|fim▁begin|>// vec3.rs
fn main() {
let mut v1 = vec![10,20,30,40];
v1.pop();
let mut v2 = Vec::new();
v2.push(10);
v2.push(20);
v2.push(30);
assert_eq!(v1,v2);
<|fim▁hole|> assert_eq!(v2,&[10,20,30,0,1]);
}<|fim▁end|> | v2.extend(0..2); |
<|file_name|>photometry.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import numpy as np
from astropy.io import fits
import scipy.ndimage
import scipy.fftpack
import scipy.optimize
def getcentroid(coordinates, values):
"""
Image centroid from image points im that match with a 2-d array pos, which
contains the locations of each point in an all-positive coordinate system.
"""
return np.sum(values*coordinates, axis=1) / np.sum(values)
def flatfunc(centroid, p0, p1, p2):
"""
Intended only for use with detrend().
"""
return p0*centroid[:, 0] + p1*centroid[:, 1] + p2
def detrend(flux,centroid):
"""
Detrend flux against centroid points. Returns normalized flux.
"""
for f in range(flux.shape[0]):
p, cov = scipy.optimize.curve_fit(flatfunc, centroid[f], flux[f])
flux[f] /= flatfunc(centroid[f], *p)
flux[f] /= np.median(flux[f])
return flux
def photometer(files, coords, obj, sky=None):
"""
Aperture photometery on images contained in files at initial star positions
near coords. Returns flux of each star with corresponding centroid locations.
"""
centroid = np.zeros((coords.shape[0],len(files),2))
flux = np.zeros((coords.shape[0],len(files)))
centroid[:,-1] = coords
if sky == None:
sky = obj
has_sky = sky != None
pos = np.mgrid[-sky:sky+1,-sky:sky+1]
dst = np.sqrt(np.sum(pos,0))
objap = dst <= obj
skyap = dst <= sky
objsize = np.sum(objap)
for f in range(len(files)):
im = fits.open(files[f])[0].data
if not has_sky:
skyval = np.median(im)*objsize
for c in range(coords.shape[0]):
#Could start new subprocess here
y,x = centroid[c,f-1]
if y > 0 and x > 0 and y < im.shape[0] and x < im.shape[1]:
y,x = seekmax(im,y,x)
y,x = getcentroid(*getstar(im,y,x))
if y > sky and x > sky and y < im.shape[0]-sky-1 and x < im.shape[1]-sky-1:
if has_sky:
skyval = np.median(im[y-sky:y+sky+1,x-sky:x+sky+1][skyap]) * objsize
flux[c,f] = np.sum(im[y-sky:y+sky+1,x-sky:x+sky+1][objap]) - skyval
centroid[c,f] = y,x
return flux,centroid
def find_stars(data):
#If passed a list, stack and median-combine first
if isinstance(data,list):
warps,aligned = astt.align(data)
aligned = np.asarray(aligned)
im = np.median(aligned,0)
else:
im = data
#Denoise the image with a fourier filter
fourier = np.fft.fft2(im)
fourier = np.fft.fftshift(fourier)
print(fourier.max())
fits.writeto('fourier.fits',abs(fourier),clobber=True)
exit()
#Compute the second derivative at every point
laplace = ndimage.laplace(smoothed)
#Image should be concave down where there are stars
stars = derivative < 0
#Stars should also be a local min in the laplacian
row_buffer = np.zeros(laplace.shape[0])
col_buffer = row_buffer[None,:]
above = np.vstack((laplace[1:],row_buffer[:]))
below = np.vstack((row_buffer[:,:],laplace[:-1]))
right = np.hstack((laplace[1:],row_buffer[:,:]))
stars = stars & (laplace < above) & (laplace < below) & (laplace < right)
#Denoise the image with a fourier filter
print(np.std(im))
fourier = scipy.fftpack.rfft(im)
fits.writeto('fft.fits',fourier,clobber=True)
fourier[0] = 0
fourier[-1] = 0
fourier[:,0] = 0
fourier[:,-1] = 0
test = scipy.fftpack.ifft(fourier).real
fits.writeto('ifft.fits',test,clobber=True)
print(np.std(test))
exit()
#Compute the second derivative at every point
laplace = ndimage.laplace(smoothed)
#Image should be concave down where there are stars
stars = derivative < 0
#Stars should also be a local min in the laplacian
row_buffer = np.zeros(laplace.shape[0])
col_buffer = np.zeros(laplace.shape[1][None,:])
above = np.vstack((laplace[1:],row_buffer[:]))
below = np.vstack((row_buffer[:,:],laplace[:-1]))
right = np.hstack((laplace[1:],row_buffer[:,:]))
stars = stars & (laplace < above) & (laplace < below) & (laplace < right) & (laplace < left)
#Pick a sky value
sky = np.median(im)
#Sigma threshold for sky level
signal = im > (sky + sky_sigma*np.sqrt(sky))
#Use binary erosion and propagation to remove isolated points of signal<|fim▁hole|> #Stars are only where signal is significant
stars = stars & signal
return stars
"""
image = fits.open('test.fits')[0].data
find_stars(image)
from astropy.io import fits
im = fits.open('sample_data/test_data0.fits')[0].data
find_stars(im)
"""
"""
Simple aperture photometry on image files
"""
def do_photometry(files, program_stars):
#Find stars
#Remove program stars from list
#Determine optimal aperture (s)
#Photometer everything
#Detrend against position
#Detrend against temperature, maybe other things
#Find good standards and correct
#Return flux and time arrays
pass<|fim▁end|> | eroded_signal = binary_erosion(signal)
signal = binary_propagation(eroded_signal,mask=signal)
|
<|file_name|>FaultLocalizationTransitionRelation.java<|end_file_name|><|fim▁begin|>package bixie.checker.transition_relation;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import bixie.boogie.controlflow.AbstractControlFlowFactory;
import bixie.boogie.controlflow.BasicBlock;
import bixie.boogie.controlflow.CfgProcedure;
import bixie.boogie.controlflow.expression.CfgExpression;
import bixie.boogie.controlflow.statement.CfgStatement;
import bixie.boogie.controlflow.util.HasseDiagram;
import bixie.boogie.controlflow.util.PartialBlockOrderNode;
import bixie.prover.Prover;
import bixie.prover.ProverExpr;
<|fim▁hole|> * @author schaef TODO: if we plan to do interprocedural analysis, we have to
* change the way globals are handled here.
*/
public class FaultLocalizationTransitionRelation extends
AbstractTransitionRelation {
public LinkedList<ProverExpr> obligations = new LinkedList<ProverExpr>();
public HashMap<CfgStatement, BasicBlock> stmtOriginMap = new HashMap<CfgStatement, BasicBlock>();
HasseDiagram hd;
public FaultLocalizationTransitionRelation(CfgProcedure cfg,
AbstractControlFlowFactory cff, Prover p) {
super(cfg, cff, p);
makePrelude();
// create the ProverExpr for the precondition
ProverExpr[] prec = new ProverExpr[cfg.getRequires().size()];
int i = 0;
for (CfgExpression expr : cfg.getRequires()) {
prec[i] = this.expression2proverExpression(expr);
i++;
}
this.requires = this.prover.mkAnd(prec);
// create the ProverExpr for the precondition
ProverExpr[] post = new ProverExpr[cfg.getEnsures().size()];
i = 0;
for (CfgExpression expr : cfg.getEnsures()) {
post[i] = this.expression2proverExpression(expr);
i++;
}
this.ensures = this.prover.mkAnd(post);
this.hd = new HasseDiagram(cfg);
computeSliceVC(cfg);
this.hd = null;
finalizeAxioms();
}
private void computeSliceVC(CfgProcedure cfg) {
PartialBlockOrderNode pon = hd.findNode(cfg.getRootNode());
LinkedList<BasicBlock> todo = new LinkedList<BasicBlock>();
todo.add(cfg.getRootNode());
HashSet<BasicBlock> mustreach = pon.getElements();
// System.err.println("-------");
// for (BasicBlock b : mustreach) System.err.println(b.getLabel());
// System.err.println("traverse ");
while (!todo.isEmpty()) {
BasicBlock current = todo.pop();
obligations.addAll(statements2proverExpression(current.getStatements()));
for (CfgStatement stmt : current.getStatements()) {
this.stmtOriginMap.put(stmt, current);
}
BasicBlock next = foo(current, mustreach);
if (next!=null) {
if (mustreach.contains(next)) {
todo.add(next);
} else {
System.err.println("FIXME: don't know what to do with "+next.getLabel());
}
}
}
// System.err.println("traverse done");
}
private BasicBlock foo(BasicBlock b, HashSet<BasicBlock> mustpass) {
HashSet<BasicBlock> done = new HashSet<BasicBlock>();
LinkedList<BasicBlock> todo = new LinkedList<BasicBlock>();
HashMap<BasicBlock, LinkedList<ProverExpr>> map = new HashMap<BasicBlock, LinkedList<ProverExpr>>();
todo.addAll(b.getSuccessors());
done.add(b);
map.put(b, new LinkedList<ProverExpr>());
map.get(b).add(this.prover.mkLiteral(true));
while (!todo.isEmpty()) {
BasicBlock current = todo.pop();
boolean allDone = true;
LinkedList<LinkedList<ProverExpr>> prefix = new LinkedList<LinkedList<ProverExpr>>();
for (BasicBlock pre : current.getPredecessors()) {
if (!done.contains(pre)) {
allDone = false; break;
}
prefix.add(map.get(pre));
}
if (!allDone) {
todo.add(current);
continue;
}
done.add(current);
LinkedList<ProverExpr> conj = new LinkedList<ProverExpr>();
if (prefix.size()>1) {
//TODO
LinkedList<ProverExpr> shared = prefix.getFirst();
for (LinkedList<ProverExpr> list : prefix) {
shared = sharedPrefix(shared, list);
}
conj.add(this.prover.mkAnd(shared.toArray(new ProverExpr[shared.size()])));
LinkedList<ProverExpr> disj = new LinkedList<ProverExpr>();
for (LinkedList<ProverExpr> list : prefix) {
LinkedList<ProverExpr> cutlist = new LinkedList<ProverExpr>();
cutlist.addAll(list);
cutlist.removeAll(shared);
disj.add(this.prover.mkAnd(cutlist.toArray(new ProverExpr[cutlist.size()])));
}
conj.add(this.prover.mkOr(disj.toArray(new ProverExpr[disj.size()])));
} else if (prefix.size()==1) {
conj.addAll(prefix.getFirst());
} else {
throw new RuntimeException("unexpected");
}
if (mustpass.contains(current)) {
if (conj.size()==1 && conj.getFirst().equals(this.prover.mkLiteral(true))) {
// in that case, the predecessor was already in mustpass so nothing needs to be done.
} else {
ProverExpr formula = this.prover.mkAnd(conj.toArray(new ProverExpr[conj.size()]));
this.obligations.add(formula);
}
return current;
} else {
for (CfgStatement stmt : current.getStatements()) {
this.stmtOriginMap.put(stmt, current);
}
conj.addAll(statements2proverExpression(current.getStatements()));
map.put(current, conj);
for (BasicBlock suc : current.getSuccessors()) {
if (!todo.contains(suc) && !done.contains(suc)) {
todo.add(suc);
}
}
}
}
return null;
}
private LinkedList<ProverExpr> sharedPrefix(LinkedList<ProverExpr> shared, LinkedList<ProverExpr> list) {
Iterator<ProverExpr> iterA = shared.iterator();
Iterator<ProverExpr> iterB = list.iterator();
LinkedList<ProverExpr> ret = new LinkedList<ProverExpr>();
while(iterA.hasNext() && iterB.hasNext()) {
ProverExpr next = iterA.next();
if (next == iterB.next()) {
ret.add(next);
}
}
return ret;
}
}<|fim▁end|> | /**
|
<|file_name|>loginEvent.py<|end_file_name|><|fim▁begin|>import userHelper
import serverPackets
import exceptions
import glob
import consoleHelper
import bcolors
import locationHelper
import countryHelper
import time
import generalFunctions
import channelJoinEvent
def handle(flaskRequest):
# Data to return
responseTokenString = "ayy"
responseData = bytes()
# The IP for your private network, to get the right location you should use your
# public IP (e.g http://ping.eu)
localIP = "172.20.7.107" # The ip you log in with
publicIP = "8.8.8.8" # google lul
# Get IP from flask request
requestIP = flaskRequest.headers.get("X-Forwarded-For")
if requestIP == localIP:
requestIP = publicIP
# Console output
print("> Accepting connection from {}...".format(requestIP))
# Split POST body so we can get username/password/hardware data
# 2:-3 thing is because requestData has some escape stuff that we don't need
loginData = str(flaskRequest.data)[2:-3].split("\\n")
# Process login<|fim▁hole|> # If true, print error to console
err = False
# Try to get the ID from username
userID = userHelper.getID(str(loginData[0]))
if userID == False:
# Invalid username
raise exceptions.loginFailedException()
if userHelper.checkLogin(userID, loginData[1]) == False:
# Invalid password
raise exceptions.loginFailedException()
# Make sure we are not banned
userAllowed = userHelper.getAllowed(userID)
if userAllowed == 0:
# Banned
raise exceptions.loginBannedException()
# Activate user (obviously not the banned.-.)
# But those who created an account without logging in through bancho yet
if userAllowed == 2:
# Not activated yet
userHelper.Activate(userID)
# No login errors!
# Delete old tokens for that user and generate a new one
glob.tokens.deleteOldTokens(userID)
responseToken = glob.tokens.addToken(userID)
responseTokenString = responseToken.token
# Get silence end
userSilenceEnd = max(0, userHelper.getSilenceEnd(userID)-int(time.time()))
# Get supporter/GMT
userRank = userHelper.getRankPrivileges(userID)
userGMT = False
userSupporter = True
if userRank >= 3:
userGMT = True
# Server restarting check
if glob.restarting == True:
raise exceptions.banchoRestartingException()
# Maintenance check
if glob.banchoConf.config["banchoMaintenance"] == True:
if userGMT == False:
# We are not mod/admin, delete token, send notification and logout
glob.tokens.deleteToken(responseTokenString)
raise exceptions.banchoMaintenanceException()
else:
# We are mod/admin, send warning notification and continue
responseToken.enqueue(serverPackets.notification("Bancho is in maintenance mode. Only mods/admins have full access to the server.\nType !system maintenance off in chat to turn off maintenance mode."))
# Send all needed login packets
responseToken.enqueue(serverPackets.silenceEndTime(userSilenceEnd))
responseToken.enqueue(serverPackets.userID(userID))
responseToken.enqueue(serverPackets.protocolVersion())
responseToken.enqueue(serverPackets.userSupporterGMT(userSupporter, userGMT))
responseToken.enqueue(serverPackets.userPanel(userID))
responseToken.enqueue(serverPackets.userStats(userID))
# Channel info end (before starting!?! wtf bancho?)
responseToken.enqueue(serverPackets.channelInfoEnd())
# Default opened channels
# TODO: Configurable default channels
channelJoinEvent.joinChannel(responseToken, "#osu")
channelJoinEvent.joinChannel(responseToken, "#announce")
if userRank >= 3:
# Join admin chanenl if we are mod/admin
# TODO: Separate channels for mods and admins
channelJoinEvent.joinChannel(responseToken, "#admin")
# Output channels info
for key, value in glob.channels.channels.items():
if value.publicRead == True:
responseToken.enqueue(serverPackets.channelInfo(key))
responseToken.enqueue(serverPackets.friendList(userID))
# Send main menu icon and login notification if needed
if glob.banchoConf.config["menuIcon"] != "":
responseToken.enqueue(serverPackets.mainMenuIcon(glob.banchoConf.config["menuIcon"]))
if glob.banchoConf.config["loginNotification"] != "":
responseToken.enqueue(serverPackets.notification(glob.banchoConf.config["loginNotification"]))
# Get everyone else userpanel
# TODO: Better online users handling
for key, value in glob.tokens.tokens.items():
responseToken.enqueue(serverPackets.userPanel(value.userID))
responseToken.enqueue(serverPackets.userStats(value.userID))
# Send online users IDs array
responseToken.enqueue(serverPackets.onlineUsers())
if requestIP == None:
# Get Last 'usual' IP from user (default 8.8.8.8 / USA / Google)
requestIP = userHelper.logInIP(userID)
# Get location and country from ip.zxq.co or database
if generalFunctions.stringToBool(glob.conf.config["server"]["localizeusers"]):
# Get location and country from IP
location = locationHelper.getLocation(requestIP)
country = countryHelper.getCountryID(locationHelper.getCountry(requestIP))
else:
# Set location to 0,0 and get country from db
print("[!] Location skipped")
location = [0,0]
country = countryHelper.getCountryID(userHelper.getCountry(userID))
# Set location and country
responseToken.setLocation(location)
responseToken.setCountry(country)
# Send to everyone our userpanel and userStats (so they now we have logged in)
glob.tokens.enqueueAll(serverPackets.userPanel(userID))
glob.tokens.enqueueAll(serverPackets.userStats(userID))
# Set reponse data to right value and reset our queue
responseData = responseToken.queue
responseToken.resetQueue()
# Some things about IP
logInIP = userHelper.logInIP(userID)
logInIP = logInIP['ip']
print("[!] First IP: "+format(logInIP))
if logInIP != requestIP:
# We'll inform...
message = "This is not your usual IP! Remember we don't like multiaccounting! (ignore if you did not)"
responseToken.enqueue(serverPackets.notification(message))
# Print logged in message
consoleHelper.printColored("> {} logged in ({})".format(loginData[0], responseToken.token), bcolors.GREEN)
except exceptions.loginFailedException:
# Login failed error packet
# (we don't use enqueue because we don't have a token since login has failed)
err = True
responseData += serverPackets.loginFailed()
except exceptions.loginBannedException:
# Login banned error packet
err = True
responseData += serverPackets.loginBanned()
except exceptions.banchoMaintenanceException:
# Bancho is in maintenance mode
responseData += serverPackets.notification("Our bancho server is in maintenance mode. Please try to login again later.")
responseData += serverPackets.loginError()
except exceptions.banchoRestartingException:
# Bancho is restarting
responseData += serverPackets.notification("Bancho is restarting. Try again in a few minutes.")
responseData += serverPackets.loginError()
finally:
# Print login failed message to console if needed
if err == True:
consoleHelper.printColored("> {}'s login failed".format(loginData[0]), bcolors.YELLOW)
return (responseTokenString, responseData)<|fim▁end|> | print("> Processing login request for {}...".format(loginData[0]))
try: |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>class MiddlewareMixin(object):
def __init__(self, get_response=None):<|fim▁hole|><|fim▁end|> | super(MiddlewareMixin, self).__init__() |
<|file_name|>MainActivity.java<|end_file_name|><|fim▁begin|>package com.ticktick.testimagecropper;
<|fim▁hole|>import java.io.FileNotFoundException;
import java.io.InputStream;
import com.ticktick.imagecropper.CropImageActivity;
import com.ticktick.imagecropper.CropIntent;
import android.app.Activity;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.view.View;
import android.widget.ImageView;
import android.widget.Toast;
public class MainActivity extends Activity {
public static final int REQUEST_CODE_PICK_IMAGE = 0x1;
public static final int REQUEST_CODE_IMAGE_CROPPER = 0x2;
public static final String CROPPED_IMAGE_FILEPATH = "/sdcard/test.jpg";
private ImageView mImageView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
mImageView = (ImageView)findViewById(R.id.CroppedImageView);
}
public void onClickButton(View v) {
Intent intent = new Intent(Intent.ACTION_PICK);
intent.setType("image/*");
startActivityForResult(intent,REQUEST_CODE_PICK_IMAGE);
}
public void startCropImage( Uri uri ) {
Intent intent = new Intent(this,CropImageActivity.class);
intent.setData(uri);
intent.putExtra(MediaStore.EXTRA_OUTPUT,Uri.fromFile(new File(CROPPED_IMAGE_FILEPATH)));
//intent.putExtra("aspectX",2);
//intent.putExtra("aspectY",1);
//intent.putExtra("outputX",320);
//intent.putExtra("outputY",240);
//intent.putExtra("maxOutputX",640);
//intent.putExtra("maxOutputX",480);
startActivityForResult(intent, REQUEST_CODE_IMAGE_CROPPER);
}
public void startCropImageByCropIntent( Uri uri ) {
CropIntent intent = new CropIntent();
intent.setImagePath(uri);
intent.setOutputPath(CROPPED_IMAGE_FILEPATH);
//intent.setAspect(2, 1);
//intent.setOutputSize(480,320);
//intent.setMaxOutputSize(480,320);
startActivityForResult(intent.getIntent(this), REQUEST_CODE_IMAGE_CROPPER);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (resultCode != RESULT_OK) {
return;
}
if( requestCode == REQUEST_CODE_PICK_IMAGE ) {
startCropImage(data.getData());
}
else if( requestCode == REQUEST_CODE_IMAGE_CROPPER ) {
Uri croppedUri = data.getExtras().getParcelable(MediaStore.EXTRA_OUTPUT);
InputStream in = null;
try {
in = getContentResolver().openInputStream(croppedUri);
Bitmap b = BitmapFactory.decodeStream(in);
mImageView.setImageBitmap(b);
Toast.makeText(this,"Crop success,saved at"+CROPPED_IMAGE_FILEPATH,Toast.LENGTH_LONG).show();
}
catch (FileNotFoundException e) {
e.printStackTrace();
}
}
super.onActivityResult(requestCode, resultCode, data);
}
}<|fim▁end|> | import java.io.File; |
<|file_name|>Track.py<|end_file_name|><|fim▁begin|>from src.deenuxapi.deezer.Model import Model
from src.deenuxapi.deezer.model.Artist import Artist
class Track(Model):
"""
Contains information about a track.
"""
def __init__(self, id: int, title: str, artist: Artist, duration: int = -1):
"""
Constructor of Track.
:param id: track's ID
:param title: track's full title
:param artist: track's artist
:param duration: track's duration in seconds (default is -1)
"""
super().__init__(id)
self.__title = title
self.__artist = artist
self.__duration = duration
@staticmethod
def map(obj):
return Track(
id=obj['id'],
title=obj['title'],
duration=obj['duration'],
artist=Artist(
id=obj['artist']['id'],
name=obj['artist']['name']
)
)
def __str__(self):
return '{} - {}'.format(self.__artist.name, self.__title)
"""
Getters and setters.
"""
@property
def title(self):
return self.__title
@title.setter
def title(self, title: str):
self.__title = title
@property
def artist(self):
return self.__artist
@artist.setter
def artist(self, artist: Artist):
self.__artist = artist<|fim▁hole|> return self.__duration
@duration.setter
def duration(self, duration: int):
self.__duration = duration<|fim▁end|> |
@property
def duration(self): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#<|fim▁hole|><|fim▁end|> | ##############################################################################
import wizard |
<|file_name|>impl.go<|end_file_name|><|fim▁begin|>/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ramledger
import (
"bytes"
"fmt"
"github.com/hyperledger/fabric/orderer/ledger"
cb "github.com/hyperledger/fabric/protos/common"
ab "github.com/hyperledger/fabric/protos/orderer"
"github.com/op/go-logging"
)
var logger = logging.MustGetLogger("orderer/ramledger")
type cursor struct {
list *simpleList
}
type simpleList struct {
next *simpleList
signal chan struct{}
block *cb.Block
}
type ramLedger struct {
maxSize int
size int
oldest *simpleList
newest *simpleList
}
// Next blocks until there is a new block available, or returns an error if the
// next block is no longer retrievable
func (cu *cursor) Next() (*cb.Block, cb.Status) {
// This only loops once, as signal reading indicates non-nil next
for {
if cu.list.next != nil {
cu.list = cu.list.next
return cu.list.block, cb.Status_SUCCESS
}
<-cu.list.signal
}
}
// ReadyChan supplies a channel which will block until Next will not block<|fim▁hole|> return cu.list.signal
}
// Iterator returns an Iterator, as specified by a cb.SeekInfo message, and its
// starting block number
func (rl *ramLedger) Iterator(startPosition *ab.SeekPosition) (ledger.Iterator, uint64) {
var list *simpleList
switch start := startPosition.Type.(type) {
case *ab.SeekPosition_Oldest:
oldest := rl.oldest
list = &simpleList{
block: &cb.Block{Header: &cb.BlockHeader{Number: oldest.block.Header.Number - 1}},
next: oldest,
signal: make(chan struct{}),
}
close(list.signal)
case *ab.SeekPosition_Newest:
newest := rl.newest
list = &simpleList{
block: &cb.Block{Header: &cb.BlockHeader{Number: newest.block.Header.Number - 1}},
next: newest,
signal: make(chan struct{}),
}
close(list.signal)
case *ab.SeekPosition_Specified:
oldest := rl.oldest
specified := start.Specified.Number
logger.Debugf("Attempting to return block %d", specified)
// Note the two +1's here is to accomodate the 'preGenesis' block of ^uint64(0)
if specified+1 < oldest.block.Header.Number+1 || specified > rl.newest.block.Header.Number+1 {
logger.Debugf("Returning error iterator because specified seek was %d with oldest %d and newest %d",
specified, rl.oldest.block.Header.Number, rl.newest.block.Header.Number)
return &ledger.NotFoundErrorIterator{}, 0
}
if specified == oldest.block.Header.Number {
list = &simpleList{
block: &cb.Block{Header: &cb.BlockHeader{Number: oldest.block.Header.Number - 1}},
next: oldest,
signal: make(chan struct{}),
}
close(list.signal)
break
}
list = oldest
for {
if list.block.Header.Number == specified-1 {
break
}
list = list.next // No need for nil check, because of range check above
}
}
cursor := &cursor{list: list}
blockNum := list.block.Header.Number + 1
// If the cursor is for pre-genesis, skip it, the block number wraps
if blockNum == ^uint64(0) {
cursor.Next()
blockNum++
}
return cursor, blockNum
}
// Height returns the number of blocks on the ledger
func (rl *ramLedger) Height() uint64 {
return rl.newest.block.Header.Number + 1
}
// Append appends a new block to the ledger
func (rl *ramLedger) Append(block *cb.Block) error {
if block.Header.Number != rl.newest.block.Header.Number+1 {
return fmt.Errorf("Block number should have been %d but was %d",
rl.newest.block.Header.Number+1, block.Header.Number)
}
if rl.newest.block.Header.Number+1 != 0 { // Skip this check for genesis block insertion
if !bytes.Equal(block.Header.PreviousHash, rl.newest.block.Header.Hash()) {
return fmt.Errorf("Block should have had previous hash of %x but was %x",
rl.newest.block.Header.Hash(), block.Header.PreviousHash)
}
}
rl.appendBlock(block)
return nil
}
func (rl *ramLedger) appendBlock(block *cb.Block) {
rl.newest.next = &simpleList{
signal: make(chan struct{}),
block: block,
}
lastSignal := rl.newest.signal
logger.Debugf("Sending signal that block %d has a successor", rl.newest.block.Header.Number)
rl.newest = rl.newest.next
close(lastSignal)
rl.size++
if rl.size > rl.maxSize {
logger.Debugf("RAM ledger max size about to be exceeded, removing oldest item: %d",
rl.oldest.block.Header.Number)
rl.oldest = rl.oldest.next
rl.size--
}
}<|fim▁end|> | func (cu *cursor) ReadyChan() <-chan struct{} { |
<|file_name|>model_control_one_enabled_Integration_MovingAverage_Seasonal_WeekOfYear_AR.py<|end_file_name|><|fim▁begin|>import tests.model_control.test_ozone_custom_models_enabled as testmod
<|fim▁hole|><|fim▁end|> | testmod.build_model( ['Integration'] , ['MovingAverage'] , ['Seasonal_WeekOfYear'] , ['AR'] ); |
<|file_name|>consistency_check.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use std::cmp::Ordering;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering};
use std::sync::Arc;
use crate::storage::mvcc::{Lock, LockType, WriteRef, WriteType};
use engine_traits::{
IterOptions, Iterable, Iterator as EngineIterator, KvEngine, Peekable, SeekKey,
};
use engine_traits::{CF_DEFAULT, CF_LOCK, CF_RAFT, CF_WRITE};
use kvproto::kvrpcpb::{MvccInfo, MvccLock, MvccValue, MvccWrite, Op};
use raftstore::coprocessor::{ConsistencyCheckMethod, ConsistencyCheckObserver, Coprocessor};
use raftstore::Result;
use tikv_util::keybuilder::KeyBuilder;
use txn_types::Key;
const PHYSICAL_SHIFT_BITS: usize = 18;
const SAFE_POINT_WINDOW: usize = 120;
// When leader broadcasts a ComputeHash command to followers, it's possible that the safe point
// becomes stale when the command reaches followers. So use a 2 minutes window to reduce this.
fn get_safe_point_for_check(mut safe_point: u64) -> u64 {
safe_point >>= PHYSICAL_SHIFT_BITS;
safe_point += (SAFE_POINT_WINDOW * 1000) as u64; // 120s * 1000ms/s.
safe_point << PHYSICAL_SHIFT_BITS
}
const fn zero_safe_point_for_check() -> u64 {
let mut safe_point = 0;
safe_point >>= PHYSICAL_SHIFT_BITS;
safe_point += (SAFE_POINT_WINDOW * 1000) as u64; // 120s * 1000ms/s.
safe_point << PHYSICAL_SHIFT_BITS
}
#[derive(Clone)]
pub struct Mvcc<E: KvEngine> {
_engine: PhantomData<E>,
local_safe_point: Arc<AtomicU64>,
}
impl<E: KvEngine> Coprocessor for Mvcc<E> {}
impl<E: KvEngine> Mvcc<E> {
pub fn new(safe_point: Arc<AtomicU64>) -> Self {
Mvcc {
_engine: Default::default(),
local_safe_point: safe_point,
}
}
}
impl<E: KvEngine> ConsistencyCheckObserver<E> for Mvcc<E> {
fn update_context(&self, context: &mut Vec<u8>) -> bool {
context.push(ConsistencyCheckMethod::Mvcc as u8);
context.reserve(8);
let len = context.len();
let mut safe_point = self.local_safe_point.load(AtomicOrdering::Acquire);
safe_point = get_safe_point_for_check(safe_point);
unsafe {
context.set_len(len + 8);
std::ptr::copy_nonoverlapping(
safe_point.to_le_bytes().as_ptr(),
&mut context[len] as _,
8,
);
}
// Skiped all other observers.
true
}
fn compute_hash(
&self,
region: &kvproto::metapb::Region,
context: &mut &[u8],
snap: &E::Snapshot,
) -> Result<Option<u32>> {
if context.is_empty() {
return Ok(None);
}
assert_eq!(context[0], ConsistencyCheckMethod::Mvcc as u8);
let safe_point = u64::from_le_bytes(context[1..9].try_into().unwrap());
*context = &context[9..];
let local_safe_point = self.local_safe_point.load(AtomicOrdering::Acquire);
if safe_point < local_safe_point || safe_point <= zero_safe_point_for_check() {
warn!(
"skip consistency check"; "region_id" => region.get_id(),
"safe_ponit" => safe_point,
"local_safe_point" => local_safe_point,
"zero" => zero_safe_point_for_check(),
);
return Ok(None);
}
let mut scanner = MvccInfoScanner::new(
|cf, opts| snap.iterator_cf_opt(cf, opts).map_err(|e| box_err!(e)),
Some(&keys::data_key(region.get_start_key())),
Some(&keys::data_end_key(region.get_end_key())),
MvccChecksum::new(safe_point),
)?;
while scanner.next_item()?.is_some() {}
// Computes the hash from the Region state too.
let mut digest = scanner.observer.digest;
let region_state_key = keys::region_state_key(region.get_id());
digest.update(®ion_state_key);
match snap.get_value_cf(CF_RAFT, ®ion_state_key) {
Err(e) => return Err(e.into()),
Ok(Some(v)) => digest.update(&v),
Ok(None) => {}
}
Ok(Some(digest.finalize()))
}
}
pub trait MvccInfoObserver {
type Target;
// Meet a new mvcc record prefixed `key`.
fn on_new_item(&mut self, key: &[u8]);
// Emit a complete mvcc record.
fn emit(&mut self) -> Self::Target;
fn on_write(&mut self, key: &[u8], value: &[u8]) -> Result<bool>;
fn on_lock(&mut self, key: &[u8], value: &[u8]) -> Result<bool>;
fn on_default(&mut self, key: &[u8], value: &[u8]) -> Result<bool>;
}
pub struct MvccInfoScanner<Iter: EngineIterator, Ob: MvccInfoObserver> {
lock_iter: Iter,
default_iter: Iter,
write_iter: Iter,
observer: Ob,
}
impl<Iter: EngineIterator, Ob: MvccInfoObserver> MvccInfoScanner<Iter, Ob> {
pub fn new<F>(f: F, from: Option<&[u8]>, to: Option<&[u8]>, ob: Ob) -> Result<Self>
where
F: Fn(&str, IterOptions) -> Result<Iter>,
{
let from = from.unwrap_or(keys::DATA_MIN_KEY);
let to = to.unwrap_or(keys::DATA_MAX_KEY);
let key_builder = |key: &[u8]| -> Result<Option<KeyBuilder>> {
if !keys::validate_data_key(key) && key != keys::DATA_MAX_KEY {
return Err(box_err!("non-mvcc area {}", log_wrappers::Value::key(key)));
}
Ok(Some(KeyBuilder::from_vec(key.to_vec(), 0, 0)))
};
let iter_opts = IterOptions::new(key_builder(from)?, key_builder(to)?, false);
let gen_iter = |cf: &str| -> Result<Iter> {
let mut iter = f(cf, iter_opts.clone())?;
box_try!(iter.seek(SeekKey::Key(from)));
Ok(iter)
};
Ok(MvccInfoScanner {
lock_iter: gen_iter(CF_LOCK)?,
default_iter: gen_iter(CF_DEFAULT)?,
write_iter: gen_iter(CF_WRITE)?,
observer: ob,
})
}
fn next_item(&mut self) -> Result<Option<Ob::Target>> {
let mut lock_ok = box_try!(self.lock_iter.valid());
let mut writes_ok = box_try!(self.write_iter.valid());
let prefix = match (lock_ok, writes_ok) {
(false, false) => return Ok(None),
(true, false) => self.lock_iter.key(),
(false, true) => box_try!(Key::truncate_ts_for(self.write_iter.key())),
(true, true) => {
let prefix1 = self.lock_iter.key();
let prefix2 = box_try!(Key::truncate_ts_for(self.write_iter.key()));
match prefix1.cmp(prefix2) {
Ordering::Less => {
writes_ok = false;
prefix1
}
Ordering::Greater => {
lock_ok = false;
prefix2
}
Ordering::Equal => prefix1,
}
}
};
self.observer.on_new_item(prefix);
while writes_ok {
let (key, value) = (self.write_iter.key(), self.write_iter.value());
writes_ok = self.observer.on_write(key, value)? && box_try!(self.write_iter.next());
}
while lock_ok {
let (key, value) = (self.lock_iter.key(), self.lock_iter.value());
lock_ok = self.observer.on_lock(key, value)? && box_try!(self.lock_iter.next());
}
let mut ok = box_try!(self.default_iter.valid());
while ok {
let (key, value) = (self.default_iter.key(), self.default_iter.value());
ok = self.observer.on_default(key, value)? && box_try!(self.default_iter.next());
}
Ok(Some(self.observer.emit()))
}
}
#[derive(Clone, Default)]
struct MvccInfoCollector {
current_item: Vec<u8>,
mvcc_info: MvccInfo,
}
impl MvccInfoObserver for MvccInfoCollector {
type Target = (Vec<u8>, MvccInfo);
fn on_new_item(&mut self, key: &[u8]) {
self.current_item = key.to_vec();
}
fn emit(&mut self) -> Self::Target {
let item = std::mem::take(&mut self.current_item);
let info = std::mem::take(&mut self.mvcc_info);
(item, info)
}
fn on_write(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, commit_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let write = box_try!(WriteRef::parse(&value));
let mut write_info = MvccWrite::default();
match write.write_type {
WriteType::Put => write_info.set_type(Op::Put),
WriteType::Delete => write_info.set_type(Op::Del),
WriteType::Lock => write_info.set_type(Op::Lock),
WriteType::Rollback => write_info.set_type(Op::Rollback),
}
write_info.set_start_ts(write.start_ts.into_inner());
write_info.set_commit_ts(commit_ts.into_inner());
if let Some(ref value) = write.short_value {
write_info.set_short_value(value.to_vec());
}
self.mvcc_info.mut_writes().push(write_info);
Ok(true)
}
fn on_lock(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
if key != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let lock = box_try!(Lock::parse(value));
let mut lock_info = MvccLock::default();
match lock.lock_type {
LockType::Put => lock_info.set_type(Op::Put),
LockType::Delete => lock_info.set_type(Op::Del),
LockType::Lock => lock_info.set_type(Op::Lock),
LockType::Pessimistic => lock_info.set_type(Op::PessimisticLock),
}
lock_info.set_start_ts(lock.ts.into_inner());
lock_info.set_primary(lock.primary);
lock_info.set_short_value(lock.short_value.unwrap_or_default());
self.mvcc_info.set_lock(lock_info);
Ok(true)
}
fn on_default(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, start_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let mut value_info = MvccValue::default();
value_info.set_start_ts(start_ts.into_inner());
value_info.set_value(value.to_vec());
self.mvcc_info.mut_values().push(value_info);
Ok(true)
}
}
pub struct MvccInfoIterator<Iter: EngineIterator> {
scanner: MvccInfoScanner<Iter, MvccInfoCollector>,
limit: usize,
count: usize,
}
impl<Iter: EngineIterator> MvccInfoIterator<Iter> {
pub fn new<F>(f: F, from: Option<&[u8]>, to: Option<&[u8]>, limit: usize) -> Result<Self>
where
F: Fn(&str, IterOptions) -> Result<Iter>,
{
let scanner = MvccInfoScanner::new(f, from, to, MvccInfoCollector::default())?;
Ok(Self {
scanner,
limit,
count: 0,
})
}
}
impl<Iter: EngineIterator> Iterator for MvccInfoIterator<Iter> {
type Item = Result<(Vec<u8>, MvccInfo)>;
fn next(&mut self) -> Option<Result<(Vec<u8>, MvccInfo)>> {
if self.limit != 0 && self.count >= self.limit {
return None;
}
match self.scanner.next_item() {
Ok(Some(item)) => {
self.count += 1;
Some(Ok(item))
}
Ok(None) => None,
Err(e) => Some(Err(e)),
}
}
}
struct MvccChecksum {
safe_point: u64,
digest: crc32fast::Hasher,
current_item: Vec<u8>,
committed_txns: Vec<u64>,
committed_txns_sorted: bool,
}
impl MvccChecksum {
fn new(safe_point: u64) -> Self {
Self {
safe_point,
digest: crc32fast::Hasher::new(),
current_item: vec![],
committed_txns: vec![],
committed_txns_sorted: false,
}
}
}
impl MvccInfoObserver for MvccChecksum {
type Target = ();
fn on_new_item(&mut self, key: &[u8]) {
self.current_item = key.to_vec();
}
fn emit(&mut self) -> Self::Target {
self.current_item.clear();
self.committed_txns.clear();
}<|fim▁hole|> if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let commit_ts = commit_ts.into_inner();
if commit_ts <= self.safe_point {
// Skip stale records.
return Ok(true);
}
let write = box_try!(WriteRef::parse(&value));
let start_ts = write.start_ts.into_inner();
self.digest.update(key);
self.digest.update(value);
self.committed_txns.push(start_ts);
Ok(true)
}
fn on_lock(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let lock = box_try!(Lock::parse(value));
if lock.ts.into_inner() <= self.safe_point {
// Skip stale records.
return Ok(true);
}
self.digest.update(key);
self.digest.update(value);
Ok(true)
}
fn on_default(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, start_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
if !self.committed_txns_sorted {
self.committed_txns.sort_unstable();
self.committed_txns_sorted = true;
}
let start_ts = start_ts.into_inner();
if start_ts > self.safe_point && self.committed_txns.binary_search(&start_ts).is_ok() {
self.digest.update(key);
self.digest.update(value);
}
Ok(true)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::kv::TestEngineBuilder;
use crate::storage::txn::tests::must_rollback;
use crate::storage::txn::tests::{must_commit, must_prewrite_delete, must_prewrite_put};
use engine_rocks::RocksEngine;
#[test]
fn test_update_context() {
let safe_point = Arc::new(AtomicU64::new((123 << PHYSICAL_SHIFT_BITS) * 1000));
let observer = Mvcc::<RocksEngine>::new(safe_point);
let mut context = Vec::new();
assert!(observer.update_context(&mut context));
assert_eq!(context.len(), 9);
assert_eq!(context[0], ConsistencyCheckMethod::Mvcc as u8);
let safe_point = u64::from_le_bytes(context[1..9].try_into().unwrap());
assert_eq!(safe_point, (243 << PHYSICAL_SHIFT_BITS) * 1000);
}
#[test]
fn test_mvcc_checksum() {
let engine = TestEngineBuilder::new().build().unwrap();
must_prewrite_put(&engine, b"zAAAAA", b"value", b"PRIMARY", 100);
must_commit(&engine, b"zAAAAA", 100, 101);
must_prewrite_put(&engine, b"zCCCCC", b"value", b"PRIMARY", 110);
must_commit(&engine, b"zCCCCC", 110, 111);
must_prewrite_put(&engine, b"zBBBBB", b"value", b"PRIMARY", 200);
must_commit(&engine, b"zBBBBB", 200, 201);
must_prewrite_put(&engine, b"zDDDDD", b"value", b"PRIMARY", 200);
must_rollback(&engine, b"zDDDDD", 200);
must_prewrite_put(&engine, b"zFFFFF", b"value", b"PRIMARY", 200);
must_prewrite_delete(&engine, b"zGGGGG", b"PRIMARY", 200);
let mut checksums = Vec::with_capacity(3);
for &safe_point in &[150, 160, 100] {
let raw = engine.get_rocksdb();
let mut scanner = MvccInfoScanner::new(
|cf, opts| raw.iterator_cf_opt(cf, opts).map_err(|e| box_err!(e)),
Some(&keys::data_key(b"")),
Some(&keys::data_end_key(b"")),
MvccChecksum::new(safe_point),
)
.unwrap();
while scanner.next_item().unwrap().is_some() {}
let digest = scanner.observer.digest;
checksums.push(digest.finalize());
}
assert_eq!(checksums[0], checksums[1]);
assert_ne!(checksums[0], checksums[2]);
}
#[test]
fn test_mvcc_info_collector() {
use crate::storage::mvcc::Write;
use engine_rocks::raw::{ColumnFamilyOptions, DBOptions};
use engine_rocks::raw_util::CFOptions;
use engine_traits::SyncMutable;
use txn_types::TimeStamp;
let tmp = tempfile::Builder::new()
.prefix("test_debug")
.tempdir()
.unwrap();
let path = tmp.path().to_str().unwrap();
let engine = Arc::new(
engine_rocks::raw_util::new_engine_opt(
path,
DBOptions::new(),
vec![
CFOptions::new(CF_DEFAULT, ColumnFamilyOptions::new()),
CFOptions::new(CF_WRITE, ColumnFamilyOptions::new()),
CFOptions::new(CF_LOCK, ColumnFamilyOptions::new()),
CFOptions::new(CF_RAFT, ColumnFamilyOptions::new()),
],
)
.unwrap(),
);
let engine = RocksEngine::from_db(engine);
let cf_default_data = vec![
(b"k1", b"v", 5.into()),
(b"k2", b"x", 10.into()),
(b"k3", b"y", 15.into()),
];
for &(prefix, value, ts) in &cf_default_data {
let encoded_key = Key::from_raw(prefix).append_ts(ts);
let key = keys::data_key(encoded_key.as_encoded().as_slice());
engine.put(key.as_slice(), value).unwrap();
}
let cf_lock_data = vec![
(b"k1", LockType::Put, b"v", 5.into()),
(b"k4", LockType::Lock, b"x", 10.into()),
(b"k5", LockType::Delete, b"y", 15.into()),
];
for &(prefix, tp, value, version) in &cf_lock_data {
let encoded_key = Key::from_raw(prefix);
let key = keys::data_key(encoded_key.as_encoded().as_slice());
let lock = Lock::new(
tp,
value.to_vec(),
version,
0,
None,
TimeStamp::zero(),
0,
TimeStamp::zero(),
);
let value = lock.to_bytes();
engine
.put_cf(CF_LOCK, key.as_slice(), value.as_slice())
.unwrap();
}
let cf_write_data = vec![
(b"k2", WriteType::Put, 5.into(), 10.into()),
(b"k3", WriteType::Put, 15.into(), 20.into()),
(b"k6", WriteType::Lock, 25.into(), 30.into()),
(b"k7", WriteType::Rollback, 35.into(), 40.into()),
];
for &(prefix, tp, start_ts, commit_ts) in &cf_write_data {
let encoded_key = Key::from_raw(prefix).append_ts(commit_ts);
let key = keys::data_key(encoded_key.as_encoded().as_slice());
let write = Write::new(tp, start_ts, None);
let value = write.as_ref().to_bytes();
engine
.put_cf(CF_WRITE, key.as_slice(), value.as_slice())
.unwrap();
}
let scan_mvcc = |start: &[u8], end: &[u8], limit: u64| {
MvccInfoIterator::new(
|cf, opts| engine.iterator_cf_opt(cf, opts).map_err(|e| box_err!(e)),
if start.is_empty() { None } else { Some(start) },
if end.is_empty() { None } else { Some(end) },
limit as usize,
)
.unwrap()
};
let mut count = 0;
for key_and_mvcc in scan_mvcc(b"z", &[], 30) {
assert!(key_and_mvcc.is_ok());
count += 1;
}
assert_eq!(count, 7);
}
}<|fim▁end|> |
fn on_write(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, commit_ts) = box_try!(Key::split_on_ts_for(key)); |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
""" Validators for wx widgets.
Copyright (c) Karol Będkowski, 2006-2013
This file is part of wxGTD
This is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, version 2.
"""
__author__ = "Karol Będkowski"
<|fim▁hole|>__copyright__ = "Copyright (c) Karol Będkowski, 2006-2013"
__version__ = '2013-04-21'
__all__ = ['ValidatorDv', 'Validator', 'ValidatorDate', 'ValidatorTime',
'ValidatorColorStr']
from .validator import Validator, ValidatorDv, ValidatorDate, ValidatorTime, \
ValidatorColorStr<|fim▁end|> | |
<|file_name|>np_array_ops.py<|end_file_name|><|fim▁begin|># Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common array methods."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import math
import numbers
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.util import nest
newaxis = np_export.np_export_constant(__name__, 'newaxis', np.newaxis)
@np_utils.np_doc('empty')
def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
return zeros(shape, dtype)
@np_utils.np_doc('empty_like')
def empty_like(a, dtype=None):
return zeros_like(a, dtype)
@np_utils.np_doc('zeros')
def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
dtype = (
np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type())
return array_ops.zeros(shape, dtype=dtype)
@np_utils.np_doc('zeros_like')
def zeros_like(a, dtype=None): # pylint: disable=missing-docstring
if dtype is None:
# We need to let np_utils.result_type decide the dtype, not tf.zeros_like
dtype = np_utils.result_type(a)
else:
# TF and numpy has different interpretations of Python types such as
# `float`, so we let `np_utils.result_type` decide.
dtype = np_utils.result_type(dtype)
dtype = dtypes.as_dtype(dtype) # Work around b/149877262
return array_ops.zeros_like(a, dtype)
@np_utils.np_doc('ones')
def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
if dtype:
dtype = np_utils.result_type(dtype)
return array_ops.ones(shape, dtype=dtype)
@np_utils.np_doc('ones_like')
def ones_like(a, dtype=None):
if dtype is None:
dtype = np_utils.result_type(a)
else:
dtype = np_utils.result_type(dtype)
return array_ops.ones_like(a, dtype)
@np_utils.np_doc('eye')
def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring
if dtype:
dtype = np_utils.result_type(dtype)
if not M:
M = N
# Making sure N, M and k are `int`
N = int(N)
M = int(M)
k = int(k)
if k >= M or -k >= N:
# tf.linalg.diag will raise an error in this case
return zeros([N, M], dtype=dtype)
if k == 0:
return linalg_ops.eye(N, M, dtype=dtype)
# We need the precise length, otherwise tf.linalg.diag will raise an error
diag_len = min(N, M)
if k > 0:
if N >= M:
diag_len -= k
elif N + k > M:
diag_len = M - k
elif k <= 0:
if M >= N:
diag_len += k
elif M - k > N:
diag_len = N + k
diagonal_ = array_ops.ones([diag_len], dtype=dtype)
return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k)
@np_utils.np_doc('identity')
def identity(n, dtype=float):
return eye(N=n, M=n, dtype=dtype)
@np_utils.np_doc('full')
def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
if not isinstance(shape, np_arrays.ndarray):
shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
shape = atleast_1d(shape)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, shape)
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
@np_utils.np_doc_only('full_like')
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name
"""order, subok and shape arguments mustn't be changed."""
if order != 'K':
raise ValueError('Non-standard orders are not supported.')
if not subok:
raise ValueError('subok being False is not supported.')
if shape:
raise ValueError('Overriding the shape is not supported.')
a = asarray(a)
dtype = dtype or np_utils.result_type(a)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, array_ops.shape(a))
def _array_internal(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Main implementation of np.array()."""
result_t = val
if not isinstance(result_t, ops.Tensor):
if not dtype:
dtype = np_utils.result_type(result_t)
# We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
# convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
# while np.array allows them. We need to convert-then-cast.
# EagerTensor conversion complains about "mixed types" when converting
# tensors with no dtype information. This is because it infers types based
# on one selected item in the list. So e.g. when converting [2., 2j]
# to a tensor, it will select float32 as the inferred type and not be able
# to convert the list to a float 32 tensor.
# Since we have some information about the final dtype we care about, we
# supply that information so that convert_to_tensor will do best-effort
# conversion to that dtype first.
result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
result_t = math_ops.cast(result_t, dtype=dtype)
elif dtype:
result_t = math_ops.cast(result_t, dtype)
if copy:
result_t = array_ops.identity(result_t)
if ndmin == 0:
return result_t
ndims = array_ops.rank(result_t)
def true_fn():
old_shape = array_ops.shape(result_t)
new_shape = array_ops.concat(
[array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
return array_ops.reshape(result_t, new_shape)
result_t = np_utils.cond(
np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)
return result_t
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Since Tensors are immutable, a copy is made only if val is placed on a
different device than the current one. Even if `copy` is False, a new Tensor
may need to be built to satisfy `dtype` and `ndim`. This is used only if `val`
is an ndarray or a Tensor.
""" # pylint:disable=g-docstring-missing-newline
if dtype:
dtype = np_utils.result_type(dtype)
return _array_internal(val, dtype, copy, ndmin)
# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc('asarray')
def asarray(a, dtype=None):
if dtype:
dtype = np_utils.result_type(dtype)
if isinstance(a, np_arrays.ndarray) and (
not dtype or dtype == a.dtype.as_numpy_dtype):
return a
return array(a, dtype, copy=False)
@np_utils.np_doc('asanyarray')
def asanyarray(a, dtype=None):
return asarray(a, dtype)
@np_utils.np_doc('ascontiguousarray')
def ascontiguousarray(a, dtype=None):
return array(a, dtype, ndmin=1)
# Numerical ranges.
@np_utils.np_doc('arange')
def arange(start, stop=None, step=1, dtype=None):
"""Returns `step`-separated values in the range [start, stop).
Args:
start: Start of the interval. Included in the range.
stop: End of the interval. If not specified, `start` is treated as 0 and
`start` value is used as `stop`. If specified, it is not included in the
range if `step` is integer. When `step` is floating point, it may or may
not be included.
step: The difference between 2 consecutive values in the output range. It is
recommended to use `linspace` instead of using non-integer values for
`step`.
dtype: Optional. Type of the resulting ndarray. Could be a python type, a
NumPy type or a TensorFlow `DType`. If not provided, the largest type of
`start`, `stop`, `step` is used.
Raises:
ValueError: If step is zero.
"""
if not step:
raise ValueError('step must be non-zero.')
if dtype:
dtype = np_utils.result_type(dtype)
else:
if stop is None:
dtype = np_utils.result_type(start, step)
else:
dtype = np_utils.result_type(start, step, stop)
if step > 0 and ((stop is not None and start > stop) or
(stop is None and start < 0)):
return array([], dtype=dtype)
if step < 0 and ((stop is not None and start < stop) or
(stop is None and start > 0)):
return array([], dtype=dtype)
# TODO(srbs): There are some bugs when start or stop is float type and dtype
# is integer type.
return math_ops.cast(
math_ops.range(start, limit=stop, delta=step), dtype=dtype)
# Building matrices.
@np_utils.np_doc('diag')
def diag(v, k=0): # pylint: disable=missing-docstring
"""Raises an error if input is not 1- or 2-d."""
v = asarray(v)
v_rank = array_ops.rank(v)
v.shape.with_rank_at_most(2)
# TODO(nareshmodi): Consider a np_utils.Assert version that will fail during
# tracing time if the shape is known.
control_flow_ops.Assert(
np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)),
[v_rank])
def _diag(v, k):
return np_utils.cond(
math_ops.equal(array_ops.size(v), 0),
lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype),
lambda: array_ops.matrix_diag(v, k=k))
def _diag_part(v, k):
v_shape = array_ops.shape(v)
v, k = np_utils.cond(
np_utils.logical_or(
np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)),
np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)),
), lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
result = array_ops.matrix_diag_part(v, k=k)
return result
result = np_utils.cond(
math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
return result
@np_utils.np_doc('diagonal')
def diagonal(a, offset=0, axis1=0, axis2=1):  # pylint: disable=missing-docstring
  a = asarray(a)
  maybe_rank = a.shape.rank
  # Fast path: the main diagonal of the last two axes maps directly onto
  # matrix_diag_part.
  if maybe_rank is not None and offset == 0 and (
      axis1 == maybe_rank - 2 or axis1 == -2) and (axis2 == maybe_rank - 1 or
                                                   axis2 == -1):
    return array_ops.matrix_diag_part(a)
  # General case: move the requested axes to the last two positions first.
  a = moveaxis(a, (axis1, axis2), (-2, -1))
  a_shape = array_ops.shape(a)
  def _zeros():  # pylint: disable=missing-docstring
    return (array_ops.zeros(
        array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype), 0)
  # All zeros since diag_part doesn't handle all possible k (aka offset).
  # Written this way since cond will run shape inference on both branches,
  # and diag_part shape inference will fail when offset is out of bounds.
  a, offset = np_utils.cond(
      np_utils.logical_or(
          np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)),
          np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)),
      ), _zeros, lambda: (a, offset))
  a = array_ops.matrix_diag_part(a, k=offset)
  return a
@np_utils.np_doc('diagflat')
def diagflat(v, k=0):
  """Flattens the input and places it on the k-th diagonal of a 2-d array."""
  flattened = array_ops.reshape(asarray(v), [-1])
  return diag(flattened, k)
def _promote_dtype(*arrays):
  """Converts every input to an ndarray of their common (promoted) dtype."""
  target = np_utils.result_type(*arrays)
  def _convert(arr):
    # Skip conversion when the array already carries the target dtype.
    if isinstance(arr, np_arrays.ndarray) and target == arr.dtype.as_numpy_dtype:
      return arr
    return _array_internal(arr, dtype=target, copy=False)
  return [_convert(arr) for arr in arrays]
def _promote_dtype_binary(t1, t2):
  """Two-argument fast path of `_promote_dtype`."""
  target = np_utils._result_type_binary(t1, t2)  # pylint: disable=protected-access
  def _convert(t):
    # Only rewrap when the value is not already an ndarray of the target dtype.
    if isinstance(t, np_arrays.ndarray) and target == t.dtype.as_numpy_dtype:
      return t
    return _array_internal(t, dtype=target, copy=False)
  return _convert(t1), _convert(t2)
@np_utils.np_doc('all')
def all(a, axis=None, keepdims=None):  # pylint: disable=redefined-builtin
  """Tests whether every element along `axis` is truthy."""
  bool_a = asarray(a, dtype=bool)
  return math_ops.reduce_all(input_tensor=bool_a, axis=axis, keepdims=keepdims)
@np_utils.np_doc('any')
def any(a, axis=None, keepdims=None):  # pylint: disable=redefined-builtin
  """Tests whether at least one element along `axis` is truthy."""
  bool_a = asarray(a, dtype=bool)
  return math_ops.reduce_any(input_tensor=bool_a, axis=axis, keepdims=keepdims)
@np_utils.np_doc('compress')
def compress(condition, a, axis=None):  # pylint: disable=redefined-outer-name,missing-function-docstring
  """Selects slices of `a` along `axis` where `condition` is True."""
  condition = asarray(condition, dtype=bool)
  a = asarray(a)
  if condition.ndim != 1:
    raise ValueError('condition must be a 1-d array.')
  # `np.compress` treats scalars as 1-d arrays.
  if a.ndim == 0:
    a = ravel(a)
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    axis += a.ndim
  assert 0 <= axis < a.ndim
  # `tf.boolean_mask` requires the mask to match the array's dimension exactly;
  # `np.compress` allows a shorter mask, padding it with False.
  mask = condition
  if condition.shape[0] < a.shape[axis]:
    padding = array_ops.fill([a.shape[axis] - condition.shape[0]], False)
    mask = array_ops.concat([mask, padding], axis=0)
  return array_ops.boolean_mask(tensor=a, mask=mask, axis=axis)
@np_utils.np_doc('copy')
def copy(a):
  """Returns a new array copied from the input."""
  return array(a, copy=True)
def _maybe_promote_to_int(a):
  """Widens integer arrays narrower than `int` up to the `int` dtype.

  Mirrors numpy's accumulation behavior where, e.g., int8 inputs accumulate
  into np.int_ results. Non-integer inputs are returned unchanged.
  """
  if not dtypes.as_dtype(a.dtype).is_integer:
    return a
  current = a.dtype.as_numpy_dtype
  promoted = np.promote_types(current, int)
  if promoted != current:
    return asarray(a, dtype=promoted)
  return a
@np_utils.np_doc('cumprod')
def cumprod(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  """Cumulative product along `axis`; flattens first when axis is None."""
  a = asarray(a, dtype=dtype)
  if dtype is None:
    # Match numpy: narrow integer accumulators widen to np.int_.
    a = _maybe_promote_to_int(a)
  if axis is None:
    return math_ops.cumprod(ravel(a), 0)
  if axis < 0:
    axis += array_ops.rank(a)
  return math_ops.cumprod(a, axis)
@np_utils.np_doc('cumsum')
def cumsum(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  """Cumulative sum along `axis`; flattens first when axis is None."""
  a = asarray(a, dtype=dtype)
  if dtype is None:
    # Match numpy: narrow integer accumulators widen to np.int_.
    a = _maybe_promote_to_int(a)
  if axis is None:
    return math_ops.cumsum(ravel(a), 0)
  if axis < 0:
    axis += array_ops.rank(a)
  return math_ops.cumsum(a, axis)
@np_utils.np_doc('imag')
def imag(val):
  """Returns the imaginary part of the input, elementwise."""
  # TODO(srbs): np.imag returns a scalar if `val` is a scalar, whereas we
  # always return an ndarray.
  return math_ops.imag(asarray(val))
# Integer-promotion modes for `_reduce`: `_TO_INT_` widens narrow int/bool
# inputs to np.int_/np.uint; `_TO_FLOAT` casts them to the default float type.
_TO_INT_ = 0
_TO_FLOAT = 1
def _reduce(tf_fn,
            a,
            axis=None,
            dtype=None,
            keepdims=None,
            promote_int=_TO_INT_,
            tf_bool_fn=None,
            preserve_bool=False):
  """A general reduction function.

  Args:
    tf_fn: the TF reduction function.
    a: the array to be reduced.
    axis: (optional) the axis along which to do the reduction. If None, all
      dimensions are reduced.
    dtype: (optional) the dtype of the result.
    keepdims: (optional) whether to keep the reduced dimension(s).
    promote_int: how to promote integer and bool inputs. There are three
      choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2)
      `_TO_FLOAT` always promotes them to a float type (determined by
      dtypes.default_float_type); (3) None: don't promote.
    tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
      only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype
      is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
      is `np.bool_` (some reductions such as np.sum convert bools to integers,
      while others such as np.max preserve bools).

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = np_utils.result_type(dtype)
  if keepdims is None:
    keepdims = False
  a = asarray(a, dtype=dtype)
  # Boolean reductions (e.g. any/all) bypass the numeric path entirely.
  if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_) and
      tf_bool_fn is not None):
    return tf_bool_fn(input_tensor=a, axis=axis, keepdims=keepdims)
  if dtype is None:
    dtype = a.dtype.as_numpy_dtype
  if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
    if promote_int == _TO_INT_:
      # If a is an integer/bool type and whose bit width is less than np.int_,
      # numpy up-casts it to np.int_ based on the documentation at
      # https://numpy.org/doc/1.18/reference/generated/numpy.sum.html
      if dtype == np.bool_:
        is_signed = True
        width = 8  # We can use any number here that is less than 64
      else:
        is_signed = np.issubdtype(dtype, np.signedinteger)
        width = np.iinfo(dtype).bits
      # Numpy int_ and uint are defined as 'long' and 'unsigned long', so
      # should have the same bit width.
      if width < np.iinfo(np.int_).bits:
        if is_signed:
          dtype = np.int_
        else:
          dtype = np.uint
        a = math_ops.cast(a, dtype)
    elif promote_int == _TO_FLOAT:
      # E.g. np.mean of integer input produces a float result.
      a = math_ops.cast(a, np_dtypes.default_float_type())
  # TF reductions only accept int32/int64 axis tensors.
  if isinstance(axis, ops.Tensor) and axis.dtype not in (
      dtypes.int32, dtypes.int64):
    axis = math_ops.cast(axis, dtypes.int64)
  return tf_fn(input_tensor=a, axis=axis, keepdims=keepdims)
# TODO (DarrenZhang01): Add `axis` support to the `size` API.
@np_utils.np_doc('size')
def size(x, axis=None):  # pylint: disable=missing-docstring
  """Number of elements in `x`; a static int when the shape is fully known."""
  if axis is not None:
    raise NotImplementedError('axis argument is not supported in the current '
                              '`np.size` implementation')
  if isinstance(x, (int, float, np.int32, np.int64, np.float32, np.float64)):
    return 1
  x = asarray(x)
  if not x.shape.is_fully_defined():
    return array_ops.size_v2(x)
  return np.prod(x.shape.as_list(), dtype=int)
@np_utils.np_doc('sum')
def sum(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=redefined-builtin
  """Sum over `axis`; boolean inputs reduce with logical OR."""
  reduction_kwargs = dict(
      axis=axis, dtype=dtype, keepdims=keepdims,
      tf_bool_fn=math_ops.reduce_any)
  return _reduce(math_ops.reduce_sum, a, **reduction_kwargs)
@np_utils.np_doc('prod')
def prod(a, axis=None, dtype=None, keepdims=None):
  """Product over `axis`; boolean inputs reduce with logical AND."""
  reduction_kwargs = dict(
      axis=axis, dtype=dtype, keepdims=keepdims,
      tf_bool_fn=math_ops.reduce_all)
  return _reduce(math_ops.reduce_prod, a, **reduction_kwargs)
@np_utils.np_doc('mean', unsupported_params=['out'])
def mean(a, axis=None, dtype=None, out=None, keepdims=None):
  """Arithmetic mean over `axis`; integer inputs are promoted to float."""
  if out is not None:
    raise ValueError('Setting out is not supported.')
  return _reduce(
      math_ops.reduce_mean, a, axis=axis, dtype=dtype, keepdims=keepdims,
      promote_int=_TO_FLOAT)
@np_utils.np_doc('amax', unsupported_params=['out'])
def amax(a, axis=None, out=None, keepdims=None):
  """Maximum over `axis`; boolean inputs stay boolean via logical OR."""
  if out is not None:
    raise ValueError('Setting out is not supported.')
  return _reduce(
      math_ops.reduce_max, a, axis=axis, dtype=None, keepdims=keepdims,
      promote_int=None, tf_bool_fn=math_ops.reduce_any, preserve_bool=True)
@np_utils.np_doc('amin', unsupported_params=['out'])
def amin(a, axis=None, out=None, keepdims=None):
  """Minimum over `axis`; boolean inputs stay boolean via logical AND."""
  if out is not None:
    raise ValueError('Setting out is not supported.')
  return _reduce(
      math_ops.reduce_min, a, axis=axis, dtype=None, keepdims=keepdims,
      promote_int=None, tf_bool_fn=math_ops.reduce_all, preserve_bool=True)
@np_utils.np_doc('var')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None):  # pylint: disable=missing-docstring
  """Variance along `axis`, with `ddof` delta degrees of freedom."""
  if dtype:
    # Compute in a dtype wide enough for both the input and the request.
    working_dtype = np_utils.result_type(a, dtype)
  else:
    working_dtype = None
  if out is not None:
    raise ValueError('Setting out is not supported.')
  if ddof != 0:
    # TF reduce_variance doesn't support ddof, so calculate it using raw ops.
    def reduce_fn(input_tensor, axis, keepdims):
      means = math_ops.reduce_mean(input_tensor, axis=axis, keepdims=True)
      centered = input_tensor - means
      if input_tensor.dtype in (dtypes.complex64, dtypes.complex128):
        # Complex variance uses |x - mean|^2 = (x - mean) * conj(x - mean).
        centered = math_ops.cast(
            math_ops.real(centered * math_ops.conj(centered)),
            input_tensor.dtype)
      else:
        centered = math_ops.square(centered)
      squared_deviations = math_ops.reduce_sum(
          centered, axis=axis, keepdims=keepdims)
      if axis is None:
        n = array_ops.size(input_tensor)
      else:
        if axis < 0:
          axis += array_ops.rank(input_tensor)
        n = math_ops.reduce_prod(
            array_ops.gather(array_ops.shape(input_tensor), axis))
      # Divide by (n - ddof) instead of n.
      n = math_ops.cast(n - ddof, input_tensor.dtype)
      return math_ops.cast(math_ops.divide(squared_deviations, n), dtype)
  else:
    reduce_fn = math_ops.reduce_variance
  result = _reduce(
      reduce_fn,
      a,
      axis=axis,
      dtype=working_dtype,
      keepdims=keepdims,
      promote_int=_TO_FLOAT)
  if dtype:
    result = math_ops.cast(result, dtype)
  return result
@np_utils.np_doc('std')
def std(a, axis=None, keepdims=None):  # pylint: disable=missing-function-docstring
  """Standard deviation over `axis`; integer inputs are promoted to float."""
  return _reduce(
      math_ops.reduce_std, a, axis=axis, dtype=None, keepdims=keepdims,
      promote_int=_TO_FLOAT)
@np_utils.np_doc('ravel')
def ravel(a):  # pylint: disable=missing-docstring
  """Returns the input flattened to one dimension."""
  return array_ops.reshape(asarray(a), [-1])
@np_utils.np_doc('real')
def real(val):
  """Returns the real part of the input, elementwise."""
  # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
  # return an ndarray.
  return math_ops.real(asarray(val))
@np_utils.np_doc('repeat')
def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  """Repeats elements of `a` along `axis`, like np.repeat."""
  a = asarray(a)
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  known_shape = original_shape is not None and None not in original_shape
  if known_shape:
    if not original_shape:
      # Scalar input: the result is a 1-d array of length `repeats`.
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()
  repeats = asarray(repeats)
  result = array_ops.repeat(a, repeats, axis)
  if known_shape:
    # Re-attach the statically computed shape that array_ops.repeat loses.
    result.set_shape(original_shape)
  return result
@np_utils.np_doc('around')
def around(a, decimals=0):  # pylint: disable=missing-docstring
  """Rounds to the given number of decimals (which may be negative)."""
  a = asarray(a)
  original_dtype = a.dtype.as_numpy_dtype
  scale = math.pow(10, decimals)
  if np.issubdtype(original_dtype, np.inexact):
    scale = math_ops.cast(scale, original_dtype)
  else:
    # Work in float when the input dtype is exact (e.g. integer), because a
    # negative `decimals` requires fractional scaling.
    work_dtype = np_dtypes.default_float_type()
    a = a.astype(work_dtype)
    scale = math_ops.cast(scale, work_dtype)
  rounded = math_ops.round(math_ops.multiply(a, scale))
  return math_ops.divide(rounded, scale).astype(original_dtype)
setattr(np_arrays.ndarray, '__round__', around)
@np_utils.np_doc('reshape')
def reshape(a, newshape, order='C'):
  """order argument can only be 'C' or 'F'."""
  if order not in {'C', 'F'}:
    raise ValueError('Unsupported order argument {}'.format(order))
  a = asarray(a)
  if isinstance(newshape, int):
    newshape = [newshape]
  if order == 'F':
    # Fortran order: reshape the transposed array against the reversed dims,
    # then transpose back.
    r = array_ops.transpose(
        array_ops.reshape(array_ops.transpose(a), newshape[::-1]))
  else:
    r = array_ops.reshape(a, newshape)
  return r
def _reshape_method_wrapper(a, *newshape, **kwargs):
  """Adapts `ndarray.reshape(*dims)`-style calls onto the `reshape` function."""
  order = kwargs.pop('order', 'C')
  if kwargs:
    raise ValueError('Unsupported arguments: {}'.format(kwargs.keys()))
  # Accept both `x.reshape((m, n))` and `x.reshape(m, n)` call styles.
  if len(newshape) == 1 and not isinstance(newshape[0], int):
    newshape = newshape[0]
  return reshape(a, newshape, order=order)
@np_utils.np_doc('expand_dims')
def expand_dims(a, axis):
  """Inserts a new size-1 dimension at position `axis`."""
  return array_ops.expand_dims(asarray(a), axis=axis)
@np_utils.np_doc('squeeze')
def squeeze(a, axis=None):
  """Removes size-1 dimensions (all of them when `axis` is None)."""
  return array_ops.squeeze(asarray(a), axis)
@np_utils.np_doc('transpose')
def transpose(a, axes=None):
  """Permutes dimensions by `axes`; reverses them when `axes` is None."""
  perm = None if axes is None else asarray(axes)
  return array_ops.transpose(a=asarray(a), perm=perm)
@np_utils.np_doc('swapaxes')
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  """Interchanges `axis1` and `axis2` of `a`."""
  a = asarray(a)
  def adjust_axes(axes, rank):
    # Normalizes possibly-negative axes; handles both Python ints and tensors.
    def f(x):
      if isinstance(x, int):
        if x < 0:
          x = x + rank
      else:
        x = array_ops.where_v2(x < 0, np_utils.add(x, a_rank), x)
      return x
    return nest.map_structure(f, axes)
  if (a.shape.rank is not None and
      isinstance(axis1, int) and isinstance(axis2, int)):
    # This branch makes sure `perm` is statically known, to avoid a
    # not-compile-time-constant XLA error.
    a_rank = a.shape.rank
    axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
    perm = list(range(a_rank))
    perm[axis1] = axis2
    perm[axis2] = axis1
  else:
    # Dynamic path: build the permutation with tensor ops.
    a_rank = array_ops.rank(a)
    axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
    perm = math_ops.range(a_rank)
    perm = array_ops.tensor_scatter_update(perm, [[axis1], [axis2]],
                                           [axis2, axis1])
  a = array_ops.transpose(a, perm)
  return a
@np_utils.np_doc('moveaxis')
def moveaxis(a, source, destination):  # pylint: disable=missing-docstring
  """Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
  if not source and not destination:
    return a
  a = asarray(a)
  if isinstance(source, int):
    source = (source,)
  if isinstance(destination, int):
    destination = (destination,)
  if len(source) != len(destination):
    raise ValueError('The lengths of source and destination must equal')
  a_rank = np_utils._maybe_static(array_ops.rank(a))  # pylint: disable=protected-access
  def _correct_axis(axis, rank):
    # Maps negative axes to their nonnegative equivalents.
    if axis < 0:
      return axis + rank
    return axis
  source = tuple(_correct_axis(axis, a_rank) for axis in source)
  destination = tuple(_correct_axis(axis, a_rank) for axis in destination)
  if a.shape.rank is not None:
    # Static path: build the permutation as a plain Python list.
    perm = [i for i in range(a_rank) if i not in source]
    for dest, src in sorted(zip(destination, source)):
      assert dest <= len(perm)
      perm.insert(dest, src)
  else:
    # Dynamic path: assemble the permutation with scatter ops.
    r = math_ops.range(a_rank)
    def _remove_indices(a, b):
      """Remove indices (`b`) from `a`."""
      items = array_ops.unstack(sort_ops.sort(array_ops.stack(b)), num=len(b))
      i = 0
      result = []
      for item in items:
        result.append(a[i:item])
        i = item + 1
      result.append(a[i:])
      return array_ops.concat(result, 0)
    minus_sources = _remove_indices(r, source)
    minus_dest = _remove_indices(r, destination)
    perm = array_ops.scatter_nd(
        array_ops.expand_dims(minus_dest, 1), minus_sources, [a_rank])
    perm = array_ops.tensor_scatter_update(
        perm, array_ops.expand_dims(destination, 1), source)
  a = array_ops.transpose(a, perm)
  return a
@np_utils.np_doc('pad')
def pad(array, pad_width, mode, **kwargs):  # pylint: disable=redefined-outer-name
  """Only supports modes 'constant', 'reflect' and 'symmetric' currently."""
  constant_values = kwargs.get('constant_values', 0)
  if mode not in ('constant', 'reflect', 'symmetric'):
    raise ValueError('Unsupported padding mode: ' + mode)
  # tf.pad expects the mode name in upper case.
  return array_ops.pad(
      tensor=asarray(array),
      paddings=asarray(pad_width, dtype=dtypes.int32),
      mode=mode.upper(),
      constant_values=constant_values)
@np_utils.np_doc('take')
def take(a, indices, axis=None, out=None, mode='clip'):
  """out argument is not supported, and default mode is clip."""
  if out is not None:
    raise ValueError('out argument is not supported in take.')
  if mode not in ('raise', 'clip', 'wrap'):
    raise ValueError("Invalid mode '{}' for take".format(mode))
  a = asarray(a)
  indices = asarray(indices)
  if axis is None:
    # numpy flattens the array when no axis is given.
    a = array_ops.reshape(a, [-1])
    axis = 0
  axis_size = array_ops.shape(a, out_type=indices.dtype)[axis]
  if mode == 'clip':
    indices = clip_ops.clip_by_value(indices, 0, axis_size - 1)
  elif mode == 'wrap':
    indices = math_ops.floormod(indices, axis_size)
  else:
    raise ValueError("The 'raise' mode to take is not supported.")
  return array_ops.gather(a, indices, axis=axis)
@np_utils.np_doc_only('where')
def where(condition, x=None, y=None):
  """Raises ValueError if exactly one of x or y is not None."""
  condition = asarray(condition, dtype=np.bool_)
  if x is None and y is None:
    # One-argument form: behaves like nonzero.
    return nonzero(condition)
  if x is not None and y is not None:
    x, y = _promote_dtype(x, y)
    return array_ops.where_v2(condition, x, y)
  raise ValueError('Both x and y must be ndarrays, or both must be None.')
@np_utils.np_doc('select')
def select(condlist, choicelist, default=0):  # pylint: disable=missing-docstring
  """Picks elements from `choicelist` by the first matching condition."""
  if len(condlist) != len(choicelist):
    msg = 'condlist must have length equal to choicelist ({} vs {})'
    raise ValueError(msg.format(len(condlist), len(choicelist)))
  if not condlist:
    raise ValueError('condlist must be non-empty')
  # Promote the default together with the choices so all dtypes agree.
  promoted = _promote_dtype(default, *choicelist)
  output = promoted[0]
  choicelist = promoted[1:]
  # Traverse in reverse order so the first True condition wins.
  for cond, choice in zip(reversed(condlist), reversed(choicelist)):
    output = where(cond, choice, output)
  return output
@np_utils.np_doc('shape', link=np_utils.Link(
    'https://numpy.org/doc/1.18/reference/generated/numpy.shape.html'))
def shape(a):
  """Returns the shape of the input array."""
  return asarray(a).shape
@np_utils.np_doc('ndim', link=np_utils.NoLink())
def ndim(a):
  """Returns the number of dimensions of the input."""
  return asarray(a).ndim
@np_utils.np_doc('isscalar')
def isscalar(num):
  """Returns True when the input has rank 0."""
  return ndim(num) == 0
def _boundaries_to_sizes(a, boundaries, axis):
"""Converting boundaries of splits to sizes of splits.
Args:
a: the array to be split.
boundaries: the boundaries, as in np.split.
axis: the axis along which to split.
Returns:
A list of sizes of the splits, as in tf.split.
"""
if axis >= len(a.shape):
raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))
total_size = a.shape[axis]
sizes = []
sizes_sum = 0
prev = 0
for i, b in enumerate(boundaries):
size = b - prev
if size < 0:
raise ValueError('The %s-th boundary %s is smaller than the previous '
'boundary %s' % (i, b, prev))
size = min(size, max(0, total_size - sizes_sum))
sizes.append(size)
sizes_sum += size
prev = b
sizes.append(max(0, total_size - sizes_sum))
return sizes
@np_utils.np_doc('split')
def split(ary, indices_or_sections, axis=0):
  """Splits an array into sub-arrays along `axis`, as np.split does."""
  ary = asarray(ary)
  if not isinstance(indices_or_sections, six.integer_types):
    # Boundary positions must be converted to per-split sizes for tf.split.
    indices_or_sections = _boundaries_to_sizes(ary, indices_or_sections, axis)
  return array_ops.split(ary, indices_or_sections, axis=axis)
def _split_on_axis(np_fun_name, axis):
  # Factory producing a split function bound to a fixed axis; used to define
  # vsplit/hsplit/dsplit below.
  @np_utils.np_doc(np_fun_name)
  def f(ary, indices_or_sections):
    return split(ary, indices_or_sections, axis=axis)
  return f
vsplit = _split_on_axis('vsplit', axis=0)
hsplit = _split_on_axis('hsplit', axis=1)
dsplit = _split_on_axis('dsplit', axis=2)
@np_utils.np_doc('broadcast_to')
def broadcast_to(array, shape):  # pylint: disable=redefined-outer-name
  """Broadcasts the input to the given shape via `full`."""
  return full(shape, array)
@np_utils.np_doc('stack')
def stack(arrays, axis=0):  # pylint: disable=missing-function-docstring
  """Joins a sequence of arrays along a new axis.

  Args:
    arrays: a sequence of array-likes, or a single ndarray/Tensor whose first
      axis is treated as the already-stacked axis and moved into position.
    axis: index of the new axis in the result.

  Returns:
    An ndarray with one more dimension than the individual inputs.
  """
  if isinstance(arrays, (np_arrays.ndarray, ops.Tensor)):
    arrays = asarray(arrays)
    if axis == 0:
      return arrays
    else:
      return swapaxes(arrays, 0, axis)
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The previous per-element `a if isinstance(a, np_arrays.ndarray) else a`
  # rewrap returned the same value on both branches; that dead no-op has been
  # removed.
  return asarray(array_ops.stack(arrays, axis))
@np_utils.np_doc('hstack')
def hstack(tup):
  """Stacks arrays horizontally: axis 0 for 1-d inputs, axis 1 otherwise.

  Args:
    tup: a sequence of array-likes.

  Returns:
    The concatenated ndarray.
  """
  arrays = [atleast_1d(a) for a in tup]
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former element-wise `a if isinstance(...) else a` rewrap was a no-op
  # and has been removed.
  rank = array_ops.rank(arrays[0])
  return np_utils.cond(
      math_ops.equal(rank, 1),
      lambda: array_ops.concat(arrays, axis=0),
      lambda: array_ops.concat(arrays, axis=1))
@np_utils.np_doc('vstack')
def vstack(tup):
  """Stacks arrays vertically (row-wise), promoting each input to rank >= 2.

  Args:
    tup: a sequence of array-likes.

  Returns:
    The inputs concatenated along axis 0.
  """
  arrays = [atleast_2d(a) for a in tup]
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former element-wise `a if isinstance(...) else a` rewrap was a no-op
  # and has been removed.
  return array_ops.concat(arrays, axis=0)
@np_utils.np_doc('dstack')
def dstack(tup):
  """Stacks arrays depth-wise, promoting each input to rank >= 3.

  Args:
    tup: a sequence of array-likes.

  Returns:
    The inputs concatenated along axis 2.
  """
  arrays = [atleast_3d(a) for a in tup]
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former element-wise `a if isinstance(...) else a` rewrap was a no-op
  # and has been removed.
  return array_ops.concat(arrays, axis=2)
def _pad_left_to(n, old_shape):
  """Left-pads a shape with ones until it has at least `n` entries."""
  old_shape = asarray(old_shape, dtype=np.int32)
  pad_amount = math_ops.maximum(n - array_ops.size(old_shape), 0)
  padded = array_ops.pad(old_shape, [[pad_amount, 0]], constant_values=1)
  return asarray(padded)
def _atleast_nd(n, new_shape, *arys):
  """Reshape arrays to be at least `n`-dimensional.

  Args:
    n: The minimal rank.
    new_shape: a function that takes `n` and the old shape and returns the
      desired new shape.
    *arys: ndarray(s) to be reshaped.

  Returns:
    The reshaped array(s).
  """
  def f(x):
    # pylint: disable=g-long-lambda
    x = asarray(x)
    # Only reshape when the current rank is below `n`.
    return asarray(
        np_utils.cond(
            np_utils.greater(n, array_ops.rank(x)),
            lambda: reshape(x, new_shape(n, array_ops.shape(x))),
            lambda: x))
  arys = list(map(f, arys))
  # Single input returns a single array, matching numpy's atleast_* behavior.
  if len(arys) == 1:
    return arys[0]
  else:
    return arys
@np_utils.np_doc('atleast_1d')
def atleast_1d(*arys):
  """Promotes inputs to rank >= 1 by left-padding shapes with ones."""
  return _atleast_nd(1, _pad_left_to, *arys)
@np_utils.np_doc('atleast_2d')
def atleast_2d(*arys):
  """Promotes inputs to rank >= 2 by left-padding shapes with ones."""
  return _atleast_nd(2, _pad_left_to, *arys)
@np_utils.np_doc('atleast_3d')
def atleast_3d(*arys):  # pylint: disable=missing-docstring
  def new_shape(_, old_shape):
    # Rank 0 -> [1, 1, 1]; rank 1 -> pad a 1 on each side; rank 2 -> append
    # a trailing 1 (matching numpy's atleast_3d placement).
    # pylint: disable=g-long-lambda
    ndim_ = array_ops.size(old_shape)
    return np_utils.cond(
        math_ops.equal(ndim_, 0),
        lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
        lambda: np_utils.cond(
            math_ops.equal(ndim_, 1), lambda: array_ops.pad(
                old_shape, [[1, 1]], constant_values=1), lambda: array_ops.pad(
                    old_shape, [[0, 1]], constant_values=1)))
  return _atleast_nd(3, new_shape, *arys)
@np_utils.np_doc('nonzero')
def nonzero(a):
  """Returns one index array per dimension of the nonzero elements."""
  a = atleast_1d(a)
  if a.shape.rank is None:
    raise ValueError("The rank of `a` is unknown, so we can't decide how many "
                     'arrays to return.')
  indices = array_ops.where_v2(math_ops.cast(a, dtypes.bool))
  return array_ops.unstack(indices, a.shape.rank, axis=1)
@np_utils.np_doc('diag_indices')
def diag_indices(n, ndim=2):  # pylint: disable=missing-docstring,redefined-outer-name
  """Index arrays addressing the main diagonal of an ndim-dimensional array."""
  if n < 0:
    raise ValueError(
        'n argument to diag_indices must be nonnegative, got {}'.format(n))
  if ndim < 0:
    raise ValueError(
        'ndim argument to diag_indices must be nonnegative, got {}'.format(
            ndim))
  diagonal_idx = math_ops.range(n)
  return (diagonal_idx,) * ndim
@np_utils.np_doc('tri')
def tri(N, M=None, k=0, dtype=None):  # pylint: disable=invalid-name,missing-docstring
  """N x M matrix with ones at and below the k-th diagonal, zeros above."""
  if M is None:
    M = N
  if dtype is None:
    dtype = np_dtypes.default_float_type()
  else:
    dtype = np_utils.result_type(dtype)
  if k < 0:
    lower = -k - 1
    if lower > N:
      return array_ops.zeros([N, M], dtype)
    # Keep as tf bool: build the complementary upper-triangular matrix and
    # invert it.
    ones_bool = array_ops.ones([N, M], dtype=dtypes.bool)
    upper = array_ops.matrix_band_part(ones_bool, lower, -1)
    return math_ops.cast(math_ops.logical_not(upper), dtype)
  ones = array_ops.ones([N, M], dtype)
  if k > M:
    return ones
  return array_ops.matrix_band_part(ones, -1, k)
@np_utils.np_doc('tril')
def tril(m, k=0):  # pylint: disable=missing-docstring
  """Zeros out all elements above the k-th diagonal."""
  m = asarray(m)
  if m.shape.ndims is None:
    raise ValueError('Argument to tril should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  # Keep values on/below the k-th diagonal, replace the rest with zero.
  keep = tri(*m_shape[-2:], k=k, dtype=bool)
  zero = constant_op.constant(0, m.dtype)
  return array_ops.where_v2(
      array_ops.broadcast_to(keep, array_ops.shape(m)), m, zero)
@np_utils.np_doc('triu')
def triu(m, k=0):  # pylint: disable=missing-docstring
  """Zeros out all elements below the k-th diagonal."""
  m = asarray(m)
  if m.shape.ndims is None:
    raise ValueError('Argument to triu should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  # Mask of the strictly-lower triangle; those positions become zero.
  strictly_lower = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  zero = constant_op.constant(0, m.dtype)
  return array_ops.where_v2(
      array_ops.broadcast_to(strictly_lower, array_ops.shape(m)), zero, m)
@np_utils.np_doc('flip')
def flip(m, axis=None):  # pylint: disable=missing-docstring
  """Reverses element order along `axis` (all axes when None)."""
  m = asarray(m)
  if axis is None:
    return array_ops.reverse(m, math_ops.range(array_ops.rank(m)))
  canonical_axis = np_utils._canonicalize_axis(axis, array_ops.rank(m))  # pylint: disable=protected-access
  return array_ops.reverse(m, [canonical_axis])
@np_utils.np_doc('flipud')
def flipud(m):  # pylint: disable=missing-docstring
  """Reverses the row order (axis 0)."""
  return flip(m, 0)
@np_utils.np_doc('fliplr')
def fliplr(m):  # pylint: disable=missing-docstring
  """Reverses the column order (axis 1)."""
  return flip(m, 1)
@np_utils.np_doc('roll')
def roll(a, shift, axis=None):  # pylint: disable=missing-docstring
  """Rolls elements along `axis`; rolls the flattened array when axis is None."""
  a = asarray(a)
  if axis is not None:
    return manip_ops.roll(a, shift, axis)
  # numpy semantics: with no axis, roll the flattened array and then restore
  # the original shape.
  shape_before = array_ops.shape(a)
  rolled = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0)
  return array_ops.reshape(rolled, shape_before)
@np_utils.np_doc('rot90')
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  """Rotates by 90*k degrees in the plane spanned by `axes`."""
  m_rank = array_ops.rank(m)
  ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access
  k = k % 4
  if k == 0:
    return m
  if k == 2:
    # 180 degrees: flip along both axes.
    return flip(flip(m, ax1), ax2)
  # 90 or 270 degrees: swap the two axes and flip one of them.
  perm = math_ops.range(m_rank)
  perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1])
  if k == 1:
    return transpose(flip(m, ax2), perm)
  return flip(transpose(m, perm), ax2)
@np_utils.np_doc('vander')
def vander(x, N=None, increasing=False):  # pylint: disable=missing-docstring,invalid-name
  """Vandermonde matrix: columns are powers of `x`."""
  x = asarray(x)
  x_shape = array_ops.shape(x)
  N = N or x_shape[0]
  N_temp = np_utils.get_static_value(N)  # pylint: disable=invalid-name
  if N_temp is not None:
    # Static value known: validate eagerly in Python.
    N = N_temp
    if N < 0:
      raise ValueError('N must be nonnegative')
  else:
    control_flow_ops.Assert(N >= 0, [N])
  rank = array_ops.rank(x)
  rank_temp = np_utils.get_static_value(rank)
  if rank_temp is not None:
    rank = rank_temp
    if rank != 1:
      raise ValueError('x must be a one-dimensional array')
  else:
    control_flow_ops.Assert(math_ops.equal(rank, 1), [rank])
  if increasing:
    start = 0
    limit = N
    delta = 1
  else:
    # numpy default: powers decrease from N-1 down to 0.
    start = N - 1
    limit = -1
    delta = -1
  # Broadcast the column vector x against the exponent row vector.
  x = array_ops.expand_dims(x, -1)
  return math_ops.pow(
      x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype))
@np_utils.np_doc('ix_')
def ix_(*args):  # pylint: disable=missing-docstring
  """Constructs open-mesh index arrays for advanced indexing."""
  n = len(args)
  output = []
  for i, a in enumerate(args):
    a = asarray(a)
    a_rank = array_ops.rank(a)
    a_rank_temp = np_utils.get_static_value(a_rank)
    if a_rank_temp is not None:
      # Static rank known: validate eagerly in Python.
      a_rank = a_rank_temp
      if a_rank != 1:
        raise ValueError('Arguments must be 1-d, got arg {} of rank {}'.format(
            i, a_rank))
    else:
      control_flow_ops.Assert(math_ops.equal(a_rank, 1), [a_rank])
    # Each output keeps its data along axis i and is size 1 everywhere else.
    new_shape = [1] * n
    new_shape[i] = -1
    dtype = a.dtype
    if dtype == dtypes.bool:
      # Boolean masks select indices via nonzero, as numpy does.
      output.append(array_ops.reshape(nonzero(a)[0], new_shape))
    elif dtype.is_integer:
      output.append(array_ops.reshape(a, new_shape))
    else:
      raise ValueError(
          'Only integer and bool dtypes are supported, got {}'.format(dtype))
  return output
@np_utils.np_doc('broadcast_arrays')
def broadcast_arrays(*args, **kwargs):  # pylint: disable=missing-docstring
  """Broadcasts the inputs against each other."""
  subok = kwargs.pop('subok', False)
  if subok:
    raise ValueError('subok=True is not supported.')
  if kwargs:
    raise ValueError('Received unsupported arguments {}'.format(kwargs.keys()))
  converted = [asarray(arg) for arg in args]
  return np_utils.tf_broadcast(*converted)
@np_utils.np_doc_only('sign')
def sign(x, out=None, where=None, **kwargs):  # pylint: disable=missing-docstring,redefined-outer-name
  """Elementwise sign; complex inputs use the sign of the real part."""
  if out:
    raise ValueError('tf.numpy doesnt support setting out.')
  if where:
    raise ValueError('tf.numpy doesnt support setting where.')
  if kwargs:
    raise ValueError('tf.numpy doesnt support setting {}'.format(kwargs.keys()))
  x = asarray(x)
  np_dtype = x.dtype.as_numpy_dtype
  if np.issubdtype(np_dtype, np.complexfloating):
    # numpy defines the sign of a complex number via its real part.
    return math_ops.cast(math_ops.sign(math_ops.real(x)), np_dtype)
  return math_ops.sign(x)
# Note that np.take_along_axis may not be present in some supported versions of
# numpy.
@np_utils.np_doc('take_along_axis')
def take_along_axis(arr, indices, axis):  # pylint: disable=missing-docstring
  """Selects values from `arr` using matching index vectors along `axis`."""
  arr = asarray(arr)
  indices = asarray(indices)
  if axis is None:
    # numpy semantics: no axis means operate on the flattened array.
    return take_along_axis(arr.ravel(), indices, 0)
  rank = array_ops.rank(arr)
  axis = axis + rank if axis < 0 else axis
  # Broadcast shapes to match, ensure that the axis of interest is not
  # broadcast.
  arr_shape_original = array_ops.shape(arr)
  indices_shape_original = array_ops.shape(indices)
  arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1])
  indices_shape = array_ops.tensor_scatter_update(indices_shape_original,
                                                  [[axis]], [1])
  broadcasted_shape = array_ops.broadcast_dynamic_shape(arr_shape,
                                                        indices_shape)
  arr_shape = array_ops.tensor_scatter_update(broadcasted_shape, [[axis]],
                                              [arr_shape_original[axis]])
  indices_shape = array_ops.tensor_scatter_update(
      broadcasted_shape, [[axis]], [indices_shape_original[axis]])
  arr = array_ops.broadcast_to(arr, arr_shape)
  indices = array_ops.broadcast_to(indices, indices_shape)
  # Save indices shape so we can restore it later.
  possible_result_shape = indices.shape
  # Correct indices since gather doesn't correctly handle negative indices.
  indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices)
  swapaxes_ = lambda t: swapaxes(t, axis, -1)
  dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1))
  # Move the target axis to the end so gather(batch_dims=1) can be used.
  arr = np_utils.cond(dont_move_axis_to_end, lambda: arr,
                      lambda: swapaxes_(arr))
  indices = np_utils.cond(dont_move_axis_to_end, lambda: indices,
                          lambda: swapaxes_(indices))
  arr_shape = array_ops.shape(arr)
  arr = array_ops.reshape(arr, [-1, arr_shape[-1]])
  indices_shape = array_ops.shape(indices)
  indices = array_ops.reshape(indices, [-1, indices_shape[-1]])
  result = array_ops.gather(arr, indices, batch_dims=1)
  result = array_ops.reshape(result, indices_shape)
  # Undo the axis swap and re-attach the statically known result shape.
  result = np_utils.cond(dont_move_axis_to_end, lambda: result,
                         lambda: swapaxes_(result))
  result.set_shape(possible_result_shape)
  return result
# Shared error-message prefix for invalid indices in the slicing helpers.
# NOTE(review): the name is misspelled ('ERORR'), but other code references it
# by this name, so it is left unchanged here.
_SLICE_ERORR = (
    'only integers, slices (`:`), ellipsis (`...`), '
    'numpy.newaxis (`None`) and integer or boolean arrays are valid indices')
def _as_index(idx, need_scalar=True):
  """Helper function to parse idx as an index.

  Args:
    idx: index
    need_scalar: If idx needs to be a scalar value.

  Returns:
    A pair, (indx, bool). First one is the parsed index and can be a tensor,
    or scalar integer / Dimension. Second one is True if rank is known to be 0.

  Raises:
    IndexError: For incorrect indices.
  """
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    return idx, True
  data = asarray(idx)
  if data.dtype == dtypes.bool:
    if data.shape.ndims != 1:
      # TODO(agarwal): handle higher rank boolean masks.
      raise NotImplementedError('Need rank 1 for bool index %s' % idx)
    # A boolean mask becomes the 1-d positions of its True entries.
    data = array_ops.where_v2(data)
    data = array_ops.reshape(data, [-1])
  if need_scalar and data.shape.rank not in (None, 0):
    raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  np_dtype = data.dtype.as_numpy_dtype
  if not np.issubdtype(np_dtype, np.integer):
    raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  if data.dtype not in (dtypes.int64, dtypes.int32):
    # TF slicing can only handle int32/int64. So we need to cast.
    promoted_dtype = np.promote_types(np.int32, np_dtype)
    if promoted_dtype == np.int32:
      data = math_ops.cast(data, dtypes.int32)
    elif promoted_dtype == np.int64:
      data = math_ops.cast(data, dtypes.int64)
    else:
      raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  return data, data.shape.rank == 0
class _UpdateMethod(enum.Enum):
  """How `_slice_helper` combines new values with existing tensor values."""
  UPDATE = 0  # plain replacement
  ADD = 1  # accumulate by addition
  MIN = 2  # elementwise minimum
  MAX = 3  # elementwise maximum
def _slice_helper(tensor, slice_spec, update_method=None, updates=None):
  """Helper function for __getitem__ and _with_index_update_helper.

  This function collects the indices in `slice_spec` into two buckets, which we
  can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2
  `gather`. They also correspond to "basic indices" and "advanced indices" in
  numpy. This function supports both reading and writing at the indices. The
  reading path can be summarized as `gather(stride_slice(tensor, idx1),
  idx2)`. The writing path can be summarized as `strided_slice_update(tensor,
  idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`. (`gather` here
  means `tf.gather` or `tf.gather_nd`; `scatter` here means
  `tf.tensor_scatter_update`.) The writing path is inefficient because it needs
  to first read out a portion (probably much larger than `updates`) of `tensor`
  using `strided_slice`, update it, and then write the portion back. An
  alternative approach is to only use `scatter`, which amounts to using the
  indexing mechanism of gather/scatter to implement
  strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter
  because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but
  not TF gather/scatter because they don't support spans (except those that
  cover entire dimensions, i.e. `:`). If we materialize spans into individual
  indices, the size of the index tensor would explode. (Note that XLA
  Gather/Scatter have a similar problem for stride > 1 because they don't
  support strides. Indices such as `1:2:8` will need to be materialized into
  individual indices such as [1, 3, 5, 7].)

  Args:
    tensor: the tensor to be read from or write into.
    slice_spec: the indices.
    update_method: (optional) a member of `_UpdateMethod`, indicating how to
      update the values (replacement, add, etc.). `None` indicates just reading.
    updates: (optional) the new values to write into `tensor`. It must have the
      same dtype as `tensor`.

  Returns:
    The result of reading (if `update_method` is `None`) or the updated `tensor`
    after writing.
  """
  begin, end, strides = [], [], []
  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  advanced_indices = []
  shrink_indices = []
  # Bucket each entry of `slice_spec` into basic (strided_slice) or advanced
  # (gather/scatter) indices, building the strided_slice masks as we go.
  for index, s in enumerate(slice_spec):
    if isinstance(s, slice):
      if s.start is not None:
        begin.append(_as_index(s.start)[0])
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None:
        end.append(_as_index(s.stop)[0])
      else:
        end.append(0)
        end_mask |= (1 << index)
      if s.step is not None:
        strides.append(_as_index(s.step)[0])
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is array_ops.newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      s, is_scalar = _as_index(s, False)
      if is_scalar:
        # A scalar index shrinks its dimension away.
        begin.append(s)
        end.append(s + 1)
        strides.append(1)
        shrink_axis_mask |= (1 << index)
        shrink_indices.append(index)
      else:
        # A non-scalar (array) index is an advanced index; the strided_slice
        # leaves this dimension intact (full span) and gather handles it.
        begin.append(0)
        end.append(0)
        strides.append(1)
        begin_mask |= (1 << index)
        end_mask |= (1 << index)
        advanced_indices.append((index, s, ellipsis_mask != 0))
  # stack possibly involves no tensors, so we must use op_scope correct graph.
  with ops.name_scope(
      None,
      'strided_slice', [tensor] + begin + end + strides,
      skip_on_eager=False) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (array_ops.stack(begin),
                                                  array_ops.stack(end),
                                                  array_ops.stack(strides))
      # strided_slice requires begin/end/strides to share one dtype; promote
      # all three to int64 if any of them is int64.
      if (packed_begin.dtype == dtypes.int64 or
          packed_end.dtype == dtypes.int64 or
          packed_strides.dtype == dtypes.int64):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = math_ops.cast(packed_strides, dtypes.int64)
    else:
      var_empty = constant_op.constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    if update_method == _UpdateMethod.UPDATE and not advanced_indices:
      # Pure basic-index replacement: a single strided_slice_update suffices.
      return array_ops.tensor_strided_slice_update(
          tensor,
          packed_begin,
          packed_end,
          packed_strides,
          updates,
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name)
    else:
      # TODO(b/164251540): Find a better way to support update that does not
      # involve one read + two writes.
      if updates is not None:
        original_tensor = tensor
      # TODO(agarwal): set_shape on tensor to set rank.
      tensor = array_ops.strided_slice(
          tensor,
          packed_begin,
          packed_end,
          packed_strides,
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name)
    if not advanced_indices:
      if update_method is None:
        return tensor
      assert update_method != _UpdateMethod.UPDATE
      # TF lacks TensorStridedSliceAdd and alike, so we need to do
      # read+add+update.
      if update_method == _UpdateMethod.ADD:
        update_op = math_ops.add
      elif update_method == _UpdateMethod.MIN:
        update_op = math_ops.minimum
      elif update_method == _UpdateMethod.MAX:
        update_op = math_ops.maximum
      return array_ops.tensor_strided_slice_update(
          original_tensor,
          packed_begin,
          packed_end,
          packed_strides,
          update_op(tensor, updates),
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name + '_2')
    # Map each advanced index to the dimension it addresses in the sliced
    # tensor, accounting for dimensions removed by scalar (shrink) indices.
    advanced_indices_map = {}
    for index, data, had_ellipsis in advanced_indices:
      if had_ellipsis:
        # After an ellipsis, positions count from the end (negative dims).
        num_shrink = len([x for x in shrink_indices if x > index])
        dim = index - len(slice_spec) + num_shrink
      else:
        num_shrink = len([x for x in shrink_indices if x < index])
        dim = index - num_shrink
      advanced_indices_map[dim] = data
    dims = sorted(advanced_indices_map.keys())
    dims_contiguous = True
    if len(dims) > 1:
      if dims[0] < 0 and dims[-1] >= 0:  # not all same sign
        dims_contiguous = False
      else:
        for i in range(len(dims) - 1):
          if dims[i] + 1 != dims[i + 1]:
            dims_contiguous = False
            break
    indices = [advanced_indices_map[x] for x in dims]
    indices = _promote_dtype(*indices)
    indices = np_utils.tf_broadcast(*indices)
    stacked_indices = array_ops.stack(indices, axis=-1)
    # Skip the contiguous-dims optimization for update because there is no
    # tf.*scatter* op that supports the `axis` argument.
    if not dims_contiguous or updates is not None:
      # BUG FIX: the original compared `range(len(dims)) != dims`; in Python 3
      # a range object never equals a list, so the (no-op) moveaxis always
      # ran. Materialize the range so already-leading dims skip the move.
      if list(range(len(dims))) != dims:
        tensor = moveaxis(tensor, dims, range(len(dims)))
      tensor_shape_prefix = array_ops.shape(
          tensor, out_type=stacked_indices.dtype)[:len(dims)]
      # Resolve negative indices against the corresponding dimension sizes.
      stacked_indices = array_ops.where_v2(
          stacked_indices < 0, stacked_indices + tensor_shape_prefix,
          stacked_indices)
      if updates is None:
        return array_ops.gather_nd(tensor, stacked_indices)
      else:
        # We only need to move-axis `updates` in the contiguous case because
        # only in this case the result dimensions of advanced indexing are in
        # the middle of `updates`. In the non-contiguous case, those dimensions
        # are always at the front.
        if dims_contiguous:
          # TODO(wangpeng): Support unknown rank (e.g. by partially flattening
          # `updates`)
          if stacked_indices.shape.rank is None:
            raise NotImplementedError(
                'Rank of the advanced indices must currently be known')
          batch_size = stacked_indices.shape.rank - 1
          batch_start = dims[0]
          if batch_start < 0:
            batch_start += len(dims) - batch_size

          def range_(start, length):
            return range(start, start + length)

          updates = moveaxis(updates, range_(batch_start, batch_size),
                             range(batch_size))
        if update_method == _UpdateMethod.UPDATE:
          update_op = array_ops.tensor_scatter_update
        elif update_method == _UpdateMethod.ADD:
          update_op = array_ops.tensor_scatter_add
        elif update_method == _UpdateMethod.MIN:
          update_op = array_ops.tensor_scatter_min
        elif update_method == _UpdateMethod.MAX:
          update_op = array_ops.tensor_scatter_max
        tensor = update_op(
            tensor, stacked_indices, updates)
        # Undo the axis move above (same always-True comparison fixed here).
        if list(range(len(dims))) != dims:
          tensor = moveaxis(tensor, range(len(dims)), dims)
        return array_ops.tensor_strided_slice_update(
            original_tensor,
            packed_begin,
            packed_end,
            packed_strides,
            tensor,
            begin_mask=begin_mask,
            end_mask=end_mask,
            shrink_axis_mask=shrink_axis_mask,
            new_axis_mask=new_axis_mask,
            ellipsis_mask=ellipsis_mask,
            name=name + '_2')
    # Note that gather_nd does not support gathering from inside the array.
    # To avoid shuffling data back and forth, we transform the indices and
    # do a gather instead.
    rank = np_utils._maybe_static(array_ops.rank(tensor))  # pylint: disable=protected-access
    dims = [(x + rank if x < 0 else x) for x in dims]
    shape_tensor = array_ops.shape(tensor)
    dim_sizes = array_ops.gather(shape_tensor, dims)
    if len(dims) == 1:
      stacked_indices = indices[0]
    stacked_indices = math_ops.cast(stacked_indices, dtypes.int32)
    stacked_indices = array_ops.where_v2(stacked_indices < 0,
                                         stacked_indices + dim_sizes,
                                         stacked_indices)
    axis = dims[0]
    if len(dims) > 1:
      # Flatten the contiguous advanced dims into one, linearizing the
      # per-dim indices with row-major scaling factors.
      index_scaling = math_ops.cumprod(
          dim_sizes, reverse=True, exclusive=True)

      def _tensordot(a, b):
        # TODO(b/168657656): This function should be replaced by
        # tensordot(axis=1) once MatMul has int32 XLA kernel.
        b = array_ops.broadcast_to(b, array_ops.shape(a))
        return math_ops.reduce_sum(a * b, axis=-1)

      stacked_indices = _tensordot(stacked_indices, index_scaling)
      flat_shape = array_ops.concat(
          [shape_tensor[:axis], [-1], shape_tensor[axis + len(dims):]],
          axis=0)
      tensor = array_ops.reshape(tensor, flat_shape)
    return array_ops.gather(tensor, stacked_indices, axis=axis)
def _as_spec_tuple(slice_spec):
  """Convert slice_spec to tuple."""
  # Anything that is not a plain list/tuple indexes as a single entry.
  if (not isinstance(slice_spec, (list, tuple)) or
      isinstance(slice_spec, np.ndarray)):
    return (slice_spec,)

  def _is_single_index(s):
    # None/Ellipsis/slices/nested sequences mean this is a spec of multiple
    # per-dimension entries, not one big index.
    if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)):
      return False
    if isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0:
      return False
    return True

  if all(_is_single_index(s) for s in slice_spec):
    # A sequence of scalars indexes only the first dimension.
    return (slice_spec,)
  return tuple(slice_spec)
def _getitem(self, slice_spec):
  """Implementation of ndarray.__getitem__."""
  # A boolean scalar/tensor/array is treated as a mask, not as indices.
  is_bool_mask = (
      isinstance(slice_spec, bool) or
      (isinstance(slice_spec, ops.Tensor) and
       slice_spec.dtype == dtypes.bool) or
      (isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and
       slice_spec.dtype == np.bool_))
  if is_bool_mask:
    return array_ops.boolean_mask(tensor=self, mask=slice_spec)
  if not isinstance(slice_spec, tuple):
    slice_spec = _as_spec_tuple(slice_spec)
  return _slice_helper(self, slice_spec)
def _with_index_update_helper(update_method, a, slice_spec, updates):
  """Implementation of ndarray._with_index_*."""
  # Boolean masks are converted to the coordinates of their True entries.
  is_bool_mask = (
      isinstance(slice_spec, bool) or
      (isinstance(slice_spec, ops.Tensor) and
       slice_spec.dtype == dtypes.bool) or
      (isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and
       slice_spec.dtype == np.bool_))
  if is_bool_mask:
    slice_spec = nonzero(slice_spec)
  if not isinstance(slice_spec, tuple):
    slice_spec = _as_spec_tuple(slice_spec)
  # Promote `a` and `updates` to a common dtype for the scatter, then cast
  # the result back so the caller-visible dtype is unchanged.
  original_dtype = a.dtype
  a, updates = _promote_dtype_binary(a, updates)
  updated = _slice_helper(a, slice_spec, update_method, updates)
  return updated.astype(original_dtype)
# Monkey-patch numpy-style indexing and index-update operations onto
# np_arrays.ndarray. The `_with_index_*` variants bind the corresponding
# `_UpdateMethod` member via functools.partial.
setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem)
setattr(np_arrays.ndarray, '_with_index_update',
        functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE))
setattr(np_arrays.ndarray, '_with_index_add',
        functools.partial(_with_index_update_helper, _UpdateMethod.ADD))
setattr(np_arrays.ndarray, '_with_index_min',
        functools.partial(_with_index_update_helper, _UpdateMethod.MIN))
setattr(np_arrays.ndarray, '_with_index_max',
        functools.partial(_with_index_update_helper, _UpdateMethod.MAX))
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from base import ChoicesEnum<|fim▁hole|>from _version import __version__<|fim▁end|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.