| file_name (string, 4-140 chars) | prefix (string, 0-39k chars) | suffix (string, 0-36.1k chars) | middle (string, 0-29.4k chars) | fim_type (string, 4 classes) |
|---|---|---|---|---|
Add.py | import os
import libxml2
from sfatables.command import Command
from sfatables.globals import sfatables_config, target_dir, match_dir
class Add(Command):
def __init__(self):
self.options = [('-A','--add')]
self.help = 'Add a rule to a chain'
self.matches = True
self.targets = True
return
def getnextfilename(self,type,chain):
dir = sfatables_config + "/"+chain;
last_rule_number = 0
for (root, dirs, files) in os.walk(dir):
for file in files:
if (file.startswith('sfatables-') and file.endswith(type)):
number_str = file.split('-')[1]
number = int(number_str)
if (number>last_rule_number):
last_rule_number = number
return "sfatables-%d-%s"%(last_rule_number+1,type)
def call_gen(self, chain, type, dir, options):
|
def call(self, command_options, match_options, target_options):
chain = command_options.args[0]
ret = self.call_gen(chain, 'match',match_dir, match_options)
if (ret):
ret = self.call_gen(chain, 'target',target_dir, target_options)
return ret
| filename = os.path.join(dir, options.name+".xml")
xmldoc = libxml2.parseFile(filename)
p = xmldoc.xpathNewContext()
supplied_arguments = options.arguments
if (hasattr(options,'element') and options.element):
element = options.element
else:
element='*'
for option in supplied_arguments:
option_name = option['name']
option_value = getattr(options,option_name)
if (hasattr(options,option_name) and getattr(options,option_name)):
context = p.xpathEval("//rule[@element='%s' or @element='*']/argument[name='%s']"%(element, option_name))
if (not context):
raise Exception('Unknown option %s for match %s and element %s'%(option,option['name'], element))
else:
# Add the value of option
valueNode = libxml2.newNode('value')
valueNode.addContent(option_value)
context[0].addChild(valueNode)
filename = self.getnextfilename(type,chain)
file_path = os.path.join(sfatables_config, chain, filename)
if not os.path.isdir(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
xmldoc.saveFile(file_path)
p.xpathFreeContext()
xmldoc.freeDoc()
return True | identifier_body |
Add.py | import os
import libxml2
from sfatables.command import Command
from sfatables.globals import sfatables_config, target_dir, match_dir
class | (Command):
def __init__(self):
self.options = [('-A','--add')]
self.help = 'Add a rule to a chain'
self.matches = True
self.targets = True
return
def getnextfilename(self,type,chain):
dir = sfatables_config + "/"+chain;
last_rule_number = 0
for (root, dirs, files) in os.walk(dir):
for file in files:
if (file.startswith('sfatables-') and file.endswith(type)):
number_str = file.split('-')[1]
number = int(number_str)
if (number>last_rule_number):
last_rule_number = number
return "sfatables-%d-%s"%(last_rule_number+1,type)
def call_gen(self, chain, type, dir, options):
filename = os.path.join(dir, options.name+".xml")
xmldoc = libxml2.parseFile(filename)
p = xmldoc.xpathNewContext()
supplied_arguments = options.arguments
if (hasattr(options,'element') and options.element):
element = options.element
else:
element='*'
for option in supplied_arguments:
option_name = option['name']
option_value = getattr(options,option_name)
if (hasattr(options,option_name) and getattr(options,option_name)):
context = p.xpathEval("//rule[@element='%s' or @element='*']/argument[name='%s']"%(element, option_name))
if (not context):
raise Exception('Unknown option %s for match %s and element %s'%(option,option['name'], element))
else:
# Add the value of option
valueNode = libxml2.newNode('value')
valueNode.addContent(option_value)
context[0].addChild(valueNode)
filename = self.getnextfilename(type,chain)
file_path = os.path.join(sfatables_config, chain, filename)
if not os.path.isdir(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
xmldoc.saveFile(file_path)
p.xpathFreeContext()
xmldoc.freeDoc()
return True
def call(self, command_options, match_options, target_options):
chain = command_options.args[0]
ret = self.call_gen(chain, 'match',match_dir, match_options)
if (ret):
ret = self.call_gen(chain, 'target',target_dir, target_options)
return ret
| Add | identifier_name |
Add.py | import os
import libxml2
from sfatables.command import Command
from sfatables.globals import sfatables_config, target_dir, match_dir
class Add(Command):
def __init__(self):
self.options = [('-A','--add')]
self.help = 'Add a rule to a chain'
self.matches = True
self.targets = True
return
def getnextfilename(self,type,chain):
dir = sfatables_config + "/"+chain;
last_rule_number = 0
for (root, dirs, files) in os.walk(dir):
for file in files:
if (file.startswith('sfatables-') and file.endswith(type)):
number_str = file.split('-')[1]
number = int(number_str)
if (number>last_rule_number):
last_rule_number = number
return "sfatables-%d-%s"%(last_rule_number+1,type)
def call_gen(self, chain, type, dir, options):
filename = os.path.join(dir, options.name+".xml")
xmldoc = libxml2.parseFile(filename)
p = xmldoc.xpathNewContext()
supplied_arguments = options.arguments |
for option in supplied_arguments:
option_name = option['name']
option_value = getattr(options,option_name)
if (hasattr(options,option_name) and getattr(options,option_name)):
context = p.xpathEval("//rule[@element='%s' or @element='*']/argument[name='%s']"%(element, option_name))
if (not context):
raise Exception('Unknown option %s for match %s and element %s'%(option,option['name'], element))
else:
# Add the value of option
valueNode = libxml2.newNode('value')
valueNode.addContent(option_value)
context[0].addChild(valueNode)
filename = self.getnextfilename(type,chain)
file_path = os.path.join(sfatables_config, chain, filename)
if not os.path.isdir(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
xmldoc.saveFile(file_path)
p.xpathFreeContext()
xmldoc.freeDoc()
return True
def call(self, command_options, match_options, target_options):
chain = command_options.args[0]
ret = self.call_gen(chain, 'match',match_dir, match_options)
if (ret):
ret = self.call_gen(chain, 'target',target_dir, target_options)
return ret | if (hasattr(options,'element') and options.element):
element = options.element
else:
element='*' | random_line_split |
Add.py | import os
import libxml2
from sfatables.command import Command
from sfatables.globals import sfatables_config, target_dir, match_dir
class Add(Command):
def __init__(self):
self.options = [('-A','--add')]
self.help = 'Add a rule to a chain'
self.matches = True
self.targets = True
return
def getnextfilename(self,type,chain):
dir = sfatables_config + "/"+chain;
last_rule_number = 0
for (root, dirs, files) in os.walk(dir):
for file in files:
if (file.startswith('sfatables-') and file.endswith(type)):
number_str = file.split('-')[1]
number = int(number_str)
if (number>last_rule_number):
last_rule_number = number
return "sfatables-%d-%s"%(last_rule_number+1,type)
def call_gen(self, chain, type, dir, options):
filename = os.path.join(dir, options.name+".xml")
xmldoc = libxml2.parseFile(filename)
p = xmldoc.xpathNewContext()
supplied_arguments = options.arguments
if (hasattr(options,'element') and options.element):
element = options.element
else:
element='*'
for option in supplied_arguments:
option_name = option['name']
option_value = getattr(options,option_name)
if (hasattr(options,option_name) and getattr(options,option_name)):
|
filename = self.getnextfilename(type,chain)
file_path = os.path.join(sfatables_config, chain, filename)
if not os.path.isdir(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
xmldoc.saveFile(file_path)
p.xpathFreeContext()
xmldoc.freeDoc()
return True
def call(self, command_options, match_options, target_options):
chain = command_options.args[0]
ret = self.call_gen(chain, 'match',match_dir, match_options)
if (ret):
ret = self.call_gen(chain, 'target',target_dir, target_options)
return ret
| context = p.xpathEval("//rule[@element='%s' or @element='*']/argument[name='%s']"%(element, option_name))
if (not context):
raise Exception('Unknown option %s for match %s and element %s'%(option,option['name'], element))
else:
# Add the value of option
valueNode = libxml2.newNode('value')
valueNode.addContent(option_value)
context[0].addChild(valueNode) | conditional_block |
filesystem.ts | /*
* Copyright (C) 2017 TypeFox and others.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*/
import { Disposable } from '@theia/core/lib/common';
export const fileSystemPath = '/services/filesystem';
export const FileSystem = Symbol("FileSystem");
export interface FileSystem extends Disposable {
/**
* Returns the filestat for the given uri.
*
* If the uri points to a folder it will contain one level of unresolved children.
*
* Reject if a file for the given uri does not exist.
*/
getFileStat(uri: string): Promise<FileStat>;
/**
* Finds out if a file identified by the resource exists.
*/
exists(uri: string): Promise<boolean>;
/**
* Resolve the contents of a file identified by the resource.
*/
resolveContent(uri: string, options?: { encoding?: string }): Promise<{ stat: FileStat, content: string }>;
/**
* Updates the content replacing its previous value.
*/
setContent(file: FileStat, content: string, options?: { encoding?: string }): Promise<FileStat>;
/**
* Moves the file to a new path identified by the resource.
*
* The optional parameter overwrite can be set to replace an existing file at the location.
*
* | | missing | file | empty dir | dir |
* |-----------|---------|------|-----------|-----------|
* | missing | x | x | x | x |
* | file | ✓ | x | x | x |
* | empty dir | ✓ | x | x | overwrite |
* | dir | ✓ | x | overwrite | overwrite |
*
*/
move(sourceUri: string, targetUri: string, options?: { overwrite?: boolean }): Promise<FileStat>;
/**
* Copies the file to a path identified by the resource.
*
* The optional parameter overwrite can be set to replace an existing file at the location.
*/
copy(sourceUri: string, targetUri: string, options?: { overwrite?: boolean, recursive?: boolean }): Promise<FileStat>;
/**
* Creates a new file with the given path. The returned promise
* will have the stat model object as a result.
*
* The optional parameter content can be used as value to fill into the new file.
*/
createFile(uri: string, options?: { content?: string, encoding?: string }): Promise<FileStat>;
/**
* Creates a new folder with the given path. The returned promise
* will have the stat model object as a result.
*/
createFolder(uri: string): Promise<FileStat>;
/**
* Creates a new empty file if the given path does not exist and otherwise
* will set the mtime and atime of the file to the current date.
*/
touchFile(uri: string): Promise<FileStat>;
/** | * move the file to trash.
*/
delete(uri: string, options?: { moveToTrash?: boolean }): Promise<void>;
/**
* Returns the encoding of the given file resource.
*/
getEncoding(uri: string): Promise<string>;
/**
* Return list of available roots.
*/
getRoots(): Promise<FileStat[]>;
}
/**
* A file resource with meta information.
*/
export interface FileStat {
/**
* The uri of the file.
*/
uri: string;
/**
* The last modification of this file.
*/
lastModification: number;
/**
* The resource is a directory. Iff {{true}}
* {{encoding}} has no meaning.
*/
isDirectory: boolean;
/**
* Return {{true}} when this is a directory
* that is not empty.
*/
hasChildren?: boolean;
/**
* The children of the file stat.
* If it is undefined and isDirectory is true, then this file stat is unresolved.
*/
children?: FileStat[];
/**
* The size of the file if known.
*/
size?: number;
} | * Deletes the provided file. The optional moveToTrash parameter allows to | random_line_split |
limit.js | 'use strict';
var ms = require('ms');
var SimpleStrategy = require('./simple.js');
module.exports = LimitStrategy;
function LimitStrategy(provider, options) {
SimpleStrategy.call(this, provider);
this.max = options.max || Infinity;
this.min = options.min || 0;
for (var i = 0; i < this.min; i++) {
this.expand();
}
var idleTime = options.idleTime;
var lowWaterMark = this.lowWaterMark || 0;
if (idleTime && idleTime !== Infinity) |
}
LimitStrategy.prototype = Object.create(SimpleStrategy.prototype);
LimitStrategy.prototype.constructor = LimitStrategy;
/**
* Only allow expanding if the pool size has not hit the maximum
*/
LimitStrategy.prototype.expand = function () {
if (this.poolSize < this.max) {
return SimpleStrategy.prototype.expand.call(this);
}
};
/**
* Only allow shrinking if the pool size is above the minimum or it is destroyed
*/
LimitStrategy.prototype.shrink = function () {
if (this.poolSize > this.min || this.destroyed) {
return SimpleStrategy.prototype.shrink.call(this);
}
};
| {
var self = this;
idleTime = ms(idleTime.toString());
var timeout;
var tryShrink = function () {
if (self.pool.length) {
self.shrink();
}
if (self.pool.length) {
timeout = setTimeout(tryShrink, idleTime);
}
}
this.on('begin-transaction', function () {
if (self.pool.length <= lowWaterMark) {
clearTimeout(timeout);
}
});
this.on('end-transaction', function () {
timeout = setTimeout(tryShrink, idleTime);
});
} | conditional_block |
limit.js | 'use strict';
var ms = require('ms');
var SimpleStrategy = require('./simple.js');
module.exports = LimitStrategy;
function | (provider, options) {
SimpleStrategy.call(this, provider);
this.max = options.max || Infinity;
this.min = options.min || 0;
for (var i = 0; i < this.min; i++) {
this.expand();
}
var idleTime = options.idleTime;
var lowWaterMark = this.lowWaterMark || 0;
if (idleTime && idleTime !== Infinity) {
var self = this;
idleTime = ms(idleTime.toString());
var timeout;
var tryShrink = function () {
if (self.pool.length) {
self.shrink();
}
if (self.pool.length) {
timeout = setTimeout(tryShrink, idleTime);
}
}
this.on('begin-transaction', function () {
if (self.pool.length <= lowWaterMark) {
clearTimeout(timeout);
}
});
this.on('end-transaction', function () {
timeout = setTimeout(tryShrink, idleTime);
});
}
}
LimitStrategy.prototype = Object.create(SimpleStrategy.prototype);
LimitStrategy.prototype.constructor = LimitStrategy;
/**
* Only allow expanding if the pool size has not hit the maximum
*/
LimitStrategy.prototype.expand = function () {
if (this.poolSize < this.max) {
return SimpleStrategy.prototype.expand.call(this);
}
};
/**
* Only allow shrinking if the pool size is above the minimum or it is destroyed
*/
LimitStrategy.prototype.shrink = function () {
if (this.poolSize > this.min || this.destroyed) {
return SimpleStrategy.prototype.shrink.call(this);
}
};
| LimitStrategy | identifier_name |
limit.js | 'use strict';
var ms = require('ms');
var SimpleStrategy = require('./simple.js');
module.exports = LimitStrategy;
function LimitStrategy(provider, options) {
SimpleStrategy.call(this, provider);
this.max = options.max || Infinity;
this.min = options.min || 0;
for (var i = 0; i < this.min; i++) {
this.expand();
}
var idleTime = options.idleTime;
var lowWaterMark = this.lowWaterMark || 0;
if (idleTime && idleTime !== Infinity) {
var self = this;
idleTime = ms(idleTime.toString());
var timeout;
var tryShrink = function () {
if (self.pool.length) {
self.shrink();
}
if (self.pool.length) {
timeout = setTimeout(tryShrink, idleTime);
}
}
this.on('begin-transaction', function () {
if (self.pool.length <= lowWaterMark) {
clearTimeout(timeout);
}
});
this.on('end-transaction', function () {
timeout = setTimeout(tryShrink, idleTime);
});
} |
/**
* Only allow expanding if the pool size has not hit the maximum
*/
LimitStrategy.prototype.expand = function () {
if (this.poolSize < this.max) {
return SimpleStrategy.prototype.expand.call(this);
}
};
/**
* Only allow shrinking if the pool size is above the minimum or it is destroyed
*/
LimitStrategy.prototype.shrink = function () {
if (this.poolSize > this.min || this.destroyed) {
return SimpleStrategy.prototype.shrink.call(this);
}
}; | }
LimitStrategy.prototype = Object.create(SimpleStrategy.prototype);
LimitStrategy.prototype.constructor = LimitStrategy; | random_line_split |
limit.js | 'use strict';
var ms = require('ms');
var SimpleStrategy = require('./simple.js');
module.exports = LimitStrategy;
function LimitStrategy(provider, options) |
LimitStrategy.prototype = Object.create(SimpleStrategy.prototype);
LimitStrategy.prototype.constructor = LimitStrategy;
/**
* Only allow expanding if the pool size has not hit the maximum
*/
LimitStrategy.prototype.expand = function () {
if (this.poolSize < this.max) {
return SimpleStrategy.prototype.expand.call(this);
}
};
/**
* Only allow shrinking if the pool size is above the minimum or it is destroyed
*/
LimitStrategy.prototype.shrink = function () {
if (this.poolSize > this.min || this.destroyed) {
return SimpleStrategy.prototype.shrink.call(this);
}
};
| {
SimpleStrategy.call(this, provider);
this.max = options.max || Infinity;
this.min = options.min || 0;
for (var i = 0; i < this.min; i++) {
this.expand();
}
var idleTime = options.idleTime;
var lowWaterMark = this.lowWaterMark || 0;
if (idleTime && idleTime !== Infinity) {
var self = this;
idleTime = ms(idleTime.toString());
var timeout;
var tryShrink = function () {
if (self.pool.length) {
self.shrink();
}
if (self.pool.length) {
timeout = setTimeout(tryShrink, idleTime);
}
}
this.on('begin-transaction', function () {
if (self.pool.length <= lowWaterMark) {
clearTimeout(timeout);
}
});
this.on('end-transaction', function () {
timeout = setTimeout(tryShrink, idleTime);
});
}
} | identifier_body |
os_release.rs | use std::fs;
pub enum OsReleaseId {
Amazon,
CentOs,
Debian,
Ubuntu,
}
const OS_RELEASE_PATH: &str = "/etc/os-release";
impl OsReleaseId {
fn from_os_release_str(s: &str) -> Option<Self> {
let id_line = s.lines().find(|l| l.starts_with("ID="))?; | let id = id_line.trim_start_matches("ID=").trim_matches('"');
match id {
"amzn" => Some(OsReleaseId::Amazon),
"centos" => Some(OsReleaseId::CentOs),
"debian" => Some(OsReleaseId::Debian),
"ubuntu" => Some(OsReleaseId::Ubuntu),
_ => None,
}
}
pub fn parse_os_release() -> Option<Self> {
fs::read_to_string(OS_RELEASE_PATH)
.ok()
.as_deref()
.and_then(Self::from_os_release_str)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_from_os_release() {
let actual =
OsReleaseId::from_os_release_str(include_str!("os-release-data/amazonlinux-2"));
assert!(matches!(actual, Some(OsReleaseId::Amazon)));
let actual = OsReleaseId::from_os_release_str(include_str!("os-release-data/centos-7.8"));
assert!(matches!(actual, Some(OsReleaseId::CentOs)));
let actual = OsReleaseId::from_os_release_str(include_str!("os-release-data/debian-8"));
assert!(matches!(actual, Some(OsReleaseId::Debian)));
let actual = OsReleaseId::from_os_release_str(include_str!("os-release-data/ubuntu-14.04"));
assert!(matches!(actual, Some(OsReleaseId::Ubuntu)));
}
} | random_line_split | |
os_release.rs | use std::fs;
pub enum OsReleaseId {
Amazon,
CentOs,
Debian,
Ubuntu,
}
const OS_RELEASE_PATH: &str = "/etc/os-release";
impl OsReleaseId {
fn from_os_release_str(s: &str) -> Option<Self> {
let id_line = s.lines().find(|l| l.starts_with("ID="))?;
let id = id_line.trim_start_matches("ID=").trim_matches('"');
match id {
"amzn" => Some(OsReleaseId::Amazon),
"centos" => Some(OsReleaseId::CentOs),
"debian" => Some(OsReleaseId::Debian),
"ubuntu" => Some(OsReleaseId::Ubuntu),
_ => None,
}
}
pub fn | () -> Option<Self> {
fs::read_to_string(OS_RELEASE_PATH)
.ok()
.as_deref()
.and_then(Self::from_os_release_str)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_from_os_release() {
let actual =
OsReleaseId::from_os_release_str(include_str!("os-release-data/amazonlinux-2"));
assert!(matches!(actual, Some(OsReleaseId::Amazon)));
let actual = OsReleaseId::from_os_release_str(include_str!("os-release-data/centos-7.8"));
assert!(matches!(actual, Some(OsReleaseId::CentOs)));
let actual = OsReleaseId::from_os_release_str(include_str!("os-release-data/debian-8"));
assert!(matches!(actual, Some(OsReleaseId::Debian)));
let actual = OsReleaseId::from_os_release_str(include_str!("os-release-data/ubuntu-14.04"));
assert!(matches!(actual, Some(OsReleaseId::Ubuntu)));
}
}
| parse_os_release | identifier_name |
pending.ts | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | //
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
export const pending = (): PendingTracker => {
// tslint:disable-next-line:no-any
const active: {[k: string]: Promise<any>} = {};
let inc = 0;
return {
// so you can Promise.all() these active promises
active: () => {
return Object.values(active);
},
track: <T>(p: Promise<T>): Promise<T> => {
if (inc + 1 === inc) inc = -1;
active[inc++] = p;
const _inc = inc;
if (p.finally) {
p.finally(() => {
delete active[_inc];
});
return p;
}
// tslint:disable-next-line:no-any
let resolve: any, reject: any;
const proxy = new Promise((res, rej) => {
resolve = res;
reject = rej;
});
p.then((value) => {
delete active[_inc];
resolve(value);
}).catch((e) => {
delete active[_inc];
reject(e);
});
return proxy as Promise<T>;
}
};
};
export type PendingTracker = {
// tslint:disable-next-line:no-any
active: () => Array<Promise<any>>,
track: <T>(p: Promise<T>) => Promise<T>
}; | // You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
moin_migration_cleanup.py | import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
"""remove moinmoin metada from the top of the file"""
rst = rst_content.split('\n')
for i, line in enumerate(rst):
if line.startswith('#'):
continue
break
return '\n'.join(rst[i:])
def delete_relative_links(rst_content):
"""remove links relatives. Waliki point them correctly implicitly"""
return re.sub(r'^(\.\. .*: \.\./.*)\n$', '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
def rep(matchobj):
for filename in matchobj.groups(1):
try:
a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
except IndexError:
print('Cant find %s in %s' % (filename, slug))
return None
return '`%s <%s>`_' % (filename, a.get_absolute_url())
return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
for directive in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
rst_content += """
.. role:: {directive}
:class: {directive}
""".format(directive=directive)
return rst_content
def emojis(rst_content):
# require
emojis_map = {
':)': 'smile',
':-)': 'smile',
';)': 'wink',
';-)': 'wink',
':-?': 'smirk',
':?': 'smirk',
':(': 'confused',
':-(': 'confused',
':D': 'laughing',
':-D': 'laughing',
':-P': 'stuck_out_tongue_closed_eyes',
':P': 'stuck_out_tongue_closed_eyes',
":'(": 'cry',
":'-(": 'cry',
}
def replace_emoji(match):
replacement = emojis_map.get(match.groups()[0], '')
if replacement:
return '|%s|' % replacement
return ''
result = re.sub(r'\|((?:\:|;).{1,3})\|', replace_emoji, rst_content, flags=re.MULTILINE)
return result
def email(rst_content):
pattern = r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?'
return re.sub(pattern, r'``\1``', rst_content)
def title_level(rst_content):
def dashrepl(matchobj):
return '-' * len(matchobj.group(0))
pattern = r'^~+$'
return re.sub(pattern, dashrepl, rst_content, flags=re.MULTILINE)
def code(rst_content):
if not pandoc:
return rst_content
pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'
def convert(match):
source = match.groups()[0]
source = '\n'.join(l.strip() for l in source.split('\n'))
source = "<pre>%s</pre>" % source
rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
# rst_source = rst_source.strip().replace('\n', '\n ') + '\n'
return rst_source
result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
return result
class Command(BaseCommand):
help = 'Cleanup filters for a moin2git import'
option_list = (
make_option('--limit-to',
dest='slug',
default='',
help="optional namespace"),
make_option('--filters',
dest='filters',
default='all',
help="comma separated list of filter functions to apply"),
make_option('--message',
dest='message',
default=_("RestructuredText clean up"),
help="log message"),
) + BaseCommand.option_list
def handle(self, *args, **options):
valid_filters = ['meta', 'links',
'attachments', 'directives',
'emojis', 'title', 'email', 'code', 'title_level']
slug = options['slug']
filters = options['filters']
if filters == 'all':
filters = valid_filters
else:
filters = [f.strip() for f in filters.split(',')]
if not set(filters).issubset(valid_filters):
valid = get_text_list(valid_filters, 'and')
raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)
if slug:
pages = Page.objects.filter(slug__startswith=slug)
else:
pages = Page.objects.all()
for page in pages:
title = None
print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
raw = page.raw
if 'meta' in filters:
raw = clean_meta(raw)
if 'links' in filters:
raw = delete_relative_links(raw)
if 'attachments' in filters:
raw = attachments(raw, page.slug)
if 'directives' in filters:
raw = directives(raw)
if 'emojis' in filters:
raw = emojis(raw)
if 'email' in filters:
raw = email(raw)
if 'title_level' in filters:
raw = title_level(raw)
if 'code' in filters:
|
if 'title' in filters and not page.title:
title = page._get_part('get_document_title')
if raw != page.raw or title:
if title:
page.title = title
if raw != page.raw:
page.raw = raw
page.save()
page_saved.send_robust(sender='moin',
page=page,
author=None,
message=options['message'],
form_extra_data={})
else:
print('Nothing changed. Ignoring update')
| if not pandoc:
print('The filter "code" need Pandoc installed in your system. Ignoring')
else:
raw = code(raw) | conditional_block |
moin_migration_cleanup.py | import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
"""remove moinmoin metada from the top of the file"""
rst = rst_content.split('\n')
for i, line in enumerate(rst):
if line.startswith('#'):
continue
break
return '\n'.join(rst[i:])
def delete_relative_links(rst_content):
"""remove links relatives. Waliki point them correctly implicitly"""
return re.sub(r'^(\.\. .*: \.\./.*)\n$', '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
def rep(matchobj):
for filename in matchobj.groups(1):
try:
a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
except IndexError:
print('Cant find %s in %s' % (filename, slug))
return None
return '`%s <%s>`_' % (filename, a.get_absolute_url())
return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
for directive in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
rst_content += """
.. role:: {directive}
:class: {directive}
""".format(directive=directive)
return rst_content
def | (rst_content):
# require
emojis_map = {
':)': 'smile',
':-)': 'smile',
';)': 'wink',
';-)': 'wink',
':-?': 'smirk',
':?': 'smirk',
':(': 'confused',
':-(': 'confused',
':D': 'laughing',
':-D': 'laughing',
':-P': 'stuck_out_tongue_closed_eyes',
':P': 'stuck_out_tongue_closed_eyes',
":'(": 'cry',
":'-(": 'cry',
}
def replace_emoji(match):
replacement = emojis_map.get(match.groups()[0], '')
if replacement:
return '|%s|' % replacement
return ''
result = re.sub(r'\|((?:\:|;).{1,3})\|', replace_emoji, rst_content, flags=re.MULTILINE)
return result
def email(rst_content):
pattern = r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?'
return re.sub(pattern, r'``\1``', rst_content)
def title_level(rst_content):
def dashrepl(matchobj):
return '-' * len(matchobj.group(0))
pattern = r'^~+$'
return re.sub(pattern, dashrepl, rst_content, flags=re.MULTILINE)
def code(rst_content):
if not pandoc:
return rst_content
pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'
def convert(match):
source = match.groups()[0]
source = '\n'.join(l.strip() for l in source.split('\n'))
source = "<pre>%s</pre>" % source
rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
# rst_source = rst_source.strip().replace('\n', '\n ') + '\n'
return rst_source
result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
return result
class Command(BaseCommand):
help = 'Cleanup filters for a moin2git import'
option_list = (
make_option('--limit-to',
dest='slug',
default='',
help="optional namespace"),
make_option('--filters',
dest='filters',
default='all',
help="comma separated list of filter functions to apply"),
make_option('--message',
dest='message',
default=_("RestructuredText clean up"),
help="log message"),
) + BaseCommand.option_list
def handle(self, *args, **options):
valid_filters = ['meta', 'links',
'attachments', 'directives',
'emojis', 'title', 'email', 'code', 'title_level']
slug = options['slug']
filters = options['filters']
if filters == 'all':
filters = valid_filters
else:
filters = [f.strip() for f in filters.split(',')]
if not set(filters).issubset(valid_filters):
valid = get_text_list(valid_filters, 'and')
raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)
if slug:
pages = Page.objects.filter(slug__startswith=slug)
else:
pages = Page.objects.all()
for page in pages:
title = None
print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
raw = page.raw
if 'meta' in filters:
raw = clean_meta(raw)
if 'links' in filters:
raw = delete_relative_links(raw)
if 'attachments' in filters:
raw = attachments(raw, page.slug)
if 'directives' in filters:
raw = directives(raw)
if 'emojis' in filters:
raw = emojis(raw)
if 'email' in filters:
raw = email(raw)
if 'title_level' in filters:
raw = title_level(raw)
if 'code' in filters:
if not pandoc:
print('The filter "code" need Pandoc installed in your system. Ignoring')
else:
raw = code(raw)
if 'title' in filters and not page.title:
title = page._get_part('get_document_title')
if raw != page.raw or title:
if title:
page.title = title
if raw != page.raw:
page.raw = raw
page.save()
page_saved.send_robust(sender='moin',
page=page,
author=None,
message=options['message'],
form_extra_data={})
else:
print('Nothing changed. Ignoring update')
| emojis | identifier_name |
moin_migration_cleanup.py | import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
"""remove moinmoin metada from the top of the file"""
rst = rst_content.split('\n')
for i, line in enumerate(rst):
if line.startswith('#'):
continue
break
return '\n'.join(rst[i:])
def delete_relative_links(rst_content):
"""remove links relatives. Waliki point them correctly implicitly"""
return re.sub(r'^(\.\. .*: \.\./.*)\n$', '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
def rep(matchobj):
for filename in matchobj.groups(1):
try:
a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
except IndexError:
print('Cant find %s in %s' % (filename, slug))
return None
return '`%s <%s>`_' % (filename, a.get_absolute_url())
return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
for directive in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
rst_content += """
.. role:: {directive}
:class: {directive}
""".format(directive=directive)
return rst_content
def emojis(rst_content):
# require
emojis_map = {
':)': 'smile',
':-)': 'smile',
';)': 'wink',
';-)': 'wink',
':-?': 'smirk',
':?': 'smirk',
':(': 'confused',
':-(': 'confused',
':D': 'laughing',
':-D': 'laughing',
':-P': 'stuck_out_tongue_closed_eyes',
':P': 'stuck_out_tongue_closed_eyes',
":'(": 'cry',
":'-(": 'cry',
}
def replace_emoji(match):
replacement = emojis_map.get(match.groups()[0], '')
if replacement:
return '|%s|' % replacement
return ''
result = re.sub(r'\|((?:\:|;).{1,3})\|', replace_emoji, rst_content, flags=re.MULTILINE)
return result
def email(rst_content):
pattern = r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?'
return re.sub(pattern, r'``\1``', rst_content)
def title_level(rst_content):
def dashrepl(matchobj):
|
pattern = r'^~+$'
return re.sub(pattern, dashrepl, rst_content, flags=re.MULTILINE)
def code(rst_content):
if not pandoc:
return rst_content
pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'
def convert(match):
source = match.groups()[0]
source = '\n'.join(l.strip() for l in source.split('\n'))
source = "<pre>%s</pre>" % source
rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
# rst_source = rst_source.strip().replace('\n', '\n ') + '\n'
return rst_source
result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
return result
class Command(BaseCommand):
help = 'Cleanup filters for a moin2git import'
option_list = (
make_option('--limit-to',
dest='slug',
default='',
help="optional namespace"),
make_option('--filters',
dest='filters',
default='all',
help="comma separated list of filter functions to apply"),
make_option('--message',
dest='message',
default=_("RestructuredText clean up"),
help="log message"),
) + BaseCommand.option_list
def handle(self, *args, **options):
valid_filters = ['meta', 'links',
'attachments', 'directives',
'emojis', 'title', 'email', 'code', 'title_level']
slug = options['slug']
filters = options['filters']
if filters == 'all':
filters = valid_filters
else:
filters = [f.strip() for f in filters.split(',')]
if not set(filters).issubset(valid_filters):
valid = get_text_list(valid_filters, 'and')
raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)
if slug:
pages = Page.objects.filter(slug__startswith=slug)
else:
pages = Page.objects.all()
for page in pages:
title = None
print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
raw = page.raw
if 'meta' in filters:
raw = clean_meta(raw)
if 'links' in filters:
raw = delete_relative_links(raw)
if 'attachments' in filters:
raw = attachments(raw, page.slug)
if 'directives' in filters:
raw = directives(raw)
if 'emojis' in filters:
raw = emojis(raw)
if 'email' in filters:
raw = email(raw)
if 'title_level' in filters:
raw = title_level(raw)
if 'code' in filters:
if not pandoc:
print('The filter "code" need Pandoc installed in your system. Ignoring')
else:
raw = code(raw)
if 'title' in filters and not page.title:
title = page._get_part('get_document_title')
if raw != page.raw or title:
if title:
page.title = title
if raw != page.raw:
page.raw = raw
page.save()
page_saved.send_robust(sender='moin',
page=page,
author=None,
message=options['message'],
form_extra_data={})
else:
print('Nothing changed. Ignoring update')
| return '-' * len(matchobj.group(0)) | identifier_body |
moin_migration_cleanup.py | import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
"""remove moinmoin metada from the top of the file"""
rst = rst_content.split('\n')
for i, line in enumerate(rst):
if line.startswith('#'):
continue
break
return '\n'.join(rst[i:])
def delete_relative_links(rst_content):
"""remove links relatives. Waliki point them correctly implicitly"""
return re.sub(r'^(\.\. .*: \.\./.*)\n$', '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
def rep(matchobj):
for filename in matchobj.groups(1):
try:
a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
except IndexError:
print('Cant find %s in %s' % (filename, slug))
return None
return '`%s <%s>`_' % (filename, a.get_absolute_url())
return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
for directive in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
rst_content += """
.. role:: {directive}
:class: {directive}
""".format(directive=directive)
return rst_content
def emojis(rst_content):
# require
emojis_map = {
':)': 'smile',
':-)': 'smile',
';)': 'wink',
';-)': 'wink',
':-?': 'smirk',
':?': 'smirk',
':(': 'confused',
':-(': 'confused',
':D': 'laughing',
':-D': 'laughing',
':-P': 'stuck_out_tongue_closed_eyes',
':P': 'stuck_out_tongue_closed_eyes',
":'(": 'cry',
":'-(": 'cry',
}
def replace_emoji(match):
replacement = emojis_map.get(match.groups()[0], '')
if replacement:
return '|%s|' % replacement
return ''
result = re.sub(r'\|((?:\:|;).{1,3})\|', replace_emoji, rst_content, flags=re.MULTILINE)
return result
def email(rst_content):
pattern = r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?'
return re.sub(pattern, r'``\1``', rst_content)
def title_level(rst_content):
def dashrepl(matchobj):
return '-' * len(matchobj.group(0))
pattern = r'^~+$'
return re.sub(pattern, dashrepl, rst_content, flags=re.MULTILINE)
def code(rst_content):
if not pandoc:
return rst_content
pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'
def convert(match):
source = match.groups()[0]
source = '\n'.join(l.strip() for l in source.split('\n'))
source = "<pre>%s</pre>" % source
rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
# rst_source = rst_source.strip().replace('\n', '\n ') + '\n'
return rst_source
result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
return result
class Command(BaseCommand):
help = 'Cleanup filters for a moin2git import'
option_list = (
make_option('--limit-to',
dest='slug',
default='',
help="optional namespace"),
make_option('--filters',
dest='filters',
default='all',
help="comma separated list of filter functions to apply"),
make_option('--message',
dest='message',
default=_("RestructuredText clean up"),
help="log message"),
) + BaseCommand.option_list
def handle(self, *args, **options):
valid_filters = ['meta', 'links',
'attachments', 'directives',
'emojis', 'title', 'email', 'code', 'title_level']
slug = options['slug'] | filters = valid_filters
else:
filters = [f.strip() for f in filters.split(',')]
if not set(filters).issubset(valid_filters):
valid = get_text_list(valid_filters, 'and')
raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)
if slug:
pages = Page.objects.filter(slug__startswith=slug)
else:
pages = Page.objects.all()
for page in pages:
title = None
print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
raw = page.raw
if 'meta' in filters:
raw = clean_meta(raw)
if 'links' in filters:
raw = delete_relative_links(raw)
if 'attachments' in filters:
raw = attachments(raw, page.slug)
if 'directives' in filters:
raw = directives(raw)
if 'emojis' in filters:
raw = emojis(raw)
if 'email' in filters:
raw = email(raw)
if 'title_level' in filters:
raw = title_level(raw)
if 'code' in filters:
if not pandoc:
print('The filter "code" need Pandoc installed in your system. Ignoring')
else:
raw = code(raw)
if 'title' in filters and not page.title:
title = page._get_part('get_document_title')
if raw != page.raw or title:
if title:
page.title = title
if raw != page.raw:
page.raw = raw
page.save()
page_saved.send_robust(sender='moin',
page=page,
author=None,
message=options['message'],
form_extra_data={})
else:
print('Nothing changed. Ignoring update') | filters = options['filters']
if filters == 'all': | random_line_split |
SeriesFactory.ts | module n3Charts.Factory.Series {
'use strict';
export class SeriesFactory extends n3Charts.Factory.BaseFactory {
public svg: D3.Selection;
public type: string; | static seriesClassSuffix: string = '-series';
protected data: Utils.Data;
protected options: Options.Options;
create() {
this.createContainer(this.factoryMgr.get('container').data);
// Hard update
this.eventMgr.on('data-update.' + this.type, this.update.bind(this));
// Soft updates
this.eventMgr.on('pan.' + this.type, this.softUpdate.bind(this));
this.eventMgr.on('zoom-end.' + this.type, this.softUpdate.bind(this));
this.eventMgr.on('outer-world-domain-change.' + this.key, this.softUpdate.bind(this));
this.eventMgr.on('resize.' + this.type, this.softUpdate.bind(this));
}
update(data, options) {
this.data = data;
this.options = options;
this.softUpdate();
}
getAxes(series: Options.SeriesOptions): {xAxis: Factory.Axis, yAxis: Factory.Axis} {
return {
xAxis: this.factoryMgr.get('x-axis'),
yAxis: this.factoryMgr.get(series.axis + '-axis')
};
}
softUpdate() {
var series = this.options.getSeriesByType(this.type).filter((s) => s.visible);
this.updateSeriesContainer(series);
}
destroy() {
this.svg.remove();
}
createContainer(parent: D3.Selection) {
this.svg = parent
.append('g')
.attr('class', this.type + SeriesFactory.containerClassSuffix);
}
updateSeriesContainer(series: Options.ISeriesOptions[]) {
// Create a data join
var groups = this.svg
.selectAll('.' + this.type + SeriesFactory.seriesClassSuffix)
// Use the series id as key for the join
.data(series, (d: Options.ISeriesOptions) => d.id);
// Create a new group for every new series
groups.enter()
.append('g')
.attr({
class: (d: Options.ISeriesOptions) => {
return this.type + SeriesFactory.seriesClassSuffix + ' ' + d.id;
}
});
// Update all existing series groups
this.styleSeries(groups);
this.updateSeries(groups, series);
// Delete unused series groups
groups.exit()
.remove();
}
updateSeries(groups: D3.Selection, series: Options.ISeriesOptions[]) {
// Workaround to retrieve the D3.Selection
// in the callback function (bound to keyword this)
var self = this;
groups.each(function(d: Options.ISeriesOptions, i: number) {
var group = d3.select(this);
self.updateData(group, d, i, series.length);
});
}
updateData(group: D3.Selection, series: Options.ISeriesOptions, index: number, numSeries: number) {
// this needs to be overwritten
}
styleSeries(group: D3.Selection) {
// this needs to be overwritten
}
}
} |
static containerClassSuffix: string = '-data'; | random_line_split |
SeriesFactory.ts | module n3Charts.Factory.Series {
'use strict';
export class SeriesFactory extends n3Charts.Factory.BaseFactory {
public svg: D3.Selection;
public type: string;
static containerClassSuffix: string = '-data';
static seriesClassSuffix: string = '-series';
protected data: Utils.Data;
protected options: Options.Options;
create() {
this.createContainer(this.factoryMgr.get('container').data);
// Hard update
this.eventMgr.on('data-update.' + this.type, this.update.bind(this));
// Soft updates
this.eventMgr.on('pan.' + this.type, this.softUpdate.bind(this));
this.eventMgr.on('zoom-end.' + this.type, this.softUpdate.bind(this));
this.eventMgr.on('outer-world-domain-change.' + this.key, this.softUpdate.bind(this));
this.eventMgr.on('resize.' + this.type, this.softUpdate.bind(this));
}
update(data, options) {
this.data = data;
this.options = options;
this.softUpdate();
}
getAxes(series: Options.SeriesOptions): {xAxis: Factory.Axis, yAxis: Factory.Axis} {
return {
xAxis: this.factoryMgr.get('x-axis'),
yAxis: this.factoryMgr.get(series.axis + '-axis')
};
}
softUpdate() {
var series = this.options.getSeriesByType(this.type).filter((s) => s.visible);
this.updateSeriesContainer(series);
}
destroy() {
this.svg.remove();
}
| (parent: D3.Selection) {
this.svg = parent
.append('g')
.attr('class', this.type + SeriesFactory.containerClassSuffix);
}
updateSeriesContainer(series: Options.ISeriesOptions[]) {
// Create a data join
var groups = this.svg
.selectAll('.' + this.type + SeriesFactory.seriesClassSuffix)
// Use the series id as key for the join
.data(series, (d: Options.ISeriesOptions) => d.id);
// Create a new group for every new series
groups.enter()
.append('g')
.attr({
class: (d: Options.ISeriesOptions) => {
return this.type + SeriesFactory.seriesClassSuffix + ' ' + d.id;
}
});
// Update all existing series groups
this.styleSeries(groups);
this.updateSeries(groups, series);
// Delete unused series groups
groups.exit()
.remove();
}
updateSeries(groups: D3.Selection, series: Options.ISeriesOptions[]) {
// Workaround to retrieve the D3.Selection
// in the callback function (bound to keyword this)
var self = this;
groups.each(function(d: Options.ISeriesOptions, i: number) {
var group = d3.select(this);
self.updateData(group, d, i, series.length);
});
}
updateData(group: D3.Selection, series: Options.ISeriesOptions, index: number, numSeries: number) {
// this needs to be overwritten
}
styleSeries(group: D3.Selection) {
// this needs to be overwritten
}
}
}
| createContainer | identifier_name |
SeriesFactory.ts | module n3Charts.Factory.Series {
'use strict';
export class SeriesFactory extends n3Charts.Factory.BaseFactory {
public svg: D3.Selection;
public type: string;
static containerClassSuffix: string = '-data';
static seriesClassSuffix: string = '-series';
protected data: Utils.Data;
protected options: Options.Options;
create() {
this.createContainer(this.factoryMgr.get('container').data);
// Hard update
this.eventMgr.on('data-update.' + this.type, this.update.bind(this));
// Soft updates
this.eventMgr.on('pan.' + this.type, this.softUpdate.bind(this));
this.eventMgr.on('zoom-end.' + this.type, this.softUpdate.bind(this));
this.eventMgr.on('outer-world-domain-change.' + this.key, this.softUpdate.bind(this));
this.eventMgr.on('resize.' + this.type, this.softUpdate.bind(this));
}
update(data, options) {
this.data = data;
this.options = options;
this.softUpdate();
}
getAxes(series: Options.SeriesOptions): {xAxis: Factory.Axis, yAxis: Factory.Axis} {
return {
xAxis: this.factoryMgr.get('x-axis'),
yAxis: this.factoryMgr.get(series.axis + '-axis')
};
}
softUpdate() {
var series = this.options.getSeriesByType(this.type).filter((s) => s.visible);
this.updateSeriesContainer(series);
}
destroy() {
this.svg.remove();
}
createContainer(parent: D3.Selection) |
updateSeriesContainer(series: Options.ISeriesOptions[]) {
// Create a data join
var groups = this.svg
.selectAll('.' + this.type + SeriesFactory.seriesClassSuffix)
// Use the series id as key for the join
.data(series, (d: Options.ISeriesOptions) => d.id);
// Create a new group for every new series
groups.enter()
.append('g')
.attr({
class: (d: Options.ISeriesOptions) => {
return this.type + SeriesFactory.seriesClassSuffix + ' ' + d.id;
}
});
// Update all existing series groups
this.styleSeries(groups);
this.updateSeries(groups, series);
// Delete unused series groups
groups.exit()
.remove();
}
updateSeries(groups: D3.Selection, series: Options.ISeriesOptions[]) {
// Workaround to retrieve the D3.Selection
// in the callback function (bound to keyword this)
var self = this;
groups.each(function(d: Options.ISeriesOptions, i: number) {
var group = d3.select(this);
self.updateData(group, d, i, series.length);
});
}
updateData(group: D3.Selection, series: Options.ISeriesOptions, index: number, numSeries: number) {
// this needs to be overwritten
}
styleSeries(group: D3.Selection) {
// this needs to be overwritten
}
}
}
| {
this.svg = parent
.append('g')
.attr('class', this.type + SeriesFactory.containerClassSuffix);
} | identifier_body |
consoleauth.py | # Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
default='consoleauth',
help='The topic console auth proxy nodes listen on')
console_token_ttl = cfg.IntOpt('console_token_ttl',
default=600,
help='How many seconds before deleting tokens')
CONSOLEAUTH_OPTS = [consoleauth_topic_opt, console_token_ttl]
def register_opts(conf):
conf.register_opts(CONSOLEAUTH_OPTS)
def | ():
return {'DEFAULT': CONSOLEAUTH_OPTS}
| list_opts | identifier_name |
consoleauth.py | # Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
|
console_token_ttl = cfg.IntOpt('console_token_ttl',
default=600,
help='How many seconds before deleting tokens')
CONSOLEAUTH_OPTS = [consoleauth_topic_opt, console_token_ttl]
def register_opts(conf):
conf.register_opts(CONSOLEAUTH_OPTS)
def list_opts():
return {'DEFAULT': CONSOLEAUTH_OPTS} | consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
default='consoleauth',
help='The topic console auth proxy nodes listen on') | random_line_split |
consoleauth.py | # Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
default='consoleauth',
help='The topic console auth proxy nodes listen on')
console_token_ttl = cfg.IntOpt('console_token_ttl',
default=600,
help='How many seconds before deleting tokens')
CONSOLEAUTH_OPTS = [consoleauth_topic_opt, console_token_ttl]
def register_opts(conf):
conf.register_opts(CONSOLEAUTH_OPTS)
def list_opts():
| return {'DEFAULT': CONSOLEAUTH_OPTS} | identifier_body | |
angular-ui-router.d.ts | // Compiled using typings@0.6.10
// Source: https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/655f8c1bf3c71b0e1ba415b36309604f79326ac8/angular-ui-router/angular-ui-router.d.ts
// Type definitions for Angular JS 1.1.5+ (ui.router module)
// Project: https://github.com/angular-ui/ui-router
// Definitions by: Michel Salib <https://github.com/michelsalib>
// Definitions: https://github.com/borisyankov/DefinitelyTyped
// Support for AMD require and CommonJS
declare module 'angular-ui-router' {
// Since angular-ui-router adds providers for a bunch of
// injectable dependencies, it doesn't really return any
// actual data except the plain string 'ui.router'.
//
// As such, I don't think anybody will ever use the actual
// default value of the module. So I've only included the
// the types. (@xogeny)
export type IState = angular.ui.IState;
export type IStateProvider = angular.ui.IStateProvider;
export type IUrlMatcher = angular.ui.IUrlMatcher;
export type IUrlRouterProvider = angular.ui.IUrlRouterProvider;
export type IStateOptions = angular.ui.IStateOptions;
export type IHrefOptions = angular.ui.IHrefOptions;
export type IStateService = angular.ui.IStateService;
export type IResolvedState = angular.ui.IResolvedState;
export type IStateParamsService = angular.ui.IStateParamsService;
export type IUrlRouterService = angular.ui.IUrlRouterService;
export type IUiViewScrollProvider = angular.ui.IUiViewScrollProvider;
export type IType = angular.ui.IType;
}
declare module angular.ui {
interface IState {
name?: string;
/**
* String HTML content, or function that returns an HTML string
*/
template?: string | {(): string};
/**
* String URL path to template file OR Function, returns URL path string
*/
templateUrl?: string | {(params: IStateParamsService): string};
/**
* Function, returns HTML content string
*/
templateProvider?: Function | Array<string|Function>;
/**
* A controller paired to the state. Function, annotated array or name as String
*/
controller?: Function|string|Array<string|Function>;
controllerAs?: string;
/**
* Function (injectable), returns the actual controller function or string.
*/
controllerProvider?: Function|Array<string|Function>;
/**
* Specifies the parent state of this state
*/
parent?: string | IState;
resolve?: { [name:string]: any };
/**
* A url with optional parameters. When a state is navigated or transitioned to, the $stateParams service will be populated with any parameters that were passed.
*/
url?: string | IUrlMatcher;
/**
* A map which optionally configures parameters declared in the url, or defines additional non-url parameters. Only use this within a state if you are not using url. Otherwise you can specify your parameters within the url. When a state is navigated or transitioned to, the $stateParams service will be populated with any parameters that were passed.
*/
params?: any;
/**
* Use the views property to set up multiple views. If you don't need multiple views within a single state this property is not needed. Tip: remember that often nested views are more useful and powerful than multiple sibling views.
*/
views?: { [name:string]: IState };
abstract?: boolean;
/**
* Callback function for when a state is entered. Good way to trigger an action or dispatch an event, such as opening a dialog.
* If minifying your scripts, make sure to explicitly annotate this function, because it won't be automatically annotated by your build tools.
*/
onEnter?: Function|Array<string|Function>;
/**
* Callback functions for when a state is entered and exited. Good way to trigger an action or dispatch an event, such as opening a dialog.
* If minifying your scripts, make sure to explicitly annotate this function, because it won't be automatically annotated by your build tools.
*/
onExit?: Function|Array<string|Function>;
/**
* Arbitrary data object, useful for custom configuration.
*/
data?: any;
/**
* Boolean (default true). If false will not re-trigger the same state just because a search/query parameter has changed. Useful for when you'd like to modify $location.search() without triggering a reload.
*/
reloadOnSearch?: boolean;
/**
* Boolean (default true). If false will reload state on everytransitions. Useful for when you'd like to restore all data to its initial state.
*/
cache?: boolean;
}
interface IStateProvider extends angular.IServiceProvider {
state(name:string, config:IState): IStateProvider;
state(config:IState): IStateProvider;
decorator(name?: string, decorator?: (state: IState, parent: Function) => any): any;
}
interface IUrlMatcher {
concat(pattern: string): IUrlMatcher;
exec(path: string, searchParams: {}): {};
parameters(): string[];
format(values: {}): string;
}
interface IUrlMatcherFactory {
/**
* Creates a UrlMatcher for the specified pattern.
*
* @param pattern {string} The URL pattern.
*
* @returns {IUrlMatcher} The UrlMatcher.
*/
compile(pattern: string): IUrlMatcher;
/**
* Returns true if the specified object is a UrlMatcher, or false otherwise.
*
* @param o {any} The object to perform the type check against.
*
* @returns {boolean} Returns true if the object matches the IUrlMatcher interface, by implementing all the same methods.
*/
isMatcher(o: any): boolean;
/**
* Returns a type definition for the specified name
*
* @param name {string} The type definition name
*
* @returns {IType} The type definition
*/
type(name: string): IType;
/**
* Registers a custom Type object that can be used to generate URLs with typed parameters.
*
* @param {IType} definition The type definition.
* @param {any[]} inlineAnnotedDefinitionFn A function that is injected before the app runtime starts. The result of this function is merged into the existing definition.
*
* @returns {IUrlMatcherFactory} Returns $urlMatcherFactoryProvider.
*/
type(name: string, definition: IType, inlineAnnotedDefinitionFn?: any[]): IUrlMatcherFactory;
/**
* Registers a custom Type object that can be used to generate URLs with typed parameters.
*
* @param {IType} definition The type definition.
         * @param {Function} definitionFn A function that is injected before the app runtime starts. The result of this function is merged into the existing definition.
*
* @returns {IUrlMatcherFactory} Returns $urlMatcherFactoryProvider.
*/
type(name: string, definition: IType, definitionFn?: (...args:any[]) => IType): IUrlMatcherFactory;
/**
* Defines whether URL matching should be case sensitive (the default behavior), or not.
*
         * @param value {boolean} true to match URLs in a case-insensitive manner; false (the default) to match case-sensitively.
*
* @returns {boolean} the current value of caseInsensitive
*/
caseInsensitive(value?: boolean): boolean;
/**
* Sets the default behavior when generating or matching URLs with default parameter values
*
* @param value {string} A string that defines the default parameter URL squashing behavior. nosquash: When generating an href with a default parameter value, do not squash the parameter value from the URL slash: When generating an href with a default parameter value, squash (remove) the parameter value, and, if the parameter is surrounded by slashes, squash (remove) one slash from the URL any other string, e.g. "~": When generating an href with a default parameter value, squash (remove) the parameter value from the URL and replace it with this string.
*/
defaultSquashPolicy(value: string): void;
/**
* Defines whether URLs should match trailing slashes, or not (the default behavior).
*
* @param value {boolean} false to match trailing slashes in URLs, otherwise true.
*
* @returns {boolean} the current value of strictMode
*/
strictMode(value?: boolean): boolean;
    } | when(whenPath: IUrlMatcher, handler: Function): IUrlRouterProvider;
when(whenPath: IUrlMatcher, handler: any[]): IUrlRouterProvider;
when(whenPath: IUrlMatcher, toPath: string): IUrlRouterProvider;
when(whenPath: string, handler: Function): IUrlRouterProvider;
when(whenPath: string, handler: any[]): IUrlRouterProvider;
when(whenPath: string, toPath: string): IUrlRouterProvider;
otherwise(handler: Function): IUrlRouterProvider;
otherwise(handler: any[]): IUrlRouterProvider;
otherwise(path: string): IUrlRouterProvider;
rule(handler: Function): IUrlRouterProvider;
rule(handler: any[]): IUrlRouterProvider;
/**
* Disables (or enables) deferring location change interception.
*
* If you wish to customize the behavior of syncing the URL (for example, if you wish to defer a transition but maintain the current URL), call this method at configuration time. Then, at run time, call $urlRouter.listen() after you have configured your own $locationChangeSuccess event handler.
*
* @param {boolean} defer Indicates whether to defer location change interception. Passing no parameter is equivalent to true.
*/
deferIntercept(defer?: boolean): void;
}
interface IStateOptions {
/**
* {boolean=true|string=} - If true will update the url in the location bar, if false will not. If string, must be "replace", which will update url and also replace last history record.
*/
location?: boolean | string;
/**
* {boolean=true}, If true will inherit url parameters from current url.
*/
inherit?: boolean;
/**
* {object=$state.$current}, When transitioning with relative path (e.g '^'), defines which state to be relative from.
*/
relative?: IState;
/**
* {boolean=true}, If true will broadcast $stateChangeStart and $stateChangeSuccess events.
*/
notify?: boolean;
/**
* {boolean=false}, If true will force transition even if the state or params have not changed, aka a reload of the same state. It differs from reloadOnSearch because you'd use this when you want to force a reload when everything is the same, including search params.
*/
reload?: boolean;
}
interface IHrefOptions {
lossy?: boolean;
inherit?: boolean;
relative?: IState;
absolute?: boolean;
}
interface IStateService {
/**
* Convenience method for transitioning to a new state. $state.go calls $state.transitionTo internally but automatically sets options to { location: true, inherit: true, relative: $state.$current, notify: true }. This allows you to easily use an absolute or relative to path and specify only the parameters you'd like to update (while letting unspecified parameters inherit from the currently active ancestor states).
*
* @param to Absolute state name or relative state path. Some examples:
*
* $state.go('contact.detail') - will go to the contact.detail state
* $state.go('^') - will go to a parent state
* $state.go('^.sibling') - will go to a sibling state
* $state.go('.child.grandchild') - will go to grandchild state
*
* @param params A map of the parameters that will be sent to the state, will populate $stateParams. Any parameters that are not specified will be inherited from currently defined parameters. This allows, for example, going to a sibling state that shares parameters specified in a parent state. Parameter inheritance only works between common ancestor states, I.e. transitioning to a sibling will get you the parameters for all parents, transitioning to a child will get you all current parameters, etc.
*
* @param options Options object.
*/
go(to: string, params?: {}, options?: IStateOptions): angular.IPromise<any>;
go(to: IState, params?: {}, options?: IStateOptions): angular.IPromise<any>;
transitionTo(state: string, params?: {}, updateLocation?: boolean): angular.IPromise<any>;
transitionTo(state: IState, params?: {}, updateLocation?: boolean): angular.IPromise<any>;
transitionTo(state: string, params?: {}, options?: IStateOptions): angular.IPromise<any>;
transitionTo(state: IState, params?: {}, options?: IStateOptions): angular.IPromise<any>;
includes(state: string, params?: {}): boolean;
includes(state: string, params?: {}, options?:any): boolean;
is(state:string, params?: {}): boolean;
is(state: IState, params?: {}): boolean;
href(state: IState, params?: {}, options?: IHrefOptions): string;
href(state: string, params?: {}, options?: IHrefOptions): string;
get(state: string, context?: string): IState;
get(state: IState, context?: string): IState;
get(state: string, context?: IState): IState;
get(state: IState, context?: IState): IState;
get(): IState[];
/** A reference to the state's config object. However you passed it in. Useful for accessing custom data. */
current: IState;
        /** A param object, e.g. {sectionId: section.id}, that you'd like to test against the current active state. */
params: IStateParamsService;
reload(): angular.IPromise<any>;
/** Currently pending transition. A promise that'll resolve or reject. */
transition: angular.IPromise<{}>;
$current: IResolvedState;
}
interface IResolvedState {
locals: {
/**
* Currently resolved "resolve" values from the current state
*/
globals: { [key: string]: any; };
};
}
interface IStateParamsService {
[key: string]: any;
}
interface IUrlRouterService {
/*
* Triggers an update; the same update that happens when the address bar
* url changes, aka $locationChangeSuccess.
*
* This method is useful when you need to use preventDefault() on the
* $locationChangeSuccess event, perform some custom logic (route protection,
* auth, config, redirection, etc) and then finally proceed with the transition
* by calling $urlRouter.sync().
*
*/
sync(): void;
listen(): Function;
href(urlMatcher: IUrlMatcher, params?: IStateParamsService, options?: IHrefOptions): string;
update(read?: boolean): void;
push(urlMatcher: IUrlMatcher, params?: IStateParamsService, options?: IHrefOptions): void;
}
interface IUiViewScrollProvider {
/*
* Reverts back to using the core $anchorScroll service for scrolling
* based on the url anchor.
*/
useAnchorScroll(): void;
}
interface IType {
/**
* Converts a parameter value (from URL string or transition param) to a custom/native value.
*
* @param val {string} The URL parameter value to decode.
* @param key {string} The name of the parameter in which val is stored. Can be used for meta-programming of Type objects.
*
* @returns {any} Returns a custom representation of the URL parameter value.
*/
decode(val: string, key: string): any;
/**
* Encodes a custom/native type value to a string that can be embedded in a URL. Note that the return value does not need to be URL-safe (i.e. passed through encodeURIComponent()), it only needs to be a representation of val that has been coerced to a string.
*
* @param val {any} The value to encode.
* @param key {string} The name of the parameter in which val is stored. Can be used for meta-programming of Type objects.
*
* @returns {string} Returns a string representation of val that can be encoded in a URL.
*/
encode(val: any, key: string): string;
/**
* Determines whether two decoded values are equivalent.
*
* @param a {any} A value to compare against.
* @param b {any} A value to compare against.
*
* @returns {boolean} Returns true if the values are equivalent/equal, otherwise false.
*/
equals? (a: any, b: any): boolean;
/**
* Detects whether a value is of a particular type. Accepts a native (decoded) value and determines whether it matches the current Type object.
*
* @param val {any} The value to check.
* @param key {any} Optional. If the type check is happening in the context of a specific UrlMatcher object, this is the name of the parameter in which val is stored. Can be used for meta-programming of Type objects.
*
* @returns {boolean} Returns true if the value matches the type, otherwise false.
*/
is(val: any, key: string): boolean;
/**
* The regular expression pattern used to match values of this type when coming from a substring of a URL.
*/
pattern?: RegExp;
}
} |
interface IUrlRouterProvider extends angular.IServiceProvider {
when(whenPath: RegExp, handler: Function): IUrlRouterProvider;
when(whenPath: RegExp, handler: any[]): IUrlRouterProvider;
when(whenPath: RegExp, toPath: string): IUrlRouterProvider; | random_line_split |
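A minimal consumer sketch may help when reading the declarations above. Everything named here that is not in the typings — the `app` module, the state names, the templates and `ContactsCtrl` — is an assumption for illustration; the APIs used (`IStateProvider.state`, `IUrlRouterProvider.otherwise`, `IStateService.go`) are the ones declared in this row.

```typescript
// Hypothetical application wiring; only the ui-router surface typed above is exercised.
const app = angular.module('app', ['ui.router']);

app.config(['$stateProvider', '$urlRouterProvider', (
    $stateProvider: angular.ui.IStateProvider,
    $urlRouterProvider: angular.ui.IUrlRouterProvider) => {

    $stateProvider
        // Parent state: url, template file and controller are plain IState fields.
        .state('contacts', {
            url: '/contacts',
            templateUrl: 'contacts.html',
            controller: 'ContactsCtrl',
            controllerAs: 'vm',
        })
        // Child state: the nested name inherits the parent's url prefix; search-only
        // changes will not re-enter it because reloadOnSearch is false.
        .state('contacts.detail', {
            url: '/{id}',
            templateUrl: 'contacts-detail.html',
            reloadOnSearch: false,
        });

    // Fallback for URLs that no registered state matches.
    $urlRouterProvider.otherwise('/contacts');
}]);

// Relative navigation from a controller, as documented on IStateService.go.
app.controller('ContactsCtrl', ['$state', function ($state: angular.ui.IStateService) {
    $state.go('.detail', { id: 42 }, { location: true, notify: true });
}]);
```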
test.py | #!/usr/bin/env finemonkeyrunner
# -*- coding:utf8 -*-
import sys
sys.path.append(r'D:\learning\python\auto\fineMonkeyRunner')
from com.fine.android.finemonkeyrunner import fineMonkeyRunner
# Add the package path, otherwise the module cannot be found -- note
#sys.path.append(r'C:\Users\wangxu\AppData\Local\Android\sdk\tools\testscript') | finemonkeyrunner = fineMonkeyRunner('emulator-5554')
#finemonkeyrunner.assertfocusedwindowmame('com.mdsd.wiicare/com.mdsd.wiicare.function.LoginActivity_')
#finemonkeyrunner.assertcurrentactivity('com.mdsd.wiicare/com.mdsd.wiicare.function.LoginActivity_')
view = finemonkeyrunner.getviewbyID('id/etAccount')
print finemonkeyrunner.getviewinfo_classname(view)
#print finemonkeyrunner.getelementinfo_locate('id/etAccount')
#print finemonkeyrunner.getviewinfo_visible(view)
#finemonkeyrunner.typebyid('id/etPassword','123')
#ss = finemonkeyrunner.getviewssametext('id/drawerLayout','经鼻气管插管')
#print finemonkeyrunner.viewlist
#finemonkeyrunner.getviewinfo(view)
#finemonkeyrunner.forcestopapp('com.mdsd.wiicare') | #sys.path.append(r'D:\learning\python\auto\fineMonkeyRunner') | random_line_split |
index.d.ts | /// <reference types="node" />
/** | * found in the LICENSE file at https://angular.io/license
*/
import { BuilderContext, BuilderOutput } from '@angular-devkit/architect';
import { WebpackLoggingCallback } from '@angular-devkit/build-webpack';
import { experimental, json, logging, virtualFs } from '@angular-devkit/core';
import * as fs from 'fs';
import * as webpack from 'webpack';
import { IndexHtmlTransform } from '../angular-cli-files/utilities/index-file/write-index-html';
import { ExecutionTransformer } from '../transforms';
import { Schema as BrowserBuilderSchema } from './schema';
export declare type BrowserBuilderOutput = json.JsonObject & BuilderOutput & {
outputPath: string;
};
export declare function createBrowserLoggingCallback(verbose: boolean, logger: logging.LoggerApi): WebpackLoggingCallback;
export declare function buildBrowserWebpackConfigFromContext(options: BrowserBuilderSchema, context: BuilderContext, host?: virtualFs.Host<fs.Stats>): Promise<{
workspace: experimental.workspace.Workspace;
config: webpack.Configuration[];
}>;
export declare function buildWebpackBrowser(options: BrowserBuilderSchema, context: BuilderContext, transforms?: {
webpackConfiguration?: ExecutionTransformer<webpack.Configuration>;
logging?: WebpackLoggingCallback;
indexHtml?: IndexHtmlTransform;
}): import("rxjs").Observable<BrowserBuilderOutput>;
declare const _default: import("@angular-devkit/architect/src/internal").Builder<json.JsonObject & BrowserBuilderSchema>;
export default _default; | * @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be | random_line_split |
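The exports above are usually consumed from a small custom builder that delegates to the stock browser build while transforming the webpack configuration it produces. A sketch under stated assumptions — the import path of this module and the specific alias tweak are illustrative, not taken from the declaration file:

```typescript
import { createBuilder } from '@angular-devkit/architect';
import { json } from '@angular-devkit/core';
import * as webpack from 'webpack';
import { buildWebpackBrowser } from './index';            // path is an assumption
import { Schema as BrowserBuilderSchema } from './schema';

// Delegate to buildWebpackBrowser and patch the generated webpack config on the way in.
export default createBuilder<json.JsonObject & BrowserBuilderSchema>((options, context) =>
    buildWebpackBrowser(options, context, {
        webpackConfiguration: (config: webpack.Configuration) => {
            // Hypothetical tweak: alias lodash to its ES-module build.
            const resolve = config.resolve || {};
            config.resolve = { ...resolve, alias: { ...(resolve.alias || {}), lodash: 'lodash-es' } };
            return config;
        },
    }));
```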
test_onyx_config.py | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
super(TestOnyxConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
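        # Every test in this class starts from the same canned running-config fixture.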
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(lines=['hostname foo'], save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.load_config.call_args[0][1]
self.assertIn('configuration write', args)
def test_onyx_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
|
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True)
| set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True) | identifier_body |
test_onyx_config.py | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
super(TestOnyxConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(lines=['hostname foo'], save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.load_config.call_args[0][1]
self.assertIn('configuration write', args)
def | (self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True)
| test_onyx_config_lines_wo_parents | identifier_name |
test_onyx_config.py | # | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
super(TestOnyxConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(lines=['hostname foo'], save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.load_config.call_args[0][1]
self.assertIn('configuration write', args)
def test_onyx_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True) | random_line_split | |
mpmc_bounded_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
#![allow(missing_docs, dead_code)]
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
// This queue is copy pasted from old rust stdlib.
use std::sync::Arc;
use std::cell::UnsafeCell;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{Relaxed, Release, Acquire};
struct Node<T> {
sequence: AtomicUsize,
value: Option<T>,
}
unsafe impl<T: Send> Send for Node<T> {}
unsafe impl<T: Sync> Sync for Node<T> {}
struct State<T> {
pad0: [u8; 64],
buffer: Vec<UnsafeCell<Node<T>>>,
mask: usize,
pad1: [u8; 64],
enqueue_pos: AtomicUsize,
pad2: [u8; 64],
dequeue_pos: AtomicUsize,
pad3: [u8; 64],
}
unsafe impl<T: Send> Send for State<T> {}
unsafe impl<T: Send + Sync> Sync for State<T> {}
pub struct Queue<T> {
state: Arc<State<T>>,
}
impl<T> State<T> {
fn with_capacity(capacity: usize) -> State<T> {
let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
if capacity < 2 {
2
} else {
// use next power of 2 as capacity
capacity.next_power_of_two()
}
} else {
capacity
};
let buffer = (0..capacity).map(|i| {
UnsafeCell::new(Node { sequence:AtomicUsize::new(i), value: None })
}).collect::<Vec<_>>();
State{
pad0: [0; 64],
buffer: buffer,
mask: capacity-1,
pad1: [0; 64],
enqueue_pos: AtomicUsize::new(0),
pad2: [0; 64],
dequeue_pos: AtomicUsize::new(0),
pad3: [0; 64],
}
}
fn push(&self, value: T) -> Result<(), T> {
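        // Vyukov's scheme: each slot's `sequence` encodes its state. A slot whose
        // sequence equals the enqueue position is free to claim; publishing then
        // stores sequence = pos + 1 so consumers can observe the element.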
let mask = self.mask;
let mut pos = self.enqueue_pos.load(Relaxed);
loop {
let node = &self.buffer[pos & mask];
let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: isize = seq as isize - pos as isize;
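            // diff == 0: the slot is free at this position; diff < 0: the buffer is
            // full; diff > 0: another producer claimed it first, so re-read enqueue_pos.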
if diff == 0 {
let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
if enqueue_pos == pos {
unsafe {
(*node.get()).value = Some(value);
(*node.get()).sequence.store(pos+1, Release);
}
break
} else {
pos = enqueue_pos;
}
} else if diff < 0 {
return Err(value);
} else {
pos = self.enqueue_pos.load(Relaxed);
}
}
Ok(())
}
fn pop(&self) -> Option<T> {
let mask = self.mask;
let mut pos = self.dequeue_pos.load(Relaxed);
loop {
let node = &self.buffer[pos & mask];
let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: isize = seq as isize - (pos + 1) as isize;
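            // diff == 0: an element is ready in this slot; diff < 0: the queue is
            // empty; diff > 0: another consumer got here first, so re-read dequeue_pos.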
if diff == 0 {
let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
if dequeue_pos == pos {
unsafe {
let value = (*node.get()).value.take();
(*node.get()).sequence.store(pos + mask + 1, Release);
return value
}
} else {
pos = dequeue_pos;
}
} else if diff < 0 {
return None
} else {
pos = self.dequeue_pos.load(Relaxed);
}
}
}
}
impl<T> Queue<T> {
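    // Cloneable public handle; every clone shares the same ring buffer through the Arc.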
pub fn with_capacity(capacity: usize) -> Queue<T> {
Queue{
state: Arc::new(State::with_capacity(capacity))
}
}
pub fn push(&self, value: T) -> Result<(), T> {
self.state.push(value)
}
pub fn pop(&self) -> Option<T> {
self.state.pop()
}
}
impl<T> Clone for Queue<T> {
fn clone(&self) -> Queue<T> {
Queue { state: self.state.clone() }
}
}
#[cfg(test)]
mod tests {
use std::thread;
use std::sync::mpsc::channel;
use super::Queue;
| let q = Queue::with_capacity(nthreads*nmsgs);
assert_eq!(None, q.pop());
let (tx, rx) = channel();
for _ in 0..nthreads {
let q = q.clone();
let tx = tx.clone();
thread::spawn(move || {
let q = q;
for i in 0..nmsgs {
assert!(q.push(i).is_ok());
}
tx.send(()).unwrap();
});
}
let mut completion_rxs = vec![];
for _ in 0..nthreads {
let (tx, rx) = channel();
completion_rxs.push(rx);
let q = q.clone();
thread::spawn(move || {
let q = q;
let mut i = 0;
loop {
match q.pop() {
None => {},
Some(_) => {
i += 1;
if i == nmsgs { break }
}
}
}
tx.send(i).unwrap();
});
}
for rx in &mut completion_rxs {
assert_eq!(nmsgs, rx.recv().unwrap());
}
for _ in 0..nthreads {
rx.recv().unwrap();
}
}
} | #[test]
fn test() {
let nthreads = 8;
let nmsgs = 1000; | random_line_split |
mpmc_bounded_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
#![allow(missing_docs, dead_code)]
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
// This queue is copy pasted from old rust stdlib.
use std::sync::Arc;
use std::cell::UnsafeCell;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{Relaxed, Release, Acquire};
struct Node<T> {
sequence: AtomicUsize,
value: Option<T>,
}
unsafe impl<T: Send> Send for Node<T> {}
unsafe impl<T: Sync> Sync for Node<T> {}
struct State<T> {
pad0: [u8; 64],
buffer: Vec<UnsafeCell<Node<T>>>,
mask: usize,
pad1: [u8; 64],
enqueue_pos: AtomicUsize,
pad2: [u8; 64],
dequeue_pos: AtomicUsize,
pad3: [u8; 64],
}
unsafe impl<T: Send> Send for State<T> {}
unsafe impl<T: Send + Sync> Sync for State<T> {}
pub struct | <T> {
state: Arc<State<T>>,
}
impl<T> State<T> {
fn with_capacity(capacity: usize) -> State<T> {
let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
if capacity < 2 {
2
} else {
// use next power of 2 as capacity
capacity.next_power_of_two()
}
} else {
capacity
};
let buffer = (0..capacity).map(|i| {
UnsafeCell::new(Node { sequence:AtomicUsize::new(i), value: None })
}).collect::<Vec<_>>();
State{
pad0: [0; 64],
buffer: buffer,
mask: capacity-1,
pad1: [0; 64],
enqueue_pos: AtomicUsize::new(0),
pad2: [0; 64],
dequeue_pos: AtomicUsize::new(0),
pad3: [0; 64],
}
}
fn push(&self, value: T) -> Result<(), T> {
let mask = self.mask;
let mut pos = self.enqueue_pos.load(Relaxed);
loop {
let node = &self.buffer[pos & mask];
let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: isize = seq as isize - pos as isize;
if diff == 0 {
let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
if enqueue_pos == pos {
unsafe {
(*node.get()).value = Some(value);
(*node.get()).sequence.store(pos+1, Release);
}
break
} else {
pos = enqueue_pos;
}
} else if diff < 0 {
return Err(value);
} else {
pos = self.enqueue_pos.load(Relaxed);
}
}
Ok(())
}
fn pop(&self) -> Option<T> {
let mask = self.mask;
let mut pos = self.dequeue_pos.load(Relaxed);
loop {
let node = &self.buffer[pos & mask];
let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: isize = seq as isize - (pos + 1) as isize;
if diff == 0 {
let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
if dequeue_pos == pos {
unsafe {
let value = (*node.get()).value.take();
(*node.get()).sequence.store(pos + mask + 1, Release);
return value
}
} else {
pos = dequeue_pos;
}
} else if diff < 0 {
return None
} else {
pos = self.dequeue_pos.load(Relaxed);
}
}
}
}
impl<T> Queue<T> {
pub fn with_capacity(capacity: usize) -> Queue<T> {
Queue{
state: Arc::new(State::with_capacity(capacity))
}
}
pub fn push(&self, value: T) -> Result<(), T> {
self.state.push(value)
}
pub fn pop(&self) -> Option<T> {
self.state.pop()
}
}
impl<T> Clone for Queue<T> {
fn clone(&self) -> Queue<T> {
Queue { state: self.state.clone() }
}
}
#[cfg(test)]
mod tests {
use std::thread;
use std::sync::mpsc::channel;
use super::Queue;
#[test]
fn test() {
let nthreads = 8;
let nmsgs = 1000;
let q = Queue::with_capacity(nthreads*nmsgs);
assert_eq!(None, q.pop());
let (tx, rx) = channel();
for _ in 0..nthreads {
let q = q.clone();
let tx = tx.clone();
thread::spawn(move || {
let q = q;
for i in 0..nmsgs {
assert!(q.push(i).is_ok());
}
tx.send(()).unwrap();
});
}
let mut completion_rxs = vec![];
for _ in 0..nthreads {
let (tx, rx) = channel();
completion_rxs.push(rx);
let q = q.clone();
thread::spawn(move || {
let q = q;
let mut i = 0;
loop {
match q.pop() {
None => {},
Some(_) => {
i += 1;
if i == nmsgs { break }
}
}
}
tx.send(i).unwrap();
});
}
for rx in &mut completion_rxs {
assert_eq!(nmsgs, rx.recv().unwrap());
}
for _ in 0..nthreads {
rx.recv().unwrap();
}
}
}
| Queue | identifier_name |
mpmc_bounded_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
#![allow(missing_docs, dead_code)]
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
// This queue is copy pasted from old rust stdlib.
use std::sync::Arc;
use std::cell::UnsafeCell;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{Relaxed, Release, Acquire};
struct Node<T> {
sequence: AtomicUsize,
value: Option<T>,
}
unsafe impl<T: Send> Send for Node<T> {}
unsafe impl<T: Sync> Sync for Node<T> {}
struct State<T> {
pad0: [u8; 64],
buffer: Vec<UnsafeCell<Node<T>>>,
mask: usize,
pad1: [u8; 64],
enqueue_pos: AtomicUsize,
pad2: [u8; 64],
dequeue_pos: AtomicUsize,
pad3: [u8; 64],
}
unsafe impl<T: Send> Send for State<T> {}
unsafe impl<T: Send + Sync> Sync for State<T> {}
pub struct Queue<T> {
state: Arc<State<T>>,
}
impl<T> State<T> {
fn with_capacity(capacity: usize) -> State<T> {
let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
if capacity < 2 {
2
} else {
// use next power of 2 as capacity
capacity.next_power_of_two()
}
} else {
capacity
};
let buffer = (0..capacity).map(|i| {
UnsafeCell::new(Node { sequence:AtomicUsize::new(i), value: None })
}).collect::<Vec<_>>();
State{
pad0: [0; 64],
buffer: buffer,
mask: capacity-1,
pad1: [0; 64],
enqueue_pos: AtomicUsize::new(0),
pad2: [0; 64],
dequeue_pos: AtomicUsize::new(0),
pad3: [0; 64],
}
}
fn push(&self, value: T) -> Result<(), T> {
let mask = self.mask;
let mut pos = self.enqueue_pos.load(Relaxed);
loop {
let node = &self.buffer[pos & mask];
let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: isize = seq as isize - pos as isize;
if diff == 0 {
let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
if enqueue_pos == pos {
unsafe {
(*node.get()).value = Some(value);
(*node.get()).sequence.store(pos+1, Release);
}
break
} else {
pos = enqueue_pos;
}
} else if diff < 0 {
return Err(value);
} else {
pos = self.enqueue_pos.load(Relaxed);
}
}
Ok(())
}
fn pop(&self) -> Option<T> {
let mask = self.mask;
let mut pos = self.dequeue_pos.load(Relaxed);
loop {
let node = &self.buffer[pos & mask];
let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: isize = seq as isize - (pos + 1) as isize;
if diff == 0 {
let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
if dequeue_pos == pos {
unsafe {
let value = (*node.get()).value.take();
(*node.get()).sequence.store(pos + mask + 1, Release);
return value
}
} else {
pos = dequeue_pos;
}
} else if diff < 0 {
return None
} else {
pos = self.dequeue_pos.load(Relaxed);
}
}
}
}
impl<T> Queue<T> {
pub fn with_capacity(capacity: usize) -> Queue<T> {
Queue{
state: Arc::new(State::with_capacity(capacity))
}
}
pub fn push(&self, value: T) -> Result<(), T> {
self.state.push(value)
}
pub fn pop(&self) -> Option<T> {
self.state.pop()
}
}
impl<T> Clone for Queue<T> {
fn clone(&self) -> Queue<T> {
Queue { state: self.state.clone() }
}
}
#[cfg(test)]
mod tests {
use std::thread;
use std::sync::mpsc::channel;
use super::Queue;
#[test]
fn test() {
let nthreads = 8;
let nmsgs = 1000;
let q = Queue::with_capacity(nthreads*nmsgs);
assert_eq!(None, q.pop());
let (tx, rx) = channel();
for _ in 0..nthreads {
let q = q.clone();
let tx = tx.clone();
thread::spawn(move || {
let q = q;
for i in 0..nmsgs {
assert!(q.push(i).is_ok());
}
tx.send(()).unwrap();
});
}
let mut completion_rxs = vec![];
for _ in 0..nthreads {
let (tx, rx) = channel();
completion_rxs.push(rx);
let q = q.clone();
thread::spawn(move || {
let q = q;
let mut i = 0;
loop {
match q.pop() {
None => | ,
Some(_) => {
i += 1;
if i == nmsgs { break }
}
}
}
tx.send(i).unwrap();
});
}
for rx in &mut completion_rxs {
assert_eq!(nmsgs, rx.recv().unwrap());
}
for _ in 0..nthreads {
rx.recv().unwrap();
}
}
}
| {} | conditional_block |
test_release.py | # Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import datetime
from hypothesis import given
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import (
ObjectType,
Person,
Release,
Timestamp,
TimestampWithTimezone,
)
from swh.web.common.utils import reverse
from swh.web.tests.data import random_sha1
from swh.web.tests.strategies import content, directory, release
from swh.web.tests.utils import check_api_get_responses, check_http_get_response
@given(release())
def test_api_release(api_client, archive_data, release):
url = reverse("api-1-release", url_args={"sha1_git": release})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(release)
target_revision = expected_release["target"]
target_url = reverse(
"api-1-revision",
url_args={"sha1_git": target_revision},
request=rv.wsgi_request,
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
@given(content(), directory(), release())
def test_api_release_target_type_not_a_revision(
api_client, archive_data, content, directory, release
):
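    # Build a synthetic release for each non-revision target type and verify the API response.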
for target_type, target in (
(ObjectType.CONTENT, content),
(ObjectType.DIRECTORY, directory),
(ObjectType.RELEASE, release),
):
if target_type == ObjectType.CONTENT:
target = target["sha1_git"]
sample_release = Release(
author=Person(
email=b"author@company.org",
fullname=b"author <author@company.org>",
name=b"author",
),
date=TimestampWithTimezone(
timestamp=Timestamp(
seconds=int(datetime.now().timestamp()), microseconds=0
),
offset=0,
negative_utc=False,
),
message=b"sample release message",
name=b"sample release",
synthetic=False,
target=hash_to_bytes(target),
target_type=target_type,
)
archive_data.release_add([sample_release])
new_release_id = hash_to_hex(sample_release.id)
url = reverse("api-1-release", url_args={"sha1_git": new_release_id})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(new_release_id)
if target_type == ObjectType.CONTENT:
url_args = {"q": "sha1_git:%s" % target}
else:
url_args = {"sha1_git": target} | )
expected_release["target_url"] = target_url
assert rv.data == expected_release
def test_api_release_not_found(api_client):
unknown_release_ = random_sha1()
url = reverse("api-1-release", url_args={"sha1_git": unknown_release_})
rv = check_api_get_responses(api_client, url, status_code=404)
assert rv.data == {
"exception": "NotFoundExc",
"reason": "Release with sha1_git %s not found." % unknown_release_,
}
@given(release())
def test_api_release_uppercase(api_client, release):
url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release.upper()}
)
resp = check_http_get_response(api_client, url, status_code=302)
redirect_url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release}
)
assert resp["location"] == redirect_url |
target_url = reverse(
"api-1-%s" % target_type.value, url_args=url_args, request=rv.wsgi_request | random_line_split |
test_release.py | # Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import datetime
from hypothesis import given
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import (
ObjectType,
Person,
Release,
Timestamp,
TimestampWithTimezone,
)
from swh.web.common.utils import reverse
from swh.web.tests.data import random_sha1
from swh.web.tests.strategies import content, directory, release
from swh.web.tests.utils import check_api_get_responses, check_http_get_response
@given(release())
def test_api_release(api_client, archive_data, release):
url = reverse("api-1-release", url_args={"sha1_git": release})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(release)
target_revision = expected_release["target"]
target_url = reverse(
"api-1-revision",
url_args={"sha1_git": target_revision},
request=rv.wsgi_request,
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
@given(content(), directory(), release())
def test_api_release_target_type_not_a_revision(
api_client, archive_data, content, directory, release
):
for target_type, target in (
(ObjectType.CONTENT, content),
(ObjectType.DIRECTORY, directory),
(ObjectType.RELEASE, release),
):
if target_type == ObjectType.CONTENT:
target = target["sha1_git"]
sample_release = Release(
author=Person(
email=b"author@company.org",
fullname=b"author <author@company.org>",
name=b"author",
),
date=TimestampWithTimezone(
timestamp=Timestamp(
seconds=int(datetime.now().timestamp()), microseconds=0
),
offset=0,
negative_utc=False,
),
message=b"sample release message",
name=b"sample release",
synthetic=False,
target=hash_to_bytes(target),
target_type=target_type,
)
archive_data.release_add([sample_release])
new_release_id = hash_to_hex(sample_release.id)
url = reverse("api-1-release", url_args={"sha1_git": new_release_id})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(new_release_id)
if target_type == ObjectType.CONTENT:
url_args = {"q": "sha1_git:%s" % target}
else:
url_args = {"sha1_git": target}
target_url = reverse(
"api-1-%s" % target_type.value, url_args=url_args, request=rv.wsgi_request
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
def test_api_release_not_found(api_client):
unknown_release_ = random_sha1()
url = reverse("api-1-release", url_args={"sha1_git": unknown_release_})
rv = check_api_get_responses(api_client, url, status_code=404)
assert rv.data == {
"exception": "NotFoundExc",
"reason": "Release with sha1_git %s not found." % unknown_release_,
}
@given(release())
def | (api_client, release):
url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release.upper()}
)
resp = check_http_get_response(api_client, url, status_code=302)
redirect_url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release}
)
assert resp["location"] == redirect_url
| test_api_release_uppercase | identifier_name |
test_release.py | # Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import datetime
from hypothesis import given
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import (
ObjectType,
Person,
Release,
Timestamp,
TimestampWithTimezone,
)
from swh.web.common.utils import reverse
from swh.web.tests.data import random_sha1
from swh.web.tests.strategies import content, directory, release
from swh.web.tests.utils import check_api_get_responses, check_http_get_response
@given(release())
def test_api_release(api_client, archive_data, release):
url = reverse("api-1-release", url_args={"sha1_git": release})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(release)
target_revision = expected_release["target"]
target_url = reverse(
"api-1-revision",
url_args={"sha1_git": target_revision},
request=rv.wsgi_request,
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
@given(content(), directory(), release())
def test_api_release_target_type_not_a_revision(
api_client, archive_data, content, directory, release
):
for target_type, target in (
(ObjectType.CONTENT, content),
(ObjectType.DIRECTORY, directory),
(ObjectType.RELEASE, release),
):
if target_type == ObjectType.CONTENT:
target = target["sha1_git"]
sample_release = Release(
author=Person(
email=b"author@company.org",
fullname=b"author <author@company.org>",
name=b"author",
),
date=TimestampWithTimezone(
timestamp=Timestamp(
seconds=int(datetime.now().timestamp()), microseconds=0
),
offset=0,
negative_utc=False,
),
message=b"sample release message",
name=b"sample release",
synthetic=False,
target=hash_to_bytes(target),
target_type=target_type,
)
archive_data.release_add([sample_release])
new_release_id = hash_to_hex(sample_release.id)
url = reverse("api-1-release", url_args={"sha1_git": new_release_id})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(new_release_id)
if target_type == ObjectType.CONTENT:
url_args = {"q": "sha1_git:%s" % target}
else:
url_args = {"sha1_git": target}
target_url = reverse(
"api-1-%s" % target_type.value, url_args=url_args, request=rv.wsgi_request
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
def test_api_release_not_found(api_client):
|
@given(release())
def test_api_release_uppercase(api_client, release):
url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release.upper()}
)
resp = check_http_get_response(api_client, url, status_code=302)
redirect_url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release}
)
assert resp["location"] == redirect_url
| unknown_release_ = random_sha1()
url = reverse("api-1-release", url_args={"sha1_git": unknown_release_})
rv = check_api_get_responses(api_client, url, status_code=404)
assert rv.data == {
"exception": "NotFoundExc",
"reason": "Release with sha1_git %s not found." % unknown_release_,
} | identifier_body |
test_release.py | # Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import datetime
from hypothesis import given
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import (
ObjectType,
Person,
Release,
Timestamp,
TimestampWithTimezone,
)
from swh.web.common.utils import reverse
from swh.web.tests.data import random_sha1
from swh.web.tests.strategies import content, directory, release
from swh.web.tests.utils import check_api_get_responses, check_http_get_response
@given(release())
def test_api_release(api_client, archive_data, release):
url = reverse("api-1-release", url_args={"sha1_git": release})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(release)
target_revision = expected_release["target"]
target_url = reverse(
"api-1-revision",
url_args={"sha1_git": target_revision},
request=rv.wsgi_request,
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
@given(content(), directory(), release())
def test_api_release_target_type_not_a_revision(
api_client, archive_data, content, directory, release
):
for target_type, target in (
(ObjectType.CONTENT, content),
(ObjectType.DIRECTORY, directory),
(ObjectType.RELEASE, release),
):
if target_type == ObjectType.CONTENT:
|
sample_release = Release(
author=Person(
email=b"author@company.org",
fullname=b"author <author@company.org>",
name=b"author",
),
date=TimestampWithTimezone(
timestamp=Timestamp(
seconds=int(datetime.now().timestamp()), microseconds=0
),
offset=0,
negative_utc=False,
),
message=b"sample release message",
name=b"sample release",
synthetic=False,
target=hash_to_bytes(target),
target_type=target_type,
)
archive_data.release_add([sample_release])
new_release_id = hash_to_hex(sample_release.id)
url = reverse("api-1-release", url_args={"sha1_git": new_release_id})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(new_release_id)
if target_type == ObjectType.CONTENT:
url_args = {"q": "sha1_git:%s" % target}
else:
url_args = {"sha1_git": target}
target_url = reverse(
"api-1-%s" % target_type.value, url_args=url_args, request=rv.wsgi_request
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
def test_api_release_not_found(api_client):
unknown_release_ = random_sha1()
url = reverse("api-1-release", url_args={"sha1_git": unknown_release_})
rv = check_api_get_responses(api_client, url, status_code=404)
assert rv.data == {
"exception": "NotFoundExc",
"reason": "Release with sha1_git %s not found." % unknown_release_,
}
@given(release())
def test_api_release_uppercase(api_client, release):
url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release.upper()}
)
resp = check_http_get_response(api_client, url, status_code=302)
redirect_url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release}
)
assert resp["location"] == redirect_url
| target = target["sha1_git"] | conditional_block |
win_unittest.py | import os
import sys
# OS Specifics
ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
BINARY_PATH = os.path.join(ABS_WORK_DIR, "firefox", "firefox.exe")
INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.zip")
XPCSHELL_NAME = 'xpcshell.exe'
EXE_SUFFIX = '.exe'
DISABLE_SCREEN_SAVER = False
ADJUST_MOUSE_AND_SCREEN = True
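# These OS-specific toggles feed the preflight command entries defined further below.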
#####
config = {
"buildbot_json_path": "buildprops.json",
"exes": {
'python': sys.executable,
'virtualenv': [sys.executable, 'c:/mozilla-build/buildbotve/virtualenv.py'],
'hg': 'c:/mozilla-build/hg/hg',
'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),
'%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],
},
###
"installer_path": INSTALLER_PATH,
"binary_path": BINARY_PATH,
"xpcshell_name": XPCSHELL_NAME,
"virtualenv_path": 'venv',
"virtualenv_python_dll": os.path.join(os.path.dirname(sys.executable), "python27.dll"),
"find_links": [
"http://pypi.pvt.build.mozilla.org/pub",
"http://pypi.pub.build.mozilla.org/pub",
],
"pip_index": False,
"exe_suffix": EXE_SUFFIX,
"run_file_names": {
"mochitest": "runtests.py",
"webapprt": "runtests.py",
"reftest": "runreftest.py",
"xpcshell": "runxpcshelltests.py",
"cppunittest": "runcppunittests.py",
"jittest": "jit_test.py",
"mozbase": "test.py"
},
"minimum_tests_zip_dirs": ["bin/*", "certs/*", "modules/*", "mozbase/*", "config/*"],
"specific_tests_zip_dirs": {
"mochitest": ["mochitest/*"],
"webapprt": ["mochitest/*"],
"reftest": ["reftest/*", "jsreftest/*"],
"xpcshell": ["xpcshell/*"],
"cppunittest": ["cppunittests/*"],
"jittest": ["jit-test/*"],
"mozbase": ["mozbase/*"]
},
# test harness options are located in the gecko tree
"in_tree_config": "config/mozharness/windows_config.py",
# local mochi suites
"all_mochitest_suites":
{
"plain1": ["--total-chunks=5", "--this-chunk=1", "--chunk-by-dir=4"],
"plain2": ["--total-chunks=5", "--this-chunk=2", "--chunk-by-dir=4"],
"plain3": ["--total-chunks=5", "--this-chunk=3", "--chunk-by-dir=4"],
"plain4": ["--total-chunks=5", "--this-chunk=4", "--chunk-by-dir=4"],
"plain5": ["--total-chunks=5", "--this-chunk=5", "--chunk-by-dir=4"],
"plain": [],
"plain-chunked": ["--chunk-by-dir=4"],
"chrome": ["--chrome"],
"browser-chrome": ["--browser-chrome"],
"browser-chrome-1": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
"browser-chrome-2": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
"browser-chrome-3": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
"browser-chrome-chunked": ["--browser-chrome", "--chunk-by-dir=5"],
"mochitest-gl": ["--manifest=tests/mochitest/tests/dom/canvas/test/mochitest-subsuite-webgl.ini"],
"mochitest-devtools-chrome": ["--browser-chrome", "--subsuite=devtools"],
"mochitest-devtools-chrome-1": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
"mochitest-devtools-chrome-2": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
"mochitest-devtools-chrome-3": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
"mochitest-devtools-chrome-chunked": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5"],
"mochitest-metro-chrome": ["--browser-chrome", "--metro-immersive"],
"jetpack-package": ["--jetpack-package"],
"jetpack-addon": ["--jetpack-addon"],
"a11y": ["--a11y"],
"plugins": ['--setpref=dom.ipc.plugins.enabled=false',
'--setpref=dom.ipc.plugins.enabled.x86_64=false',
'--ipcplugins']
},
# local webapprt suites
"all_webapprt_suites": {
"chrome": ["--webapprt-chrome", "--browser-arg=-test-mode"],
"content": ["--webapprt-content"]
},
# local reftest suites
"all_reftest_suites": {
"reftest": ["tests/reftest/tests/layout/reftests/reftest.list"], | "reftest-ipc": ['--setpref=browser.tabs.remote=true',
'--setpref=browser.tabs.remote.autostart=true',
'--setpref=layers.async-pan-zoom.enabled=true',
'tests/reftest/tests/layout/reftests/reftest-sanity/reftest.list'],
"reftest-no-accel": ["--setpref=gfx.direct2d.disabled=true", "--setpref=layers.acceleration.disabled=true",
"tests/reftest/tests/layout/reftests/reftest.list"],
"reftest-omtc": ["--setpref=layers.offmainthreadcomposition.enabled=true",
"tests/reftest/tests/layout/reftests/reftest.list"],
"crashtest-ipc": ['--setpref=browser.tabs.remote=true',
'--setpref=browser.tabs.remote.autostart=true',
'--setpref=layers.async-pan-zoom.enabled=true',
'tests/reftest/tests/testing/crashtest/crashtests.list'],
},
"all_xpcshell_suites": {
"xpcshell": ["--manifest=tests/xpcshell/tests/all-test-dirs.list",
"%(abs_app_dir)s/" + XPCSHELL_NAME]
},
"all_cppunittest_suites": {
"cppunittest": ['tests/cppunittests']
},
"all_jittest_suites": {
"jittest": []
},
"all_mozbase_suites": {
"mozbase": []
},
"run_cmd_checks_enabled": True,
"preflight_run_cmd_suites": [
# NOTE 'enabled' is only here while we have unconsolidated configs
{
"name": "disable_screen_saver",
"cmd": ["xset", "s", "off", "s", "reset"],
"architectures": ["32bit", "64bit"],
"halt_on_failure": False,
"enabled": DISABLE_SCREEN_SAVER
},
{
"name": "run mouse & screen adjustment script",
"cmd": [
# when configs are consolidated this python path will only show
# for windows.
sys.executable,
"../scripts/external_tools/mouse_and_screen_resolution.py",
"--configuration-url",
"https://hg.mozilla.org/%(repo_path)s/raw-file/%(revision)s/" +
"testing/machine-configuration.json"],
"architectures": ["32bit"],
"halt_on_failure": True,
"enabled": ADJUST_MOUSE_AND_SCREEN
},
],
"repos": [{"repo": "https://hg.mozilla.org/build/tools"}],
"vcs_output_timeout": 1000,
"minidump_stackwalk_path": "%(abs_work_dir)s/tools/breakpad/win32/minidump_stackwalk.exe",
"minidump_save_path": "%(abs_work_dir)s/../minidumps",
"buildbot_max_log_size": 52428800,
"default_blob_upload_servers": [
"https://blobupload.elasticbeanstalk.com",
],
"blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"),
} | "crashtest": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
"jsreftest": ["--extra-profile-file=tests/jsreftest/tests/user.js", "tests/jsreftest/tests/jstests.list"], | random_line_split |
attendances.js | 'use strict';
/**
* Module dependencies.
*/
var mongoose = require('mongoose'),
CurrentModel = mongoose.model('Attendance'),
Schedule = mongoose.model('Schedule'),
Group = mongoose.model('Group'),
_ = require('lodash');
exports.attendance = function(req, res, next, id) {
CurrentModel.load(id, function(err, item) {
if (err) return next(err);
if (!item) return next(new Error('Failed to load item ' + id));
req.attendance = item;
next();
});
};
exports.schedule = function(req, res, next, id) {
Schedule.load(id, function(err, item) {
if (err) return next(err);
if (!item) return next(new Error('Failed to load item ' + id));
req.schedule = item;
next();
});
};
exports.group = function(req, res, next, id) {
Group.load(id, function(err, item) {
if (err) return next(err);
if (!item) return next(new Error('Failed to load item ' + id));
req.group = item;
next();
});
};
exports.create = function(req, res) {
var value = new CurrentModel(req.body);
value.group = req.group;
value.schedule = req.schedule;
value.save(function(err) {
if (err) {
return res.send('users/signup', {
errors: err.errors,
object: value
});
} else {
res.jsonp(value);
}
});
};
exports.update = function(req, res) {
var item = req.attendance;
item = _.extend(item, req.body);
item.save(function(err) {
if (err) {
return res.send('users/signup', {
errors: err.errors,
object: item
});
} else {
res.jsonp(item);
}
});
};
exports.destroy = function(req, res) {
var item = req.attendance;
item.remove(function(err) { | if (err) {
return res.send('users/signup', {
errors: err.errors,
object: item
});
} else {
res.jsonp(item);
}
});
};
exports.show = function(req, res) {
res.jsonp(req.attendance);
};
exports.all = function(req, res) {
CurrentModel.find({ group: req.group, schedule: req.schedule }).populate('participant', 'name email').exec(function(err, items) {
if (err) {
res.render('error', {
status: 500
});
} else {
res.jsonp(items);
}
});
}; | random_line_split | |
test_check.py | """Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with non-ASCII characters
metadata = {'url': 'xxx', 'author': '\u00c9ric',
'author_email': 'xxx', 'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
# let's see if it detects broken rest in long_description
|
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = []
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code:: python
def foo():
pass
"""))
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code-block:: python
def foo():
pass
"""))
for rest_with_code in example_rst_docs:
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
self.assertEqual(len(msgs), 0)
else:
self.assertEqual(len(msgs), 1)
self.assertEqual(
str(msgs[0][1]),
'Cannot analyze code. Pygments package not found.'
)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0) | identifier_body |
test_check.py | """Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def | (self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with non-ASCII characters
metadata = {'url': 'xxx', 'author': '\u00c9ric',
'author_email': 'xxx', 'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = []
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code:: python
def foo():
pass
"""))
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code-block:: python
def foo():
pass
"""))
for rest_with_code in example_rst_docs:
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
self.assertEqual(len(msgs), 0)
else:
self.assertEqual(len(msgs), 1)
self.assertEqual(
str(msgs[0][1]),
'Cannot analyze code. Pygments package not found.'
)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| test_check_metadata | identifier_name |
test_check.py | """Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with non-ASCII characters
metadata = {'url': 'xxx', 'author': '\u00c9ric',
'author_email': 'xxx', 'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = []
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code:: python
def foo():
pass
"""))
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code-block:: python
def foo():
pass
"""))
for rest_with_code in example_rst_docs:
|
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
self.assertEqual(len(msgs), 0)
else:
self.assertEqual(len(msgs), 1)
self.assertEqual(
str(msgs[0][1]),
'Cannot analyze code. Pygments package not found.'
) | conditional_block |
test_check.py | """Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with non-ASCII characters
metadata = {'url': 'xxx', 'author': '\u00c9ric',
'author_email': 'xxx', 'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = []
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code:: python
def foo():
pass
"""))
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code-block:: python
def foo():
pass
"""))
for rest_with_code in example_rst_docs:
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
self.assertEqual(len(msgs), 0) | str(msgs[0][1]),
'Cannot analyze code. Pygments package not found.'
)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite()) | else:
self.assertEqual(len(msgs), 1)
self.assertEqual( | random_line_split |
serverError.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import type * as Proto from '../protocol';
import { TypeScriptVersion } from './versionProvider';
export class TypeScriptServerError extends Error {
public static create(
serverId: string,
version: TypeScriptVersion,
response: Proto.Response
): TypeScriptServerError {
const parsedResult = TypeScriptServerError.parseErrorText(response);
return new TypeScriptServerError(serverId, version, response, parsedResult?.message, parsedResult?.stack, parsedResult?.sanitizedStack);
}
private constructor(
public readonly serverId: string,
public readonly version: TypeScriptVersion,
private readonly response: Proto.Response,
public readonly serverMessage: string | undefined,
public readonly serverStack: string | undefined,
private readonly sanitizedStack: string | undefined
) {
super(`<${serverId}> TypeScript Server Error (${version.displayName})\n${serverMessage}\n${serverStack}`);
}
public get serverErrorText() { return this.response.message; }
public get serverCommand() { return this.response.command; }
public get telemetry() {
// The "sanitizedstack" has been purged of error messages, paths, and file names (other than tsserver)
// and, thus, can be classified as SystemMetaData, rather than CallstackOrException.
/* __GDPR__FRAGMENT__
"TypeScriptRequestErrorProperties" : {
"command" : { "classification": "SystemMetaData", "purpose": "FeatureInsight" },
"serverid" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"sanitizedstack" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"badclient" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" }
}
*/
return {
command: this.serverCommand,
serverid: this.serverId,
sanitizedstack: this.sanitizedStack || '',
badclient: /\bBADCLIENT\b/.test(this.stack || ''),
} as const;
}
/**
* Given an `errorText` from a tsserver request indicating failure in handling a request,
* prepares a payload for telemetry-logging.
*/
private static parseErrorText(response: Proto.Response) {
const errorText = response.message;
if (errorText) {
const errorPrefix = 'Error processing request. ';
if (errorText.startsWith(errorPrefix)) |
}
return undefined;
}
/**
* Drop everything but ".js" and line/column numbers (though retain "tsserver" if that's the filename).
*/
private static sanitizeStack(message: string | undefined) {
if (!message) {
return '';
}
const regex = /(\btsserver)?(\.(?:ts|tsx|js|jsx)(?::\d+(?::\d+)?)?)\)?$/igm;
let serverStack = '';
while (true) {
const match = regex.exec(message);
if (!match) {
break;
}
// [1] is 'tsserver' or undefined
// [2] is '.js:{line_number}:{column_number}'
serverStack += `${match[1] || 'suppressed'}${match[2]}\n`;
}
return serverStack;
}
}
| {
const prefixFreeErrorText = errorText.substr(errorPrefix.length);
const newlineIndex = prefixFreeErrorText.indexOf('\n');
if (newlineIndex >= 0) {
// Newline expected between message and stack.
const stack = prefixFreeErrorText.substring(newlineIndex + 1);
return {
message: prefixFreeErrorText.substring(0, newlineIndex),
stack,
sanitizedStack: TypeScriptServerError.sanitizeStack(stack)
};
}
} | conditional_block |
serverError.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import type * as Proto from '../protocol';
import { TypeScriptVersion } from './versionProvider';
export class TypeScriptServerError extends Error {
public static create(
serverId: string,
version: TypeScriptVersion,
response: Proto.Response
): TypeScriptServerError {
const parsedResult = TypeScriptServerError.parseErrorText(response); |
private constructor(
public readonly serverId: string,
public readonly version: TypeScriptVersion,
private readonly response: Proto.Response,
public readonly serverMessage: string | undefined,
public readonly serverStack: string | undefined,
private readonly sanitizedStack: string | undefined
) {
super(`<${serverId}> TypeScript Server Error (${version.displayName})\n${serverMessage}\n${serverStack}`);
}
public get serverErrorText() { return this.response.message; }
public get serverCommand() { return this.response.command; }
public get telemetry() {
// The "sanitizedstack" has been purged of error messages, paths, and file names (other than tsserver)
// and, thus, can be classified as SystemMetaData, rather than CallstackOrException.
/* __GDPR__FRAGMENT__
"TypeScriptRequestErrorProperties" : {
"command" : { "classification": "SystemMetaData", "purpose": "FeatureInsight" },
"serverid" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"sanitizedstack" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"badclient" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" }
}
*/
return {
command: this.serverCommand,
serverid: this.serverId,
sanitizedstack: this.sanitizedStack || '',
badclient: /\bBADCLIENT\b/.test(this.stack || ''),
} as const;
}
/**
* Given an `errorText` from a tsserver request indicating failure in handling a request,
* prepares a payload for telemetry-logging.
*/
private static parseErrorText(response: Proto.Response) {
const errorText = response.message;
if (errorText) {
const errorPrefix = 'Error processing request. ';
if (errorText.startsWith(errorPrefix)) {
const prefixFreeErrorText = errorText.substr(errorPrefix.length);
const newlineIndex = prefixFreeErrorText.indexOf('\n');
if (newlineIndex >= 0) {
// Newline expected between message and stack.
const stack = prefixFreeErrorText.substring(newlineIndex + 1);
return {
message: prefixFreeErrorText.substring(0, newlineIndex),
stack,
sanitizedStack: TypeScriptServerError.sanitizeStack(stack)
};
}
}
}
return undefined;
}
/**
* Drop everything but ".js" and line/column numbers (though retain "tsserver" if that's the filename).
*/
private static sanitizeStack(message: string | undefined) {
if (!message) {
return '';
}
const regex = /(\btsserver)?(\.(?:ts|tsx|js|jsx)(?::\d+(?::\d+)?)?)\)?$/igm;
let serverStack = '';
while (true) {
const match = regex.exec(message);
if (!match) {
break;
}
// [1] is 'tsserver' or undefined
// [2] is '.js:{line_number}:{column_number}'
serverStack += `${match[1] || 'suppressed'}${match[2]}\n`;
}
return serverStack;
}
} | return new TypeScriptServerError(serverId, version, response, parsedResult?.message, parsedResult?.stack, parsedResult?.sanitizedStack);
} | random_line_split |
serverError.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import type * as Proto from '../protocol';
import { TypeScriptVersion } from './versionProvider';
export class TypeScriptServerError extends Error {
public static create(
serverId: string,
version: TypeScriptVersion,
response: Proto.Response
): TypeScriptServerError {
const parsedResult = TypeScriptServerError.parseErrorText(response);
return new TypeScriptServerError(serverId, version, response, parsedResult?.message, parsedResult?.stack, parsedResult?.sanitizedStack);
}
private constructor(
public readonly serverId: string,
public readonly version: TypeScriptVersion,
private readonly response: Proto.Response,
public readonly serverMessage: string | undefined,
public readonly serverStack: string | undefined,
private readonly sanitizedStack: string | undefined
) {
super(`<${serverId}> TypeScript Server Error (${version.displayName})\n${serverMessage}\n${serverStack}`);
}
public get | () { return this.response.message; }
public get serverCommand() { return this.response.command; }
public get telemetry() {
// The "sanitizedstack" has been purged of error messages, paths, and file names (other than tsserver)
// and, thus, can be classified as SystemMetaData, rather than CallstackOrException.
/* __GDPR__FRAGMENT__
"TypeScriptRequestErrorProperties" : {
"command" : { "classification": "SystemMetaData", "purpose": "FeatureInsight" },
"serverid" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"sanitizedstack" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"badclient" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" }
}
*/
return {
command: this.serverCommand,
serverid: this.serverId,
sanitizedstack: this.sanitizedStack || '',
badclient: /\bBADCLIENT\b/.test(this.stack || ''),
} as const;
}
/**
* Given an `errorText` from a tsserver request indicating failure in handling a request,
* prepares a payload for telemetry-logging.
*/
private static parseErrorText(response: Proto.Response) {
const errorText = response.message;
if (errorText) {
const errorPrefix = 'Error processing request. ';
if (errorText.startsWith(errorPrefix)) {
const prefixFreeErrorText = errorText.substr(errorPrefix.length);
const newlineIndex = prefixFreeErrorText.indexOf('\n');
if (newlineIndex >= 0) {
// Newline expected between message and stack.
const stack = prefixFreeErrorText.substring(newlineIndex + 1);
return {
message: prefixFreeErrorText.substring(0, newlineIndex),
stack,
sanitizedStack: TypeScriptServerError.sanitizeStack(stack)
};
}
}
}
return undefined;
}
/**
* Drop everything but ".js" and line/column numbers (though retain "tsserver" if that's the filename).
*/
private static sanitizeStack(message: string | undefined) {
if (!message) {
return '';
}
const regex = /(\btsserver)?(\.(?:ts|tsx|js|jsx)(?::\d+(?::\d+)?)?)\)?$/igm;
let serverStack = '';
while (true) {
const match = regex.exec(message);
if (!match) {
break;
}
// [1] is 'tsserver' or undefined
// [2] is '.js:{line_number}:{column_number}'
serverStack += `${match[1] || 'suppressed'}${match[2]}\n`;
}
return serverStack;
}
}
| serverErrorText | identifier_name |
serverError.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import type * as Proto from '../protocol';
import { TypeScriptVersion } from './versionProvider';
export class TypeScriptServerError extends Error {
public static create(
serverId: string,
version: TypeScriptVersion,
response: Proto.Response
): TypeScriptServerError {
const parsedResult = TypeScriptServerError.parseErrorText(response);
return new TypeScriptServerError(serverId, version, response, parsedResult?.message, parsedResult?.stack, parsedResult?.sanitizedStack);
}
private constructor(
public readonly serverId: string,
public readonly version: TypeScriptVersion,
private readonly response: Proto.Response,
public readonly serverMessage: string | undefined,
public readonly serverStack: string | undefined,
private readonly sanitizedStack: string | undefined
) {
super(`<${serverId}> TypeScript Server Error (${version.displayName})\n${serverMessage}\n${serverStack}`);
}
public get serverErrorText() |
public get serverCommand() { return this.response.command; }
public get telemetry() {
// The "sanitizedstack" has been purged of error messages, paths, and file names (other than tsserver)
// and, thus, can be classified as SystemMetaData, rather than CallstackOrException.
/* __GDPR__FRAGMENT__
"TypeScriptRequestErrorProperties" : {
"command" : { "classification": "SystemMetaData", "purpose": "FeatureInsight" },
"serverid" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"sanitizedstack" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" },
"badclient" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" }
}
*/
return {
command: this.serverCommand,
serverid: this.serverId,
sanitizedstack: this.sanitizedStack || '',
badclient: /\bBADCLIENT\b/.test(this.stack || ''),
} as const;
}
/**
* Given an `errorText` from a tsserver request indicating failure in handling a request,
* prepares a payload for telemetry-logging.
*/
private static parseErrorText(response: Proto.Response) {
const errorText = response.message;
if (errorText) {
const errorPrefix = 'Error processing request. ';
if (errorText.startsWith(errorPrefix)) {
const prefixFreeErrorText = errorText.substr(errorPrefix.length);
const newlineIndex = prefixFreeErrorText.indexOf('\n');
if (newlineIndex >= 0) {
// Newline expected between message and stack.
const stack = prefixFreeErrorText.substring(newlineIndex + 1);
return {
message: prefixFreeErrorText.substring(0, newlineIndex),
stack,
sanitizedStack: TypeScriptServerError.sanitizeStack(stack)
};
}
}
}
return undefined;
}
/**
* Drop everything but ".js" and line/column numbers (though retain "tsserver" if that's the filename).
*/
private static sanitizeStack(message: string | undefined) {
if (!message) {
return '';
}
const regex = /(\btsserver)?(\.(?:ts|tsx|js|jsx)(?::\d+(?::\d+)?)?)\)?$/igm;
let serverStack = '';
while (true) {
const match = regex.exec(message);
if (!match) {
break;
}
// [1] is 'tsserver' or undefined
// [2] is '.js:{line_number}:{column_number}'
serverStack += `${match[1] || 'suppressed'}${match[2]}\n`;
}
return serverStack;
}
}
| { return this.response.message; } | identifier_body |
alerts.service.ts | import { Injectable } from '@angular/core';
import { TranslateService } from '@ngx-translate/core';
import { Observable, Subject } from 'rxjs';
import { AlertType, Alert } from './alerts.model';
@Injectable({
providedIn: 'root',
})
export class AlertsService {
private subject = new Subject<Alert>();
constructor(private translate: TranslateService) |
getAlert(): Observable<Alert> {
return this.subject.asObservable();
}
success(translationKey: string, values?: object): void {
this.alert(AlertType.SUCCESS, translationKey, values);
}
warn(translationKey: string, values?: object): void {
this.alert(AlertType.WARNING, translationKey, values);
}
error(translationKey: string, values?: object): void {
this.alert(AlertType.DANGER, translationKey, values);
}
info(translationKey: string, values?: object): void {
this.alert(AlertType.INFO, translationKey, values);
}
private alert(type: string, translationKey: string, values?: object): void {
const message = this.translate.instant(translationKey, values);
this.subject.next({ type, message } as Alert);
}
clear(): void {
this.subject.next();
}
}
| {} | identifier_body |
alerts.service.ts | import { Injectable } from '@angular/core';
import { TranslateService } from '@ngx-translate/core';
import { Observable, Subject } from 'rxjs';
import { AlertType, Alert } from './alerts.model';
@Injectable({
providedIn: 'root',
})
export class AlertsService {
private subject = new Subject<Alert>();
constructor(private translate: TranslateService) {}
getAlert(): Observable<Alert> {
return this.subject.asObservable();
}
success(translationKey: string, values?: object): void {
this.alert(AlertType.SUCCESS, translationKey, values);
}
warn(translationKey: string, values?: object): void {
this.alert(AlertType.WARNING, translationKey, values);
}
error(translationKey: string, values?: object): void {
this.alert(AlertType.DANGER, translationKey, values);
}
| (translationKey: string, values?: object): void {
this.alert(AlertType.INFO, translationKey, values);
}
private alert(type: string, translationKey: string, values?: object): void {
const message = this.translate.instant(translationKey, values);
this.subject.next({ type, message } as Alert);
}
clear(): void {
this.subject.next();
}
}
| info | identifier_name |
alerts.service.ts | import { Injectable } from '@angular/core';
import { TranslateService } from '@ngx-translate/core';
import { Observable, Subject } from 'rxjs';
import { AlertType, Alert } from './alerts.model';
@Injectable({
providedIn: 'root', |
getAlert(): Observable<Alert> {
return this.subject.asObservable();
}
success(translationKey: string, values?: object): void {
this.alert(AlertType.SUCCESS, translationKey, values);
}
warn(translationKey: string, values?: object): void {
this.alert(AlertType.WARNING, translationKey, values);
}
error(translationKey: string, values?: object): void {
this.alert(AlertType.DANGER, translationKey, values);
}
info(translationKey: string, values?: object): void {
this.alert(AlertType.INFO, translationKey, values);
}
private alert(type: string, translationKey: string, values?: object): void {
const message = this.translate.instant(translationKey, values);
this.subject.next({ type, message } as Alert);
}
clear(): void {
this.subject.next();
}
} | })
export class AlertsService {
private subject = new Subject<Alert>();
constructor(private translate: TranslateService) {} | random_line_split |
carrera_fecha.controller.ts | import { CarreraFecha } from '../domain/carrera_fecha.model';
import _db from './persistence/db.repository';
import { DateCareerStorageService } from './services/storage/carrera_fecha.storage';
import { DateCareerReadingService } from './services/reading/carrera_fecha.reading';
const createCarreraFecha = async (req, res) => {
try {
const cf_service = new DateCareerStorageService(_db);
const carrera_fecha_model = new CarreraFecha(req.body.fecha, req.body.descripcion, req.body.titulo);
const carrera_fecha = await cf_service.save(carrera_fecha_model);
return res.json(carrera_fecha_model);
} catch (e) {
console.log(e);
return res.json(e);
}
}
const getCarreraFecha = async (req, res) => {
try {
const cf_service = new DateCareerReadingService(_db);
const date_careers = await cf_service.getAllDateCareers();
return res.json(date_careers);
} catch (e) {
return res.json(e);
}
}
const getCarreraFechaMes = async (req, res) => {
try {
const cf_service = new DateCareerReadingService(_db);
const date_careers = await cf_service.getAllDateCareers();
const month = req.body.month - 1;
const filteredCareers = date_careers.filter((date_career) => ((new Date(date_career.fecha)).getMonth() == month))
return res.json(filteredCareers);
} catch (e) {
return res.json(e);
}
}
|
export default { createCarreraFecha, getCarreraFecha, getCarreraFechaMes }; | random_line_split | |
win_pkg.py | # -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = 'win_pkg'
def __virtual__():
if not salt.utils.platform.is_windows():
return False, 'This audit module only runs on windows'
return True
def apply_labels(__data__, labels):
'''
Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
'''
labelled_data = {}
if labels:
labelled_data[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get(__virtualname__, {}):
labelled_test_cases=[]
for test_case in __data__[__virtualname__].get(topkey, []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
labelled_data[__virtualname__][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Audits the locally installed packages (pkg.list_pkgs) against the
blacklist/whitelist entries in the CIS yaml profiles supplied via data_list
'''
__data__ = {}
try:
__pkgdata__ = __salt__['pkg.list_pkgs']()
except CommandExecutionError:
__salt__['pkg.refresh_db']()
__pkgdata__ = __salt__['pkg.list_pkgs']()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('package audit __data__:')
log.debug(__data__)
log.debug('package audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audit_type = tag_data['type']
match_output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if 'blacklist' in audit_type:
if name not in __pkgdata__:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Blacklisted package '{0}' is installed " \
"on the system".format(name)
ret['Failure'].append(tag_data)
# Whitelisted audit (must include)
if 'whitelist' in audit_type:
if name in __pkgdata__:
audit_value = __pkgdata__[name]
tag_data['found_value'] = audit_value
secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite" \
" package '{2}' is not installed on" \
" the system".format(match_output,
tag_data['value_type'],
name)
ret['Failure'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite package" \
" '{2}' is not installed on the system" \
.format(match_output, tag_data['value_type'], name)
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the secedit:blacklist and
secedit:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
# secedit:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
# secedit:whitelist:PasswordComplexity
tags_dict = audit_data.get('data', {})
# secedit:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# secedit:whitelist:PasswordComplexity:data:Windows 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'win_pkg',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _translate_value_type(current, value, evaluator):
if 'equal' in value.lower() and LooseVersion(current) == LooseVersion(evaluator):
return True
if 'less' in value.lower() and LooseVersion(current) <= LooseVersion(evaluator):
|
if 'more' in value.lower() and LooseVersion(current) >= LooseVersion(evaluator):
return True
return False
| return True | conditional_block |
win_pkg.py | # -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = 'win_pkg'
def __virtual__():
if not salt.utils.platform.is_windows():
return False, 'This audit module only runs on windows'
return True
def apply_labels(__data__, labels):
'''
Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
'''
labelled_data = {}
if labels:
labelled_data[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get(__virtualname__, {}):
labelled_test_cases=[]
for test_case in __data__[__virtualname__].get(topkey, []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
labelled_data[__virtualname__][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Audits the locally installed packages (pkg.list_pkgs) against the
blacklist/whitelist entries in the CIS yaml profiles supplied via data_list
'''
__data__ = {}
try:
__pkgdata__ = __salt__['pkg.list_pkgs']()
except CommandExecutionError:
__salt__['pkg.refresh_db']()
__pkgdata__ = __salt__['pkg.list_pkgs']()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('package audit __data__:')
log.debug(__data__)
log.debug('package audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audit_type = tag_data['type']
match_output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if 'blacklist' in audit_type:
if name not in __pkgdata__:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Blacklisted package '{0}' is installed " \
"on the system".format(name)
ret['Failure'].append(tag_data)
# Whitelisted audit (must include)
if 'whitelist' in audit_type:
if name in __pkgdata__:
audit_value = __pkgdata__[name]
tag_data['found_value'] = audit_value
secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite" \
" package '{2}' is not installed on" \
" the system".format(match_output,
tag_data['value_type'],
name)
ret['Failure'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite package" \
" '{2}' is not installed on the system" \
.format(match_output, tag_data['value_type'], name)
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the secedit:blacklist and
secedit:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
# secedit:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
# secedit:whitelist:PasswordComplexity
tags_dict = audit_data.get('data', {})
# secedit:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# secedit:whitelist:PasswordComplexity:data:Windows 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'win_pkg',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _translate_value_type(current, value, evaluator):
| if 'equal' in value.lower() and LooseVersion(current) == LooseVersion(evaluator):
return True
if 'less' in value.lower() and LooseVersion(current) <= LooseVersion(evaluator):
return True
if 'more' in value.lower() and LooseVersion(current) >= LooseVersion(evaluator):
return True
return False | identifier_body | |
win_pkg.py | # -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = 'win_pkg'
def | ():
if not salt.utils.platform.is_windows():
return False, 'This audit module only runs on windows'
return True
def apply_labels(__data__, labels):
'''
Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
'''
labelled_data = {}
if labels:
labelled_data[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get(__virtualname__, {}):
labelled_test_cases=[]
for test_case in __data__[__virtualname__].get(topkey, []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
labelled_data[__virtualname__][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Audits the locally installed packages (pkg.list_pkgs) against the
blacklist/whitelist entries in the CIS yaml profiles supplied via data_list
'''
__data__ = {}
try:
__pkgdata__ = __salt__['pkg.list_pkgs']()
except CommandExecutionError:
__salt__['pkg.refresh_db']()
__pkgdata__ = __salt__['pkg.list_pkgs']()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('package audit __data__:')
log.debug(__data__)
log.debug('package audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audit_type = tag_data['type']
match_output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if 'blacklist' in audit_type:
if name not in __pkgdata__:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Blacklisted package '{0}' is installed " \
"on the system".format(name)
ret['Failure'].append(tag_data)
# Whitelisted audit (must include)
if 'whitelist' in audit_type:
if name in __pkgdata__:
audit_value = __pkgdata__[name]
tag_data['found_value'] = audit_value
secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite" \
" package '{2}' is not installed on" \
" the system".format(match_output,
tag_data['value_type'],
name)
ret['Failure'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite package" \
" '{2}' is not installed on the system" \
.format(match_output, tag_data['value_type'], name)
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the secedit:blacklist and
secedit:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
# secedit:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
# secedit:whitelist:PasswordComplexity
tags_dict = audit_data.get('data', {})
# secedit:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# secedit:whitelist:PasswordComplexity:data:Windows 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'win_pkg',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _translate_value_type(current, value, evaluator):
if 'equal' in value.lower() and LooseVersion(current) == LooseVersion(evaluator):
return True
if 'less' in value.lower() and LooseVersion(current) <= LooseVersion(evaluator):
return True
if 'more' in value.lower() and LooseVersion(current) >= LooseVersion(evaluator):
return True
return False
| __virtual__ | identifier_name |
win_pkg.py | # -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = 'win_pkg'
def __virtual__():
if not salt.utils.platform.is_windows():
return False, 'This audit module only runs on windows'
return True
def apply_labels(__data__, labels):
'''
Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
'''
labelled_data = {}
if labels:
labelled_data[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get(__virtualname__, {}):
labelled_test_cases=[]
for test_case in __data__[__virtualname__].get(topkey, []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
labelled_data[__virtualname__][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Gathers the list of installed packages on the local machine and audits it
against the CIS yaml data provided in data_list
'''
__data__ = {}
try:
__pkgdata__ = __salt__['pkg.list_pkgs']()
except CommandExecutionError:
__salt__['pkg.refresh_db']()
__pkgdata__ = __salt__['pkg.list_pkgs']()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('package audit __data__:')
log.debug(__data__)
log.debug('package audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data) | audit_type = tag_data['type']
match_output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if 'blacklist' in audit_type:
if name not in __pkgdata__:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Blacklisted package '{0}' is installed " \
"on the system".format(name)
ret['Failure'].append(tag_data)
# Whitelisted audit (must include)
if 'whitelist' in audit_type:
if name in __pkgdata__:
audit_value = __pkgdata__[name]
tag_data['found_value'] = audit_value
secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite" \
" package '{2}' is not installed on" \
" the system".format(match_output,
tag_data['value_type'],
name)
ret['Failure'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite package" \
" '{2}' is not installed on the system" \
.format(match_output, tag_data['value_type'], name)
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the secedit:blacklist and
secedit:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
# secedit:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
# secedit:whitelist:PasswordComplexity
tags_dict = audit_data.get('data', {})
# secedit:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# secedit:whitelist:PasswordComplexity:data:Windows 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'win_pkg',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _translate_value_type(current, value, evaluator):
if 'equal' in value.lower() and LooseVersion(current) == LooseVersion(evaluator):
return True
if 'less' in value.lower() and LooseVersion(current) <= LooseVersion(evaluator):
return True
if 'more' in value.lower() and LooseVersion(current) >= LooseVersion(evaluator):
return True
return False | continue
name = tag_data['name'] | random_line_split |
wrapper.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Wrapper definitions on top of Gecko types in order to be used in the style
//! system.
//!
//! This really follows the Servo pattern in
//! `components/script/layout_wrapper.rs`.
//!
//! This theoretically should live in its own crate, but now that it lives in
//! the style system it's kind of pointless in the Stylo case, and only Servo forces
//! the separation between the style system implementation and everything else.
use atomic_refcell::AtomicRefCell;
use data::ElementData;
use dom::{LayoutIterator, NodeInfo, TElement, TNode, UnsafeNode};
use dom::{OpaqueNode, PresentationalHintsSynthetizer};
use element_state::ElementState;
use error_reporting::StdoutErrorReporter;
use gecko::selector_parser::{SelectorImpl, NonTSPseudoClass, PseudoElement};
use gecko::snapshot_helpers;
use gecko_bindings::bindings;
use gecko_bindings::bindings::{Gecko_DropStyleChildrenIterator, Gecko_MaybeCreateStyleChildrenIterator};
use gecko_bindings::bindings::{Gecko_ElementState, Gecko_GetLastChild, Gecko_GetNextStyleChild};
use gecko_bindings::bindings::{Gecko_GetServoDeclarationBlock, Gecko_IsHTMLElementInHTMLDocument};
use gecko_bindings::bindings::{Gecko_IsLink, Gecko_IsRootElement, Gecko_MatchesElement};
use gecko_bindings::bindings::{Gecko_IsUnvisitedLink, Gecko_IsVisitedLink, Gecko_Namespace};
use gecko_bindings::bindings::{Gecko_SetNodeFlags, Gecko_UnsetNodeFlags};
use gecko_bindings::bindings::Gecko_ClassOrClassList;
use gecko_bindings::bindings::Gecko_GetStyleContext;
use gecko_bindings::structs;
use gecko_bindings::structs::{RawGeckoElement, RawGeckoNode};
use gecko_bindings::structs::{nsIAtom, nsIContent, nsStyleContext};
use gecko_bindings::structs::NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE;
use parking_lot::RwLock;
use parser::ParserContextExtraData;
use properties::{ComputedValues, parse_style_attribute};
use properties::PropertyDeclarationBlock;
use selector_parser::{ElementExt, Snapshot};
use selectors::Element;
use selectors::parser::{AttrSelector, NamespaceConstraint};
use servo_url::ServoUrl;
use sink::Push;
use std::fmt;
use std::ptr;
use std::sync::Arc;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
use stylist::ApplicableDeclarationBlock;
/// A simple wrapper over a non-null Gecko node (`nsINode`) pointer.
///
/// Important: We don't currently refcount the DOM, because the wrapper lifetime
/// magic guarantees that our LayoutFoo references won't outlive the root, and
/// we don't mutate any of the references on the Gecko side during restyle.
///
/// We could implement refcounting if need be (at a potentially non-trivial
/// performance cost) by implementing Drop and making LayoutFoo non-Copy.
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(el) = self.as_element() {
el.fmt(f)
} else {
if self.is_text_node() {
write!(f, "<text node> ({:#x})", self.opaque().0)
} else {
write!(f, "<non-text node> ({:#x})", self.opaque().0)
}
}
}
}
impl<'ln> GeckoNode<'ln> {
fn from_content(content: &'ln nsIContent) -> Self {
GeckoNode(&content._base)
}
fn node_info(&self) -> &structs::NodeInfo {
debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
unsafe { &*self.0.mNodeInfo.mRawPtr }
}
fn first_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
}
fn last_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
}
fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
}
fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
}
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
fn is_element(&self) -> bool {
use gecko_bindings::structs::nsINode_BooleanFlag;
self.0.mBoolFlags & (1u32 << nsINode_BooleanFlag::NodeIsElement as u32) != 0
}
fn is_text_node(&self) -> bool {
// This is a DOM constant that isn't going to change.
const TEXT_NODE: u16 = 3;
self.node_info().mInner.mNodeType == TEXT_NODE
}
}
impl<'ln> TNode for GeckoNode<'ln> {
type ConcreteElement = GeckoElement<'ln>;
type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko's node doesn't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless of
// whether they use viewport percentage sizes or not.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kinds of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, given otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base url in order to get it where we don't have any available.
///
/// We need to get rid of this sooner than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother in doing an ffi call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
| fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
self.get_state().contains(pseudo_class.state_flag())
},
NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null otherwise.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()))
}
}
fn match_attr_equals(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_equals_ignore_ascii_case(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_includes(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrIncludes(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_dash(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrDashEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_prefix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasPrefix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_substring(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSubstring(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_suffix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSuffix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
}
impl<'le> ElementExt for GeckoElement<'le> {
#[inline]
fn is_link(&self) -> bool {
self.match_non_ts_pseudo_class(NonTSPseudoClass::AnyLink)
}
#[inline]
fn matches_user_and_author_rules(&self) -> bool {
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) == 0
}
}
| unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
| identifier_body |
wrapper.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Wrapper definitions on top of Gecko types in order to be used in the style
//! system.
//!
//! This really follows the Servo pattern in
//! `components/script/layout_wrapper.rs`.
//!
//! This theoretically should live in its own crate, but now that it lives in
//! the style system it's kind of pointless in the Stylo case, and only Servo forces
//! the separation between the style system implementation and everything else.
use atomic_refcell::AtomicRefCell;
use data::ElementData;
use dom::{LayoutIterator, NodeInfo, TElement, TNode, UnsafeNode};
use dom::{OpaqueNode, PresentationalHintsSynthetizer};
use element_state::ElementState;
use error_reporting::StdoutErrorReporter;
use gecko::selector_parser::{SelectorImpl, NonTSPseudoClass, PseudoElement};
use gecko::snapshot_helpers;
use gecko_bindings::bindings;
use gecko_bindings::bindings::{Gecko_DropStyleChildrenIterator, Gecko_MaybeCreateStyleChildrenIterator};
use gecko_bindings::bindings::{Gecko_ElementState, Gecko_GetLastChild, Gecko_GetNextStyleChild};
use gecko_bindings::bindings::{Gecko_GetServoDeclarationBlock, Gecko_IsHTMLElementInHTMLDocument};
use gecko_bindings::bindings::{Gecko_IsLink, Gecko_IsRootElement, Gecko_MatchesElement};
use gecko_bindings::bindings::{Gecko_IsUnvisitedLink, Gecko_IsVisitedLink, Gecko_Namespace};
use gecko_bindings::bindings::{Gecko_SetNodeFlags, Gecko_UnsetNodeFlags};
use gecko_bindings::bindings::Gecko_ClassOrClassList;
use gecko_bindings::bindings::Gecko_GetStyleContext;
use gecko_bindings::structs;
use gecko_bindings::structs::{RawGeckoElement, RawGeckoNode};
use gecko_bindings::structs::{nsIAtom, nsIContent, nsStyleContext};
use gecko_bindings::structs::NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE;
use parking_lot::RwLock;
use parser::ParserContextExtraData;
use properties::{ComputedValues, parse_style_attribute};
use properties::PropertyDeclarationBlock;
use selector_parser::{ElementExt, Snapshot};
use selectors::Element;
use selectors::parser::{AttrSelector, NamespaceConstraint};
use servo_url::ServoUrl;
use sink::Push;
use std::fmt;
use std::ptr;
use std::sync::Arc;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
use stylist::ApplicableDeclarationBlock;
/// A simple wrapper over a non-null Gecko node (`nsINode`) pointer.
///
/// Important: We don't currently refcount the DOM, because the wrapper lifetime
/// magic guarantees that our LayoutFoo references won't outlive the root, and
/// we don't mutate any of the references on the Gecko side during restyle.
///
/// We could implement refcounting if need be (at a potentially non-trivial
/// performance cost) by implementing Drop and making LayoutFoo non-Copy.
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(el) = self.as_element() {
el.fmt(f)
} else {
if self.is_text_node() {
write!(f, "<text node> ({:#x})", self.opaque().0)
} else {
write!(f, "<non-text node> ({:#x})", self.opaque().0)
}
}
}
}
impl<'ln> GeckoNode<'ln> {
fn from_content(content: &'ln nsIContent) -> Self {
GeckoNode(&content._base)
}
fn node_info(&self) -> &structs::NodeInfo {
debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
unsafe { &*self.0.mNodeInfo.mRawPtr }
}
fn first_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
}
fn last_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
}
fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
}
fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
}
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
fn is_element(&self) -> bool {
use gecko_bindings::structs::nsINode_BooleanFlag;
self.0.mBoolFlags & (1u32 << nsINode_BooleanFlag::NodeIsElement as u32) != 0
}
fn is_text_node(&self) -> bool {
// This is a DOM constant that isn't going to change.
const TEXT_NODE: u16 = 3;
self.node_info().mInner.mNodeType == TEXT_NODE
}
}
impl<'ln> TNode for GeckoNode<'ln> {
type ConcreteElement = GeckoElement<'ln>;
type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko's node doesn't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless of
// whether they use viewport percentage sizes or not.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kinds of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, given otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base url in order to get it where we don't have any available.
///
/// We need to get rid of this sooner than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother in doing an ffi call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
| NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null otherwise.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()))
}
}
fn match_attr_equals(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_equals_ignore_ascii_case(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_includes(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrIncludes(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_dash(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrDashEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_prefix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasPrefix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_substring(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSubstring(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_suffix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSuffix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
}
impl<'le> ElementExt for GeckoElement<'le> {
#[inline]
fn is_link(&self) -> bool {
self.match_non_ts_pseudo_class(NonTSPseudoClass::AnyLink)
}
#[inline]
fn matches_user_and_author_rules(&self) -> bool {
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) == 0
}
}
| self.get_state().contains(pseudo_class.state_flag())
},
| conditional_block |
wrapper.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Wrapper definitions on top of Gecko types in order to be used in the style
//! system.
//!
//! This really follows the Servo pattern in
//! `components/script/layout_wrapper.rs`.
//!
//! This theoretically should live in its own crate, but now that it lives in
//! the style system it's kind of pointless in the Stylo case, and only Servo forces
//! the separation between the style system implementation and everything else.
use atomic_refcell::AtomicRefCell;
use data::ElementData;
use dom::{LayoutIterator, NodeInfo, TElement, TNode, UnsafeNode};
use dom::{OpaqueNode, PresentationalHintsSynthetizer};
use element_state::ElementState;
use error_reporting::StdoutErrorReporter;
use gecko::selector_parser::{SelectorImpl, NonTSPseudoClass, PseudoElement};
use gecko::snapshot_helpers;
use gecko_bindings::bindings;
use gecko_bindings::bindings::{Gecko_DropStyleChildrenIterator, Gecko_MaybeCreateStyleChildrenIterator};
use gecko_bindings::bindings::{Gecko_ElementState, Gecko_GetLastChild, Gecko_GetNextStyleChild};
use gecko_bindings::bindings::{Gecko_GetServoDeclarationBlock, Gecko_IsHTMLElementInHTMLDocument};
use gecko_bindings::bindings::{Gecko_IsLink, Gecko_IsRootElement, Gecko_MatchesElement};
use gecko_bindings::bindings::{Gecko_IsUnvisitedLink, Gecko_IsVisitedLink, Gecko_Namespace};
use gecko_bindings::bindings::{Gecko_SetNodeFlags, Gecko_UnsetNodeFlags};
use gecko_bindings::bindings::Gecko_ClassOrClassList;
use gecko_bindings::bindings::Gecko_GetStyleContext;
use gecko_bindings::structs;
use gecko_bindings::structs::{RawGeckoElement, RawGeckoNode};
use gecko_bindings::structs::{nsIAtom, nsIContent, nsStyleContext};
use gecko_bindings::structs::NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE;
use parking_lot::RwLock;
use parser::ParserContextExtraData;
use properties::{ComputedValues, parse_style_attribute};
use properties::PropertyDeclarationBlock;
use selector_parser::{ElementExt, Snapshot};
use selectors::Element;
use selectors::parser::{AttrSelector, NamespaceConstraint};
use servo_url::ServoUrl;
use sink::Push;
use std::fmt;
use std::ptr;
use std::sync::Arc;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
use stylist::ApplicableDeclarationBlock;
/// A simple wrapper over a non-null Gecko node (`nsINode`) pointer.
///
/// Important: We don't currently refcount the DOM, because the wrapper lifetime
/// magic guarantees that our LayoutFoo references won't outlive the root, and
/// we don't mutate any of the references on the Gecko side during restyle.
///
/// We could implement refcounting if need be (at a potentially non-trivial
/// performance cost) by implementing Drop and making LayoutFoo non-Copy.
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(el) = self.as_element() {
el.fmt(f)
} else {
if self.is_text_node() {
write!(f, "<text node> ({:#x})", self.opaque().0)
} else {
write!(f, "<non-text node> ({:#x})", self.opaque().0)
}
}
}
}
impl<'ln> GeckoNode<'ln> {
fn from_content(content: &'ln nsIContent) -> Self {
GeckoNode(&content._base)
}
fn node_info(&self) -> &structs::NodeInfo {
debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
unsafe { &*self.0.mNodeInfo.mRawPtr }
}
fn first_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
}
fn last_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
}
fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
}
fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
}
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
fn is_element(&self) -> bool {
use gecko_bindings::structs::nsINode_BooleanFlag;
self.0.mBoolFlags & (1u32 << nsINode_BooleanFlag::NodeIsElement as u32) != 0
}
fn is_text_node(&self) -> bool {
// This is a DOM constant that isn't going to change.
const TEXT_NODE: u16 = 3;
self.node_info().mInner.mNodeType == TEXT_NODE
}
}
impl<'ln> TNode for GeckoNode<'ln> {
type ConcreteElement = GeckoElement<'ln>;
type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko's node doesn't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless of
// whether they use viewport percentage sizes or not.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kinds of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, since otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base URL to use where we don't have a real one available.
///
/// We need to get rid of this sooner rather than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother doing an FFI call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
self.get_state().contains(pseudo_class.state_flag())
},
NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null if the selector matches any namespace.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()))
}
}
fn match_attr_equals(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_equals_ignore_ascii_case(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_includes(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrIncludes(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_dash(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrDashEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_prefix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasPrefix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_substring(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSubstring(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_suffix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool { | }
}
}
impl<'le> ElementExt for GeckoElement<'le> {
#[inline]
fn is_link(&self) -> bool {
self.match_non_ts_pseudo_class(NonTSPseudoClass::AnyLink)
}
#[inline]
fn matches_user_and_author_rules(&self) -> bool {
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) == 0
}
} | unsafe {
bindings::Gecko_AttrHasSuffix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr()) | random_line_split |
wrapper.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Wrapper definitions on top of Gecko types in order to be used in the style
//! system.
//!
//! This really follows the Servo pattern in
//! `components/script/layout_wrapper.rs`.
//!
//! This theoretically should live in its own crate, but now it lives in the
//! style system it's kind of pointless in the Stylo case, and only Servo forces
//! the separation between the style system implementation and everything else.
use atomic_refcell::AtomicRefCell;
use data::ElementData;
use dom::{LayoutIterator, NodeInfo, TElement, TNode, UnsafeNode};
use dom::{OpaqueNode, PresentationalHintsSynthetizer};
use element_state::ElementState;
use error_reporting::StdoutErrorReporter;
use gecko::selector_parser::{SelectorImpl, NonTSPseudoClass, PseudoElement};
use gecko::snapshot_helpers;
use gecko_bindings::bindings;
use gecko_bindings::bindings::{Gecko_DropStyleChildrenIterator, Gecko_MaybeCreateStyleChildrenIterator};
use gecko_bindings::bindings::{Gecko_ElementState, Gecko_GetLastChild, Gecko_GetNextStyleChild};
use gecko_bindings::bindings::{Gecko_GetServoDeclarationBlock, Gecko_IsHTMLElementInHTMLDocument};
use gecko_bindings::bindings::{Gecko_IsLink, Gecko_IsRootElement, Gecko_MatchesElement};
use gecko_bindings::bindings::{Gecko_IsUnvisitedLink, Gecko_IsVisitedLink, Gecko_Namespace};
use gecko_bindings::bindings::{Gecko_SetNodeFlags, Gecko_UnsetNodeFlags};
use gecko_bindings::bindings::Gecko_ClassOrClassList;
use gecko_bindings::bindings::Gecko_GetStyleContext;
use gecko_bindings::structs;
use gecko_bindings::structs::{RawGeckoElement, RawGeckoNode};
use gecko_bindings::structs::{nsIAtom, nsIContent, nsStyleContext};
use gecko_bindings::structs::NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE;
use parking_lot::RwLock;
use parser::ParserContextExtraData;
use properties::{ComputedValues, parse_style_attribute};
use properties::PropertyDeclarationBlock;
use selector_parser::{ElementExt, Snapshot};
use selectors::Element;
use selectors::parser::{AttrSelector, NamespaceConstraint};
use servo_url::ServoUrl;
use sink::Push;
use std::fmt;
use std::ptr;
use std::sync::Arc;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
use stylist::ApplicableDeclarationBlock;
/// A simple wrapper over a non-null Gecko node (`nsINode`) pointer.
///
/// Important: We don't currently refcount the DOM, because the wrapper lifetime
/// magic guarantees that our LayoutFoo references won't outlive the root, and
/// we don't mutate any of the references on the Gecko side during restyle.
///
/// We could implement refcounting if need be (at a potentially non-trivial
/// performance cost) by implementing Drop and making LayoutFoo non-Copy.
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(el) = self.as_element() {
el.fmt(f)
} else {
if self.is_text_node() {
write!(f, "<text node> ({:#x})", self.opaque().0)
} else {
write!(f, "<non-text node> ({:#x})", self.opaque().0)
}
}
}
}
impl<'ln> GeckoNode<'ln> {
fn from_content(content: &'ln nsIContent) -> Self {
GeckoNode(&content._base)
}
fn node_info(&self) -> &structs::NodeInfo {
debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
unsafe { &*self.0.mNodeInfo.mRawPtr }
}
fn first_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
}
fn last_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
}
fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
}
fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
}
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
fn is_element(&self) -> bool {
use gecko_bindings::structs::nsINode_BooleanFlag;
self.0.mBoolFlags & (1u32 << nsINode_BooleanFlag::NodeIsElement as u32) != 0
}
fn is_text_node(&self) -> bool {
// This is a DOM constant that isn't going to change.
const TEXT_NODE: u16 = 3;
self.node_info().mInner.mNodeType == TEXT_NODE
}
}
impl<'ln> TNode for GeckoNode<'ln> {
type ConcreteElement = GeckoElement<'ln>;
type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko nodes don't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless of
// whether they use viewport percentage sizes.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kinds of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, since otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base URL to use where we don't have a real one available.
///
/// We need to get rid of this sooner rather than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother doing an FFI call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unse | lf) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
self.get_state().contains(pseudo_class.state_flag())
},
NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null if the selector matches any namespace.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()))
}
}
fn match_attr_equals(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_equals_ignore_ascii_case(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_includes(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrIncludes(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_dash(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrDashEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_prefix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasPrefix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_substring(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSubstring(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_suffix(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrHasSuffix(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
}
impl<'le> ElementExt for GeckoElement<'le> {
#[inline]
fn is_link(&self) -> bool {
self.match_non_ts_pseudo_class(NonTSPseudoClass::AnyLink)
}
#[inline]
fn matches_user_and_author_rules(&self) -> bool {
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) == 0
}
}
| t_dirty_descendants(&se | identifier_name |
csvl10n.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""classes that hold units of comma-separated values (.csv) files (csvunit)
or entire files (csvfile) for use with localisation
"""
import csv
from translate.misc import sparse
from translate.storage import base
class SimpleDictReader:
def __init__(self, fileobj, fieldnames):
self.fieldnames = fieldnames
self.contents = fileobj.read()
self.parser = sparse.SimpleParser(defaulttokenlist=[",", "\n"], whitespacechars="\r")
self.parser.stringescaping = 0
self.parser.quotechars = '"'
self.tokens = self.parser.tokenize(self.contents)
self.tokenpos = 0
def __iter__(self):
return self
def getvalue(self, value):
"""returns a value, evaluating strings as necessary"""
if (value.startswith("'") and value.endswith("'")) or (value.startswith('"') and value.endswith('"')):
return sparse.stringeval(value)
else:
return value
def next(self):
lentokens = len(self.tokens)
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
if self.tokenpos >= lentokens:
raise StopIteration()
thistokens = []
while self.tokenpos < lentokens and self.tokens[self.tokenpos] != "\n":
thistokens.append(self.tokens[self.tokenpos])
self.tokenpos += 1
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
fields = []
# patch together fields since we can have quotes inside a field
currentfield = ''
fieldparts = 0
for token in thistokens:
if token == ',':
# a field is only quoted if the whole thing is quoted
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
currentfield = ''
fieldparts = 0
else:
currentfield += token
fieldparts += 1
# things after the last comma...
if fieldparts:
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
values = {}
for fieldnum in range(len(self.fieldnames)):
if fieldnum >= len(fields):
values[self.fieldnames[fieldnum]] = ""
else:
values[self.fieldnames[fieldnum]] = fields[fieldnum]
return values
class csvunit(base.TranslationUnit):
spreadsheetescapes = [("+", "\\+"), ("-", "\\-"), ("=", "\\="), ("'", "\\'")]
def __init__(self, source=None):
super(csvunit, self).__init__(source)
self.comment = ""
self.source = source
self.target = ""
def add_spreadsheet_escapes(self, source, target):
"""add common spreadsheet escapes to two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(unescaped):
source = source.replace(unescaped, escaped, 1)
if target.startswith(unescaped):
target = target.replace(unescaped, escaped, 1)
return source, target
def remove_spreadsheet_escapes(self, source, target):
"""remove common spreadsheet escapes from two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(escaped):
source = source.replace(escaped, unescaped, 1)
if target.startswith(escaped):
target = target.replace(escaped, unescaped, 1)
return source, target
def fromdict(self, cedict):
self.comment = cedict.get('location', '').decode('utf-8')
self.source = cedict.get('source', '').decode('utf-8')
self.target = cedict.get('target', '').decode('utf-8')
if self.comment is None:
self.comment = ''
if self.source is None:
self.source = ''
if self.target is None:
self.target = ''
self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)
def todict(self, encoding='utf-8'):
comment, source, target = self.comment, self.source, self.target
source, target = self.add_spreadsheet_escapes(source, target)
if isinstance(comment, unicode):
comment = comment.encode(encoding)
if isinstance(source, unicode):
source = source.encode(encoding)
if isinstance(target, unicode):
target = target.encode(encoding)
return {'location': comment, 'source': source, 'target': target}
class csvfile(base.TranslationStore):
"""This class represents a .csv file with various lines.
The default format contains three columns: location, source, target"""
UnitClass = csvunit
Name = _("Comma Separated Value")
Mimetypes = ['text/comma-separated-values', 'text/csv']
Extensions = ["csv"]
def __init__(self, inputfile=None, fieldnames=None):
|
def parse(self, csvsrc):
csvfile = csv.StringIO(csvsrc)
reader = SimpleDictReader(csvfile, self.fieldnames)
for row in reader:
newce = self.UnitClass()
newce.fromdict(row)
self.addunit(newce)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(getattr(self, "encoding", "UTF-8"))
return source
def getoutput(self):
csvfile = csv.StringIO()
writer = csv.DictWriter(csvfile, self.fieldnames)
for ce in self.units:
cedict = ce.todict()
writer.writerow(cedict)
csvfile.reset()
return "".join(csvfile.readlines())
| base.TranslationStore.__init__(self, unitclass=self.UnitClass)
self.units = []
if fieldnames is None:
self.fieldnames = ['location', 'source', 'target']
else:
if isinstance(fieldnames, basestring):
fieldnames = [fieldname.strip() for fieldname in fieldnames.split(",")]
self.fieldnames = fieldnames
self.filename = getattr(inputfile, 'name', '')
if inputfile is not None:
csvsrc = inputfile.read()
inputfile.close()
self.parse(csvsrc) | identifier_body |
csvl10n.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""classes that hold units of comma-separated values (.csv) files (csvunit)
or entire files (csvfile) for use with localisation
"""
import csv
from translate.misc import sparse
from translate.storage import base
class SimpleDictReader:
def __init__(self, fileobj, fieldnames):
self.fieldnames = fieldnames
self.contents = fileobj.read()
self.parser = sparse.SimpleParser(defaulttokenlist=[",", "\n"], whitespacechars="\r")
self.parser.stringescaping = 0
self.parser.quotechars = '"'
self.tokens = self.parser.tokenize(self.contents)
self.tokenpos = 0
def __iter__(self):
return self
def getvalue(self, value):
"""returns a value, evaluating strings as necessary"""
if (value.startswith("'") and value.endswith("'")) or (value.startswith('"') and value.endswith('"')):
return sparse.stringeval(value)
else:
return value
def next(self):
lentokens = len(self.tokens)
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
if self.tokenpos >= lentokens:
raise StopIteration()
thistokens = []
while self.tokenpos < lentokens and self.tokens[self.tokenpos] != "\n":
thistokens.append(self.tokens[self.tokenpos])
self.tokenpos += 1
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
fields = []
# patch together fields since we can have quotes inside a field
currentfield = ''
fieldparts = 0
for token in thistokens:
if token == ',':
# a field is only quoted if the whole thing is quoted
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
currentfield = ''
fieldparts = 0
else:
currentfield += token
fieldparts += 1
# things after the last comma...
if fieldparts:
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
values = {}
for fieldnum in range(len(self.fieldnames)):
if fieldnum >= len(fields):
values[self.fieldnames[fieldnum]] = ""
else:
values[self.fieldnames[fieldnum]] = fields[fieldnum]
return values
class csvunit(base.TranslationUnit):
spreadsheetescapes = [("+", "\\+"), ("-", "\\-"), ("=", "\\="), ("'", "\\'")]
def __init__(self, source=None):
super(csvunit, self).__init__(source)
self.comment = ""
self.source = source
self.target = ""
def add_spreadsheet_escapes(self, source, target):
"""add common spreadsheet escapes to two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(unescaped):
source = source.replace(unescaped, escaped, 1)
if target.startswith(unescaped):
target = target.replace(unescaped, escaped, 1)
return source, target
def remove_spreadsheet_escapes(self, source, target):
"""remove common spreadsheet escapes from two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(escaped):
source = source.replace(escaped, unescaped, 1)
if target.startswith(escaped):
target = target.replace(escaped, unescaped, 1)
return source, target
def fromdict(self, cedict):
self.comment = cedict.get('location', '').decode('utf-8')
self.source = cedict.get('source', '').decode('utf-8')
self.target = cedict.get('target', '').decode('utf-8')
if self.comment is None:
self.comment = ''
if self.source is None:
self.source = ''
if self.target is None:
self.target = ''
self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)
def todict(self, encoding='utf-8'):
comment, source, target = self.comment, self.source, self.target
source, target = self.add_spreadsheet_escapes(source, target)
if isinstance(comment, unicode):
comment = comment.encode(encoding)
if isinstance(source, unicode):
source = source.encode(encoding)
if isinstance(target, unicode):
target = target.encode(encoding)
return {'location': comment, 'source': source, 'target': target}
class csvfile(base.TranslationStore):
"""This class represents a .csv file with various lines.
The default format contains three columns: location, source, target"""
UnitClass = csvunit
Name = _("Comma Separated Value")
Mimetypes = ['text/comma-separated-values', 'text/csv']
Extensions = ["csv"]
def __init__(self, inputfile=None, fieldnames=None):
base.TranslationStore.__init__(self, unitclass=self.UnitClass)
self.units = []
if fieldnames is None:
self.fieldnames = ['location', 'source', 'target']
else:
|
self.filename = getattr(inputfile, 'name', '')
if inputfile is not None:
csvsrc = inputfile.read()
inputfile.close()
self.parse(csvsrc)
def parse(self, csvsrc):
csvfile = csv.StringIO(csvsrc)
reader = SimpleDictReader(csvfile, self.fieldnames)
for row in reader:
newce = self.UnitClass()
newce.fromdict(row)
self.addunit(newce)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(getattr(self, "encoding", "UTF-8"))
return source
def getoutput(self):
csvfile = csv.StringIO()
writer = csv.DictWriter(csvfile, self.fieldnames)
for ce in self.units:
cedict = ce.todict()
writer.writerow(cedict)
csvfile.reset()
return "".join(csvfile.readlines())
| if isinstance(fieldnames, basestring):
fieldnames = [fieldname.strip() for fieldname in fieldnames.split(",")]
self.fieldnames = fieldnames | conditional_block |
csvl10n.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""classes that hold units of comma-separated values (.csv) files (csvunit)
or entire files (csvfile) for use with localisation
"""
import csv
from translate.misc import sparse
from translate.storage import base
class SimpleDictReader:
def | (self, fileobj, fieldnames):
self.fieldnames = fieldnames
self.contents = fileobj.read()
self.parser = sparse.SimpleParser(defaulttokenlist=[",", "\n"], whitespacechars="\r")
self.parser.stringescaping = 0
self.parser.quotechars = '"'
self.tokens = self.parser.tokenize(self.contents)
self.tokenpos = 0
def __iter__(self):
return self
def getvalue(self, value):
"""returns a value, evaluating strings as necessary"""
if (value.startswith("'") and value.endswith("'")) or (value.startswith('"') and value.endswith('"')):
return sparse.stringeval(value)
else:
return value
def next(self):
lentokens = len(self.tokens)
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
if self.tokenpos >= lentokens:
raise StopIteration()
thistokens = []
while self.tokenpos < lentokens and self.tokens[self.tokenpos] != "\n":
thistokens.append(self.tokens[self.tokenpos])
self.tokenpos += 1
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
fields = []
# patch together fields since we can have quotes inside a field
currentfield = ''
fieldparts = 0
for token in thistokens:
if token == ',':
# a field is only quoted if the whole thing is quoted
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
currentfield = ''
fieldparts = 0
else:
currentfield += token
fieldparts += 1
# things after the last comma...
if fieldparts:
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
values = {}
for fieldnum in range(len(self.fieldnames)):
if fieldnum >= len(fields):
values[self.fieldnames[fieldnum]] = ""
else:
values[self.fieldnames[fieldnum]] = fields[fieldnum]
return values
class csvunit(base.TranslationUnit):
spreadsheetescapes = [("+", "\\+"), ("-", "\\-"), ("=", "\\="), ("'", "\\'")]
def __init__(self, source=None):
super(csvunit, self).__init__(source)
self.comment = ""
self.source = source
self.target = ""
def add_spreadsheet_escapes(self, source, target):
"""add common spreadsheet escapes to two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(unescaped):
source = source.replace(unescaped, escaped, 1)
if target.startswith(unescaped):
target = target.replace(unescaped, escaped, 1)
return source, target
def remove_spreadsheet_escapes(self, source, target):
"""remove common spreadsheet escapes from two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(escaped):
source = source.replace(escaped, unescaped, 1)
if target.startswith(escaped):
target = target.replace(escaped, unescaped, 1)
return source, target
def fromdict(self, cedict):
self.comment = cedict.get('location', '').decode('utf-8')
self.source = cedict.get('source', '').decode('utf-8')
self.target = cedict.get('target', '').decode('utf-8')
if self.comment is None:
self.comment = ''
if self.source is None:
self.source = ''
if self.target is None:
self.target = ''
self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)
def todict(self, encoding='utf-8'):
comment, source, target = self.comment, self.source, self.target
source, target = self.add_spreadsheet_escapes(source, target)
if isinstance(comment, unicode):
comment = comment.encode(encoding)
if isinstance(source, unicode):
source = source.encode(encoding)
if isinstance(target, unicode):
target = target.encode(encoding)
return {'location': comment, 'source': source, 'target': target}
class csvfile(base.TranslationStore):
"""This class represents a .csv file with various lines.
The default format contains three columns: location, source, target"""
UnitClass = csvunit
Name = _("Comma Separated Value")
Mimetypes = ['text/comma-separated-values', 'text/csv']
Extensions = ["csv"]
def __init__(self, inputfile=None, fieldnames=None):
base.TranslationStore.__init__(self, unitclass=self.UnitClass)
self.units = []
if fieldnames is None:
self.fieldnames = ['location', 'source', 'target']
else:
if isinstance(fieldnames, basestring):
fieldnames = [fieldname.strip() for fieldname in fieldnames.split(",")]
self.fieldnames = fieldnames
self.filename = getattr(inputfile, 'name', '')
if inputfile is not None:
csvsrc = inputfile.read()
inputfile.close()
self.parse(csvsrc)
def parse(self, csvsrc):
csvfile = csv.StringIO(csvsrc)
reader = SimpleDictReader(csvfile, self.fieldnames)
for row in reader:
newce = self.UnitClass()
newce.fromdict(row)
self.addunit(newce)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(getattr(self, "encoding", "UTF-8"))
return source
def getoutput(self):
csvfile = csv.StringIO()
writer = csv.DictWriter(csvfile, self.fieldnames)
for ce in self.units:
cedict = ce.todict()
writer.writerow(cedict)
csvfile.reset()
return "".join(csvfile.readlines())
| __init__ | identifier_name |
csvl10n.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""classes that hold units of comma-separated values (.csv) files (csvunit)
or entire files (csvfile) for use with localisation
"""
import csv
from translate.misc import sparse
from translate.storage import base
class SimpleDictReader:
def __init__(self, fileobj, fieldnames):
self.fieldnames = fieldnames
self.contents = fileobj.read()
self.parser = sparse.SimpleParser(defaulttokenlist=[",", "\n"], whitespacechars="\r")
self.parser.stringescaping = 0
self.parser.quotechars = '"'
self.tokens = self.parser.tokenize(self.contents)
self.tokenpos = 0
def __iter__(self):
return self
def getvalue(self, value):
"""returns a value, evaluating strings as necessary"""
if (value.startswith("'") and value.endswith("'")) or (value.startswith('"') and value.endswith('"')):
return sparse.stringeval(value)
else:
return value
def next(self): | lentokens = len(self.tokens)
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
if self.tokenpos >= lentokens:
raise StopIteration()
thistokens = []
while self.tokenpos < lentokens and self.tokens[self.tokenpos] != "\n":
thistokens.append(self.tokens[self.tokenpos])
self.tokenpos += 1
while self.tokenpos < lentokens and self.tokens[self.tokenpos] == "\n":
self.tokenpos += 1
fields = []
# patch together fields since we can have quotes inside a field
currentfield = ''
fieldparts = 0
for token in thistokens:
if token == ',':
# a field is only quoted if the whole thing is quoted
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
currentfield = ''
fieldparts = 0
else:
currentfield += token
fieldparts += 1
# things after the last comma...
if fieldparts:
if fieldparts == 1:
currentfield = self.getvalue(currentfield)
fields.append(currentfield)
values = {}
for fieldnum in range(len(self.fieldnames)):
if fieldnum >= len(fields):
values[self.fieldnames[fieldnum]] = ""
else:
values[self.fieldnames[fieldnum]] = fields[fieldnum]
return values
class csvunit(base.TranslationUnit):
spreadsheetescapes = [("+", "\\+"), ("-", "\\-"), ("=", "\\="), ("'", "\\'")]
def __init__(self, source=None):
super(csvunit, self).__init__(source)
self.comment = ""
self.source = source
self.target = ""
def add_spreadsheet_escapes(self, source, target):
"""add common spreadsheet escapes to two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(unescaped):
source = source.replace(unescaped, escaped, 1)
if target.startswith(unescaped):
target = target.replace(unescaped, escaped, 1)
return source, target
def remove_spreadsheet_escapes(self, source, target):
"""remove common spreadsheet escapes from two strings"""
for unescaped, escaped in self.spreadsheetescapes:
if source.startswith(escaped):
source = source.replace(escaped, unescaped, 1)
if target.startswith(escaped):
target = target.replace(escaped, unescaped, 1)
return source, target
def fromdict(self, cedict):
self.comment = cedict.get('location', '').decode('utf-8')
self.source = cedict.get('source', '').decode('utf-8')
self.target = cedict.get('target', '').decode('utf-8')
if self.comment is None:
self.comment = ''
if self.source is None:
self.source = ''
if self.target is None:
self.target = ''
self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)
def todict(self, encoding='utf-8'):
comment, source, target = self.comment, self.source, self.target
source, target = self.add_spreadsheet_escapes(source, target)
if isinstance(comment, unicode):
comment = comment.encode(encoding)
if isinstance(source, unicode):
source = source.encode(encoding)
if isinstance(target, unicode):
target = target.encode(encoding)
return {'location': comment, 'source': source, 'target': target}
class csvfile(base.TranslationStore):
"""This class represents a .csv file with various lines.
The default format contains three columns: location, source, target"""
UnitClass = csvunit
Name = _("Comma Separated Value")
Mimetypes = ['text/comma-separated-values', 'text/csv']
Extensions = ["csv"]
def __init__(self, inputfile=None, fieldnames=None):
base.TranslationStore.__init__(self, unitclass=self.UnitClass)
self.units = []
if fieldnames is None:
self.fieldnames = ['location', 'source', 'target']
else:
if isinstance(fieldnames, basestring):
fieldnames = [fieldname.strip() for fieldname in fieldnames.split(",")]
self.fieldnames = fieldnames
self.filename = getattr(inputfile, 'name', '')
if inputfile is not None:
csvsrc = inputfile.read()
inputfile.close()
self.parse(csvsrc)
def parse(self, csvsrc):
csvfile = csv.StringIO(csvsrc)
reader = SimpleDictReader(csvfile, self.fieldnames)
for row in reader:
newce = self.UnitClass()
newce.fromdict(row)
self.addunit(newce)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(getattr(self, "encoding", "UTF-8"))
return source
def getoutput(self):
csvfile = csv.StringIO()
writer = csv.DictWriter(csvfile, self.fieldnames)
for ce in self.units:
cedict = ce.todict()
writer.writerow(cedict)
csvfile.reset()
return "".join(csvfile.readlines()) | random_line_split | |
edit-car-row.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { By } from '@angular/platform-browser';
import { ReactiveFormsModule } from '@angular/forms';
import { EditCarRowComponent } from './edit-car-row.component';
describe('EditCarRowComponent', () => {
let component: EditCarRowComponent;
let fixture: ComponentFixture<EditCarRowComponent>;
const car = {
id: 1,
make: 'test make',
model: 'test model',
year: 2000,
color: 'blue',
price: 10000,
};
beforeEach(async(() => { | TestBed.configureTestingModule({
imports: [ ReactiveFormsModule ],
declarations: [ EditCarRowComponent ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(EditCarRowComponent);
component = fixture.componentInstance;
component.car = car;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
it('should save car', (done) => {
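// The form inputs should be pre-populated with the bound car's values.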
const inputs = fixture.debugElement.queryAll(By.css('input'));
expect(inputs[0].nativeElement.value).toBe(car.make);
expect(inputs[1].nativeElement.value).toBe(car.model);
expect(Number(inputs[2].nativeElement.value)).toBe(car.year);
expect(inputs[3].nativeElement.value).toBe(car.color);
expect(Number(inputs[4].nativeElement.value)).toBe(car.price);
const makeInput = inputs[0].nativeElement as HTMLInputElement;
makeInput.value = 'new make';
makeInput.dispatchEvent(new Event('input'));
expect(component.editCarForm.controls.make.value).toBe('new make');
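// Clicking save should emit the original car with the edited make applied.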
const subscription = component.saveCar.subscribe(carToSave => {
expect(carToSave).toEqual({
...car,
make: 'new make',
});
subscription.unsubscribe();
done();
});
const saveButton = fixture.debugElement.query(By.css('button')).nativeElement as HTMLButtonElement;
saveButton.dispatchEvent(new Event('click'));
});
}); | random_line_split | |
place-picker.js | import {inject, customElement, bindable} from 'aurelia-framework';
import mapsapi from 'google-maps-api';
@customElement('place-picker')
// Get an API key from https://developers.google.com/maps/documentation/javascript/get-api-key.
@inject(Element, mapsapi('AIzaSyA1QmM_IG94DN0kCl7l1dblf4C8vRiuxus', ['places']))
export class PlacePicker {
@bindable location;
constructor(element, mapsApi) {
this.element = element;
this.mapsApi = mapsApi;
}
attached() {
// This loads the Google Maps API asynchronously.
this.mapsApi.then(maps => {
// Now that it's loaded, add a map to our HTML.
var mapContainer = this.element.querySelector('.place-picker-map');
var map = new maps.Map(mapContainer, {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13
});
// Also convert our input field into a place autocomplete field.
var input = this.element.querySelector('input');
var autocomplete = new google.maps.places.Autocomplete(input);
map.controls[google.maps.ControlPosition.TOP_LEFT].push(input);
autocomplete.bindTo('bounds', map);
// Create a marker that will show where the selected place is.
var marker = new google.maps.Marker({
map: map,
anchorPoint: new google.maps.Point(0, -29)
});
// Create a lambda that moves the marker and the map viewport.
let updateMarker = () => {
var position = new google.maps.LatLng(this.location.lat, this.location.lng);
map.setCenter(position);
marker.setPosition(position);
marker.setVisible(true);
};
// Ensure that the current location is shown properly.
updateMarker();
// Update the location and its marker every time a new place is selected.
autocomplete.addListener('place_changed', () => {
marker.setVisible(false);
var place = autocomplete.getPlace();
if (place.geometry) |
});
});
}
}
| {
this.location.name = place.name;
this.location.lat = place.geometry.location.lat();
this.location.lng = place.geometry.location.lng();
updateMarker();
} | conditional_block |
place-picker.js | import {inject, customElement, bindable} from 'aurelia-framework';
import mapsapi from 'google-maps-api';
@customElement('place-picker')
// Get an API key from https://developers.google.com/maps/documentation/javascript/get-api-key.
@inject(Element, mapsapi('AIzaSyA1QmM_IG94DN0kCl7l1dblf4C8vRiuxus', ['places']))
export class PlacePicker {
@bindable location;
constructor(element, mapsApi) |
attached() {
// This loads the Google Maps API asynchronously.
this.mapsApi.then(maps => {
// Now that it's loaded, add a map to our HTML.
var mapContainer = this.element.querySelector('.place-picker-map');
var map = new maps.Map(mapContainer, {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13
});
// Also convert our input field into a place autocomplete field.
var input = this.element.querySelector('input');
var autocomplete = new google.maps.places.Autocomplete(input);
map.controls[google.maps.ControlPosition.TOP_LEFT].push(input);
autocomplete.bindTo('bounds', map);
// Create a marker that will show where the selected place is.
var marker = new google.maps.Marker({
map: map,
anchorPoint: new google.maps.Point(0, -29)
});
// Create a lambda that moves the marker and the map viewport.
let updateMarker = () => {
var position = new google.maps.LatLng(this.location.lat, this.location.lng);
map.setCenter(position);
marker.setPosition(position);
marker.setVisible(true);
};
// Ensure that the current location is shown properly.
updateMarker();
// Update the location and its marker every time a new place is selected.
autocomplete.addListener('place_changed', () => {
marker.setVisible(false);
var place = autocomplete.getPlace();
if (place.geometry) {
this.location.name = place.name;
this.location.lat = place.geometry.location.lat();
this.location.lng = place.geometry.location.lng();
updateMarker();
}
});
});
}
}
| {
this.element = element;
this.mapsApi = mapsApi;
} | identifier_body |
place-picker.js | import {inject, customElement, bindable} from 'aurelia-framework';
import mapsapi from 'google-maps-api';
@customElement('place-picker')
// Get an API key from https://developers.google.com/maps/documentation/javascript/get-api-key.
@inject(Element, mapsapi('AIzaSyA1QmM_IG94DN0kCl7l1dblf4C8vRiuxus', ['places']))
export class PlacePicker {
@bindable location;
| (element, mapsApi) {
this.element = element;
this.mapsApi = mapsApi;
}
attached() {
// This loads the Google Maps API asynchronously.
this.mapsApi.then(maps => {
// Now that it's loaded, add a map to our HTML.
var mapContainer = this.element.querySelector('.place-picker-map');
var map = new maps.Map(mapContainer, {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13
});
// Also convert our input field into a place autocomplete field.
var input = this.element.querySelector('input');
var autocomplete = new google.maps.places.Autocomplete(input);
map.controls[google.maps.ControlPosition.TOP_LEFT].push(input);
autocomplete.bindTo('bounds', map);
// Create a marker that will show where the selected place is.
var marker = new google.maps.Marker({
map: map,
anchorPoint: new google.maps.Point(0, -29)
});
// Create a lambda that moves the marker and the map viewport.
let updateMarker = () => {
var position = new google.maps.LatLng(this.location.lat, this.location.lng);
map.setCenter(position);
marker.setPosition(position);
marker.setVisible(true);
};
// Ensure that the current location is shown properly.
updateMarker();
// Update the location and its marker every time a new place is selected.
autocomplete.addListener('place_changed', () => {
marker.setVisible(false);
var place = autocomplete.getPlace();
if (place.geometry) {
this.location.name = place.name;
this.location.lat = place.geometry.location.lat();
this.location.lng = place.geometry.location.lng();
updateMarker();
}
});
});
}
}
| constructor | identifier_name |
place-picker.js | import {inject, customElement, bindable} from 'aurelia-framework';
import mapsapi from 'google-maps-api';
@customElement('place-picker')
// Get an API key from https://developers.google.com/maps/documentation/javascript/get-api-key.
@inject(Element, mapsapi('AIzaSyA1QmM_IG94DN0kCl7l1dblf4C8vRiuxus', ['places']))
export class PlacePicker {
@bindable location;
constructor(element, mapsApi) {
this.element = element;
this.mapsApi = mapsApi;
} | // This loads the Google Maps API asynchronously.
this.mapsApi.then(maps => {
// Now that it's loaded, add a map to our HTML.
var mapContainer = this.element.querySelector('.place-picker-map');
var map = new maps.Map(mapContainer, {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13
});
// Also convert our input field into a place autocomplete field.
var input = this.element.querySelector('input');
var autocomplete = new google.maps.places.Autocomplete(input);
map.controls[google.maps.ControlPosition.TOP_LEFT].push(input);
autocomplete.bindTo('bounds', map);
// Create a marker that will show where the selected place is.
var marker = new google.maps.Marker({
map: map,
anchorPoint: new google.maps.Point(0, -29)
});
// Create a lambda that moves the marker and the map viewport.
let updateMarker = () => {
var position = new google.maps.LatLng(this.location.lat, this.location.lng);
map.setCenter(position);
marker.setPosition(position);
marker.setVisible(true);
};
// Ensure that the current location is shown properly.
updateMarker();
// Update the location and its marker every time a new place is selected.
autocomplete.addListener('place_changed', () => {
marker.setVisible(false);
var place = autocomplete.getPlace();
if (place.geometry) {
this.location.name = place.name;
this.location.lat = place.geometry.location.lat();
this.location.lng = place.geometry.location.lng();
updateMarker();
}
});
});
}
} |
attached() { | random_line_split |
brightness-low.js | 'use strict';
var React = require('react');
var PureRenderMixin = require('react-addons-pure-render-mixin');
var SvgIcon = require('../../svg-icon'); | displayName: 'DeviceBrightnessLow',
mixins: [PureRenderMixin],
render: function render() {
return React.createElement(
SvgIcon,
this.props,
React.createElement('path', { d: 'M20 15.31L23.31 12 20 8.69V4h-4.69L12 .69 8.69 4H4v4.69L.69 12 4 15.31V20h4.69L12 23.31 15.31 20H20v-4.69zM12 18c-3.31 0-6-2.69-6-6s2.69-6 6-6 6 2.69 6 6-2.69 6-6 6z' })
);
}
});
module.exports = DeviceBrightnessLow; |
var DeviceBrightnessLow = React.createClass({ | random_line_split |
application.py | # Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class CSPReport(object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even though the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
re | self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
Serve up Yarrharr's static assets directory. The files in this directory
have names like the examples below.
In development, the files are served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in EBNF
# so are definitely case-insensitive. For content-coding every implementer
# seems to agree on lowercase, so I'm not going to worry about it.
_brToken = re.compile(rb"(:?^|[\s,])br(:?$|[\s,;])")
_gzToken = re.compile(rb"(:?^|[\s,])(:?x-)?gzip(:?$|[\s,;])")
_contentTypes = {
b".js": "application/javascript",
b".css": "text/css",
b".map": "application/octet-stream",
b".ico": "image/x-icon",
b".svg": "image/svg+xml",
b".png": "image/png",
}
def _file(self, path, type, encoding=None):
"""
Construct a `twisted.web.static.File` customized to serve Yarrharr
static assets.
:param path: `twisted.internet.filepath.FilePath` instance
:returns: `twisted.web.resource.IResource`
"""
f = File(path.path)
f.type = type
f.encoding = encoding
return f
def getChild(self, path, request):
"""
Serve a file for the given path.
The Content-Type header is set based on the file extension.
A limited form of content negotiation is done based on the
Accept-Encoding header and the files on disk. Apart from the default of
``identity``, two encodings are supported:
* ``br``, which selects any Brotli-compressed ``.br`` variant of
the file.
* ``gzip``, which selects any gzip-compressed ``.gz`` variant of the
file. ``x-gzip`` is also supported.
qvalues are ignored as browsers don't use them. This may produce an
incorrect response if a variant is disabled like ``identity;q=0``.
"""
if not self._validName.match(path):
return NoResource("Not found.")
ext = path[path.rindex(b".") :]
try:
type = self._contentTypes[ext]
except KeyError:
return NoResource("Unknown type.")
acceptEncoding = request.getHeader(b"accept-encoding") or b"*"
file = None
if self._brToken.search(acceptEncoding):
br = self._dir.child(path + b".br")
if br.isfile():
file = self._file(br, type, "br")
if file is None and self._gzToken.search(acceptEncoding):
gz = self._dir.child(path + b".gz")
if gz.isfile():
file = self._file(gz, type, "gzip")
if file is None:
file = self._file(self._dir.child(path), type)
request.setHeader(b"Vary", b"accept-encoding")
request.setHeader(b"Cache-Control", b"public, max-age=31536000, immutable")
return file
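# A minimal illustrative sketch of how the Accept-Encoding token regexes above
# behave; the header values are made-up examples, not ones the server requires.
def _example_encoding_negotiation():
    # "br" present: the precompressed .br variant is preferred if it exists on disk.
    assert Static._brToken.search(b"gzip, br") is not None
    # "x-gzip" counts as gzip: the .gz variant may be served.
    assert Static._gzToken.search(b"x-gzip;q=0.5") is not None
    # Neither token: the uncompressed file is served.
    assert Static._brToken.search(b"identity") is None
    assert Static._gzToken.search(b"identity") is None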
class Root(FallbackResource):
"""
Root of the Yarrharr URL hierarchy.
"""
def __init__(self, reactor, threadpool):
wsgi = WSGIResource(reactor, threadpool, application)
FallbackResource.__init__(self, wsgi)
self.putChild(b"csp-report", CSPReportLogger())
self.putChild(b"static", Static())
# Handle requests for /favicon.ico and paths hit by script kiddies at
# the Twisted level so that they don't make it down to Django, which
# logs 404s as errors:
a404 = ErrorPage(404, "Not Found", "")
for path in (b"favicon.ico", b"index.php", b"wp-login.php"):
self.putChild(path, a404)
def getChildWithDefault(self, name, request):
# Disable the Referer header in some browsers. This is complemented by
# the injection of rel="noopener noreferrer" on all links by the HTML
# sanitizer.
request.setHeader(b"Referrer-Policy", b"same-origin")
request.setHeader(b"X-Content-Type-Options", b"nosniff")
request.setHeader(b"Cross-Origin-Opener-Policy", b"same-origin")
script_nonce = b64encode(os.urandom(32))
request.requestHeaders.setRawHeaders(b"Yarrharr-Script-Nonce", [script_nonce])
request.setHeader(
b"Content-Security-Policy",
(
# b"default-src 'none'; "
b"img-src *; "
b"script-src 'self' 'nonce-%s'; "
b"style-src 'self'; "
b"frame-ancestors 'none'; "
b"form-action 'self'; "
b"report-uri /csp-report"
)
% (script_nonce,),
)
return super().getChildWithDefault(name, request)
def updateFeeds(reactor, max_fetch=5):
"""
Poll any feeds due for a check.
"""
from .fetch import poll
def _failed(reason):
"""
Log unexpected errors and schedule a retry in one second.
"""
log.failure("Unexpected failure polling feeds", failure=reason)
return 1.0 # seconds until next poll
d = poll(reactor, max_fetch)
# Last gasp error handler to avoid terminating the LoopingCall.
d.addErrback(_failed)
return d
_txLevelToPriority = {
LogLevel.debug: "<7>",
LogLevel.info: "<6>",
LogLevel.warn: "<4>",
LogLevel.error: "<3>",
LogLevel.critical: "<2>",
}
def formatForSystemd(event):
# Events generated by twisted.python.log have a "system", while ones
# generated with twisted.logger have a "namespace" with similar
# meaning.
#
s = "[{}] ".format(event.get("log_system") or event.get("log_namespace") or "-")
s += formatEvent(event)
if not s:
return None
if "log_failure" in event:
try:
s += "\n" + event["log_failure"].getTraceback().rstrip("\n")
except: # noqa
pass
prefix = _txLevelToPriority.get(event.get("log_level")) or "<6>"
return prefix + s.replace("\n", "\n" + prefix + " ") + "\n"
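# A small illustrative check of the formatting above; the event dict is a
# made-up example, not a real log record. A warn-level event comes out as a
# "<4>"-prefixed, namespace-tagged line suitable for systemd's journal.
def _example_systemd_line():
    event = {
        "log_namespace": "yarrharr.fetch",
        "log_level": LogLevel.warn,
        "log_format": "feed poll slow",
    }
    line = formatForSystemd(event)
    assert line == "<4>[yarrharr.fetch] feed poll slow\n"
    return line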
@implementer(ILogFilterPredicate)
def dropUnhandledHTTP2Shutdown(event):
"""
Suppress the log messages which result from an unhandled error in HTTP/2
connection shutdown. See #282 and Twisted #9462.
This log message is relayed from the :mod:`twisted.python.log` so the
fields are a little odd:
* ``'log_namespace'`` is ``'log_legacy'``, and there is a ``'system'``
field with a value of ``'-'``.
* ``'log_text'`` contains the actual log text, including a pre-formatted
traceback.
* ``'failure'`` used instead of ``'log_failure'``.
"""
if event.get("log_namespace") != "log_legacy":
return PredicateResult.maybe
if event.get("log_level") != LogLevel.critical:
return PredicateResult.maybe
if "failure" not in event or not event["failure"].check(AttributeError):
return PredicateResult.maybe
if event["log_text"].startswith("Unhandled Error") and "no attribute 'shutdown'" in event["log_text"]:
return PredicateResult.no
return PredicateResult.maybe
class TwistedLoggerLogHandler(logging.Handler):
publisher = globalLogPublisher
def _mapLevel(self, levelno):
"""
Convert a stdlib logging level into a Twisted :class:`LogLevel`.
"""
if levelno <= logging.DEBUG:
return LogLevel.debug
elif levelno <= logging.INFO:
return LogLevel.info
elif levelno <= logging.WARNING:
return LogLevel.warn
elif levelno <= logging.ERROR:
return LogLevel.error
return LogLevel.critical
def emit(self, record):
self.publisher(
{
"log_level": self._mapLevel(record.levelno),
"log_namespace": record.name,
"log_format": "{msg}",
"msg": self.format(record),
}
)
class AdaptiveLoopingCall(object):
"""
:class:`AdaptiveLoopingCall` invokes a function periodically. Each time it
is called it returns the time to wait until the next invocation.
:ivar _clock: :class:`IReactorTime` implementer
:ivar _f: The function to call.
:ivar _deferred: Deferred returned by :meth:`.start()`.
:ivar _call: `IDelayedCall` when waiting for the next poll period.
Otherwise `None`.
:ivar bool _poked: `True` when the function should be immediately invoked
again after it completes.
:ivar bool _stopped: `True` once `stop()` has been called.
"""
_deferred = None
_call = None
_poked = False
_stopped = False
def __init__(self, clock, f):
"""
:param clock: :class:`IReactorTime` provider to use when scheduling
calls.
:param f: The function to call when the loop is started. It must return
the number of seconds to wait before calling it again, or
a deferred for the same.
"""
self._clock = clock
self._f = f
def start(self):
"""
Call the function immediately, and schedule future calls according to
its result.
:returns:
:class:`Deferred` which will succeed when :meth:`stop()` is called
and the loop cleanly exits, or fail when the function produces
a failure.
"""
assert self._deferred is None
assert self._call is None
assert not self._stopped
self._deferred = d = defer.Deferred()
self._callIt()
return d
def stop(self):
self._stopped = True
if self._call:
self._call.cancel()
self._deferred.callback(self)
def poke(self):
"""
Run the function as soon as possible: either immediately or once it has
finished any current execution. This is a no-op if the service has been
stopped. Pokes coalesce if received while the function is executing.
"""
if self._stopped or self._poked:
return
if self._call:
self._call.cancel()
self._callIt()
else:
self._poked = True
def _callIt(self):
self._call = None
d = defer.maybeDeferred(self._f)
d.addCallback(self._schedule)
d.addErrback(self._failLoop)
def _schedule(self, seconds):
"""
Schedule the next call.
"""
assert isinstance(seconds, (int, float))
if self._stopped:
d, self._deferred = self._deferred, None
d.callback(self)
elif self._poked:
self._poked = False
self._callIt()
else:
self._call = self._clock.callLater(seconds, self._callIt)
def _failLoop(self, failure):
"""
Terminate the loop due to an unhandled failure.
"""
d, self._deferred = self._deferred, None
d.errback(failure)
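# A minimal usage sketch of AdaptiveLoopingCall; the polling function here is
# hypothetical and simply asks to be run again every 30 seconds.
def _example_adaptive_loop(reactor):
    def poll_once():
        # Do some work, then report how long to sleep before the next run.
        return 30.0

    loop = AdaptiveLoopingCall(reactor, poll_once)
    finished = loop.start()   # runs poll_once immediately, then every 30 seconds
    loop.poke()               # cancel the pending sleep and poll again right away
    loop.stop()               # `finished` fires once the loop has wound down
    return finished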
def run():
from twisted.internet import reactor
root = logging.getLogger()
logging.getLogger("django").setLevel(logging.INFO)
logging.raiseExceptions = settings.DEBUG
logging._srcfile = None # Disable expensive collection of location information.
root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
root.addHandler(TwistedLoggerLogHandler())
observer = FilteringLogObserver(
FileLogObserver(sys.stdout, formatForSystemd),
[dropUnhandledHTTP2Shutdown],
)
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
log.info("Yarrharr {version} starting", version=__version__)
factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)
updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
loopEndD = updateLoop.start()
loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))
@receiver(schedule_changed)
def threadPollNow(sender, **kwargs):
"""
When the `schedule_changed` signal is sent poke the polling loop. If it
is sleeping this will cause it to poll immediately. Otherwise this will
cause it to run the poll function immediately once it returns (running
it again protects against races).
"""
log.debug("Immediate poll triggered by {sender}", sender=sender)
reactor.callFromThread(updateLoop.poke)
def stopUpdateLoop():
updateLoop.stop()
return loopEndD
reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)
reactor.run()
| turn b""
| conditional_block |
application.py | # Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class CSPReport(object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even though the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
return b""
self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
Serve up Yarrharr's static assets directory. The files in this directory
have names like the examples below.
In development, the files are served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in EBNF
# so are definitely case-insensitive. For content-coding every implementer
# seems to agree on lowercase, so I'm not going to worry about it.
_brToken = re.compile(rb"(:?^|[\s,])br(:?$|[\s,;])")
_gzToken = re.compile(rb"(:?^|[\s,])(:?x-)?gzip(:?$|[\s,;])")
_contentTypes = {
b".js": "application/javascript",
b".css": "text/css",
b".map": "application/octet-stream",
b".ico": "image/x-icon",
b".svg": "image/svg+xml",
b".png": "image/png",
}
def _file(self, path, type, encoding=None):
"""
Construct a `twisted.web.static.File` customized to serve Yarrharr
static assets.
:param path: `twisted.internet.filepath.FilePath` instance
:returns: `twisted.web.resource.IResource`
"""
f = File(path.path)
f.type = type
f.encoding = encoding
return f
def getChild(self, path, request):
"""
Serve a file for the given path.
The Content-Type header is set based on the file extension.
A limited form of content negotiation is done based on the
Accept-Encoding header and the files on disk. Apart from the default of
``identity``, two encodings are supported:
* ``br``, which selects any Brotli-compressed ``.br`` variant of
the file.
* ``gzip``, which selects any gzip-compressed ``.gz`` variant of the
file. ``x-gzip`` is also supported.
qvalues are ignored as browsers don't use them. This may produce an
incorrect response if a variant is disabled like ``identity;q=0``.
"""
if not self._validName.match(path):
return NoResource("Not found.")
ext = path[path.rindex(b".") :]
try:
type = self._contentTypes[ext]
except KeyError:
return NoResource("Unknown type.")
acceptEncoding = request.getHeader(b"accept-encoding") or b"*"
file = None
if self._brToken.search(acceptEncoding):
br = self._dir.child(path + b".br")
if br.isfile():
file = self._file(br, type, "br")
if file is None and self._gzToken.search(acceptEncoding):
gz = self._dir.child(path + b".gz")
if gz.isfile():
file = self._file(gz, type, "gzip")
if file is None:
file = self._file(self._dir.child(path), type)
request.setHeader(b"Vary", b"accept-encoding")
request.setHeader(b"Cache-Control", b"public, max-age=31536000, immutable")
return file
class Root(FallbackResource):
"""
Root of the Yarrharr URL hierarchy.
"""
def __init__(self, reactor, threadpool):
wsgi = WSGIResource(reactor, threadpool, application)
FallbackResource.__init__(self, wsgi)
self.putChild(b"csp-report", CSPReportLogger())
self.putChild(b"static", Static())
# Handle requests for /favicon.ico and paths hit by script kiddies at
# the Twisted level so that they don't make it down to Django, which
# logs 404s as errors:
a404 = ErrorPage(404, "Not Found", "")
for path in (b"favicon.ico", b"index.php", b"wp-login.php"):
self.putChild(path, a404)
def getChildWithDefault(self, name, request):
# Disable the Referer header in some browsers. This is complemented by
# the injection of rel="noopener noreferrer" on all links by the HTML
# sanitizer.
request.setHeader(b"Referrer-Policy", b"same-origin")
request.setHeader(b"X-Content-Type-Options", b"nosniff")
request.setHeader(b"Cross-Origin-Opener-Policy", b"same-origin")
script_nonce = b64encode(os.urandom(32))
request.requestHeaders.setRawHeaders(b"Yarrharr-Script-Nonce", [script_nonce])
request.setHeader(
b"Content-Security-Policy",
(
# b"default-src 'none'; "
b"img-src *; "
b"script-src 'self' 'nonce-%s'; "
b"style-src 'self'; "
b"frame-ancestors 'none'; "
b"form-action 'self'; "
b"report-uri /csp-report"
)
% (script_nonce,),
)
return super().getChildWithDefault(name, request)
def updateFeeds(reactor, max_fetch=5):
"""
Poll any feeds due for a check.
"""
from .fetch import poll
def _failed(reason):
"""
Log unexpected errors and schedule a retry in one second.
"""
log.failure("Unexpected failure polling feeds", failure=reason)
return 1.0 # seconds until next poll
d = poll(reactor, max_fetch)
# Last gasp error handler to avoid terminating the LoopingCall.
d.addErrback(_failed)
return d
_txLevelToPriority = {
LogLevel.debug: "<7>",
LogLevel.info: "<6>",
LogLevel.warn: "<4>",
LogLevel.error: "<3>",
LogLevel.critical: "<2>",
}
def formatForSystemd(event):
# Events generated by twisted.python.log have a "system", while ones
# generated with twisted.logger have a "namespace" with similar
# meaning.
#
s = "[{}] ".format(event.get("log_system") or event.get("log_namespace") or "-")
s += formatEvent(event)
if not s:
return None
if "log_failure" in event:
try:
s += "\n" + event["log_failure"].getTraceback().rstrip("\n")
except: # noqa
pass
prefix = _txLevelToPriority.get(event.get("log_level")) or "<6>"
return prefix + s.replace("\n", "\n" + prefix + " ") + "\n"
@implementer(ILogFilterPredicate)
def dropUnhandledHTTP2Shutdown(event):
"""
Suppress the log messages which result from an unhandled error in HTTP/2
connection shutdown. See #282 and Twisted #9462.
This log message is relayed from the :mod:`twisted.python.log` so the
fields are a little odd:
* ``'log_namespace'`` is ``'log_legacy'``, and there is a ``'system'``
field with a value of ``'-'``.
* ``'log_text'`` contains the actual log text, including a pre-formatted
traceback.
* ``'failure'`` used instead of ``'log_failure'``.
"""
if event.get("log_namespace") != "log_legacy":
return PredicateResult.maybe
if event.get("log_level") != LogLevel.critical:
return PredicateResult.maybe
if "failure" not in event or not event["failure"].check(AttributeError):
return PredicateResult.maybe
if event["log_text"].startswith("Unhandled Error") and "no attribute 'shutdown'" in event["log_text"]:
return PredicateResult.no
return PredicateResult.maybe
class TwistedLoggerLogHandler(logging.Handler):
publisher = globalLogPublisher
| """
Convert a stdlib logging level into a Twisted :class:`LogLevel`.
"""
if levelno <= logging.DEBUG:
return LogLevel.debug
elif levelno <= logging.INFO:
return LogLevel.info
elif levelno <= logging.WARNING:
return LogLevel.warn
elif levelno <= logging.ERROR:
return LogLevel.error
return LogLevel.critical
def emit(self, record):
self.publisher(
{
"log_level": self._mapLevel(record.levelno),
"log_namespace": record.name,
"log_format": "{msg}",
"msg": self.format(record),
}
)
class AdaptiveLoopingCall(object):
"""
:class:`AdaptiveLoopingCall` invokes a function periodically. Each time it
is called it returns the time to wait until the next invocation.
:ivar _clock: :class:`IReactorTime` implementer
:ivar _f: The function to call.
:ivar _deferred: Deferred returned by :meth:`.start()`.
:ivar _call: `IDelayedCall` when waiting for the next poll period.
Otherwise `None`.
:ivar bool _poked: `True` when the function should be immediately invoked
again after it completes.
:ivar bool _stopped: `True` once `stop()` has been called.
"""
_deferred = None
_call = None
_poked = False
_stopped = False
def __init__(self, clock, f):
"""
:param clock: :class:`IReactorTime` provider to use when scheduling
calls.
:param f: The function to call when the loop is started. It must return
the number of seconds to wait before calling it again, or
a deferred for the same.
"""
self._clock = clock
self._f = f
def start(self):
"""
Call the function immediately, and schedule future calls according to
its result.
:returns:
:class:`Deferred` which will succeed when :meth:`stop()` is called
and the loop cleanly exits, or fail when the function produces
a failure.
"""
assert self._deferred is None
assert self._call is None
assert not self._stopped
self._deferred = d = defer.Deferred()
self._callIt()
return d
def stop(self):
self._stopped = True
if self._call:
self._call.cancel()
self._deferred.callback(self)
def poke(self):
"""
Run the function as soon as possible: either immediately or once it has
finished any current execution. This is a no-op if the service has been
stopped. Pokes coalesce if received while the function is executing.
"""
if self._stopped or self._poked:
return
if self._call:
self._call.cancel()
self._callIt()
else:
self._poked = True
def _callIt(self):
self._call = None
d = defer.maybeDeferred(self._f)
d.addCallback(self._schedule)
d.addErrback(self._failLoop)
def _schedule(self, seconds):
"""
Schedule the next call.
"""
assert isinstance(seconds, (int, float))
if self._stopped:
d, self._deferred = self._deferred, None
d.callback(self)
elif self._poked:
self._poked = False
self._callIt()
else:
self._call = self._clock.callLater(seconds, self._callIt)
def _failLoop(self, failure):
"""
Terminate the loop due to an unhandled failure.
"""
d, self._deferred = self._deferred, None
d.errback(failure)
def run():
from twisted.internet import reactor
root = logging.getLogger()
logging.getLogger("django").setLevel(logging.INFO)
logging.raiseExceptions = settings.DEBUG
logging._srcfile = None # Disable expensive collection of location information.
root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
root.addHandler(TwistedLoggerLogHandler())
observer = FilteringLogObserver(
FileLogObserver(sys.stdout, formatForSystemd),
[dropUnhandledHTTP2Shutdown],
)
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
log.info("Yarrharr {version} starting", version=__version__)
factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)
updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
loopEndD = updateLoop.start()
loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))
@receiver(schedule_changed)
def threadPollNow(sender, **kwargs):
"""
When the `schedule_changed` signal is sent poke the polling loop. If it
is sleeping this will cause it to poll immediately. Otherwise this will
cause it to run the poll function immediately once it returns (running
it again protects against races).
"""
log.debug("Immediate poll triggered by {sender}", sender=sender)
reactor.callFromThread(updateLoop.poke)
def stopUpdateLoop():
updateLoop.stop()
return loopEndD
reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)
reactor.run() | def _mapLevel(self, levelno): | random_line_split |
application.py | # Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class C | object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even though the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
return b""
self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
Serve up Yarrharr's static assets directory. The files in this directory
have names like the examples below.
In development, the files are served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in EBNF
# so are definitely case-insensitive. For content-coding every implementer
# seems to agree on lowercase, so I'm not going to worry about it.
_brToken = re.compile(rb"(:?^|[\s,])br(:?$|[\s,;])")
_gzToken = re.compile(rb"(:?^|[\s,])(:?x-)?gzip(:?$|[\s,;])")
_contentTypes = {
b".js": "application/javascript",
b".css": "text/css",
b".map": "application/octet-stream",
b".ico": "image/x-icon",
b".svg": "image/svg+xml",
b".png": "image/png",
}
def _file(self, path, type, encoding=None):
"""
Construct a `twisted.web.static.File` customized to serve Yarrharr
static assets.
:param path: `twisted.internet.filepath.FilePath` instance
:returns: `twisted.web.resource.IResource`
"""
f = File(path.path)
f.type = type
f.encoding = encoding
return f
def getChild(self, path, request):
"""
Serve a file for the given path.
The Content-Type header is set based on the file extension.
A limited form of content negotiation is done based on the
Accept-Encoding header and the files on disk. Apart from the default of
``identity``, two encodings are supported:
* ``br``, which selects any Brotli-compressed ``.br`` variant of
the file.
* ``gzip``, which selects any gzip-compressed ``.gz`` variant of the
file. ``x-gzip`` is also supported.
qvalues are ignored as browsers don't use them. This may produce an
incorrect response if a variant is disabled like ``identity;q=0``.
"""
if not self._validName.match(path):
return NoResource("Not found.")
ext = path[path.rindex(b".") :]
try:
type = self._contentTypes[ext]
except KeyError:
return NoResource("Unknown type.")
acceptEncoding = request.getHeader(b"accept-encoding") or b"*"
file = None
if self._brToken.search(acceptEncoding):
br = self._dir.child(path + b".br")
if br.isfile():
file = self._file(br, type, "br")
if file is None and self._gzToken.search(acceptEncoding):
gz = self._dir.child(path + b".gz")
if gz.isfile():
file = self._file(gz, type, "gzip")
if file is None:
file = self._file(self._dir.child(path), type)
request.setHeader(b"Vary", b"accept-encoding")
request.setHeader(b"Cache-Control", b"public, max-age=31536000, immutable")
return file
class Root(FallbackResource):
"""
Root of the Yarrharr URL hierarchy.
"""
def __init__(self, reactor, threadpool):
wsgi = WSGIResource(reactor, threadpool, application)
FallbackResource.__init__(self, wsgi)
self.putChild(b"csp-report", CSPReportLogger())
self.putChild(b"static", Static())
# Handle requests for /favicon.ico and paths hit by script kiddies at
# the Twisted level so that they don't make it down to Django, which
# logs 404s as errors:
a404 = ErrorPage(404, "Not Found", "")
for path in (b"favicon.ico", b"index.php", b"wp-login.php"):
self.putChild(path, a404)
def getChildWithDefault(self, name, request):
# Disable the Referer header in some browsers. This is complemented by
# the injection of rel="noopener noreferrer" on all links by the HTML
# sanitizer.
request.setHeader(b"Referrer-Policy", b"same-origin")
request.setHeader(b"X-Content-Type-Options", b"nosniff")
request.setHeader(b"Cross-Origin-Opener-Policy", b"same-origin")
script_nonce = b64encode(os.urandom(32))
request.requestHeaders.setRawHeaders(b"Yarrharr-Script-Nonce", [script_nonce])
request.setHeader(
b"Content-Security-Policy",
(
# b"default-src 'none'; "
b"img-src *; "
b"script-src 'self' 'nonce-%s'; "
b"style-src 'self'; "
b"frame-ancestors 'none'; "
b"form-action 'self'; "
b"report-uri /csp-report"
)
% (script_nonce,),
)
return super().getChildWithDefault(name, request)
def updateFeeds(reactor, max_fetch=5):
"""
Poll any feeds due for a check.
"""
from .fetch import poll
def _failed(reason):
"""
Log unexpected errors and schedule a retry in one second.
"""
log.failure("Unexpected failure polling feeds", failure=reason)
return 1.0 # seconds until next poll
d = poll(reactor, max_fetch)
# Last gasp error handler to avoid terminating the LoopingCall.
d.addErrback(_failed)
return d
_txLevelToPriority = {
LogLevel.debug: "<7>",
LogLevel.info: "<6>",
LogLevel.warn: "<4>",
LogLevel.error: "<3>",
LogLevel.critical: "<2>",
}
def formatForSystemd(event):
# Events generated by twisted.python.log have a "system", while ones
# generated with twisted.logger have a "namespace" with similar
# meaning.
#
s = "[{}] ".format(event.get("log_system") or event.get("log_namespace") or "-")
s += formatEvent(event)
if not s:
return None
if "log_failure" in event:
try:
s += "\n" + event["log_failure"].getTraceback().rstrip("\n")
except: # noqa
pass
prefix = _txLevelToPriority.get(event.get("log_level")) or "<6>"
return prefix + s.replace("\n", "\n" + prefix + " ") + "\n"
@implementer(ILogFilterPredicate)
def dropUnhandledHTTP2Shutdown(event):
"""
Suppress the log messages which result from an unhandled error in HTTP/2
connection shutdown. See #282 and Twisted #9462.
This log message is relayed from the :mod:`twisted.python.log` so the
fields are a little odd:
* ``'log_namespace'`` is ``'log_legacy'``, and there is a ``'system'``
field with a value of ``'-'``.
* ``'log_text'`` contains the actual log text, including a pre-formatted
traceback.
* ``'failure'`` used instead of ``'log_failure'``.
"""
if event.get("log_namespace") != "log_legacy":
return PredicateResult.maybe
if event.get("log_level") != LogLevel.critical:
return PredicateResult.maybe
if "failure" not in event or not event["failure"].check(AttributeError):
return PredicateResult.maybe
if event["log_text"].startswith("Unhandled Error") and "no attribute 'shutdown'" in event["log_text"]:
return PredicateResult.no
return PredicateResult.maybe
class TwistedLoggerLogHandler(logging.Handler):
publisher = globalLogPublisher
def _mapLevel(self, levelno):
"""
Convert a stdlib logging level into a Twisted :class:`LogLevel`.
"""
if levelno <= logging.DEBUG:
return LogLevel.debug
elif levelno <= logging.INFO:
return LogLevel.info
elif levelno <= logging.WARNING:
return LogLevel.warn
elif levelno <= logging.ERROR:
return LogLevel.error
return LogLevel.critical
def emit(self, record):
self.publisher(
{
"log_level": self._mapLevel(record.levelno),
"log_namespace": record.name,
"log_format": "{msg}",
"msg": self.format(record),
}
)
class AdaptiveLoopingCall(object):
"""
:class:`AdaptiveLoopingCall` invokes a function periodically. Each time it
is called it returns the time to wait until the next invocation.
:ivar _clock: :class:`IReactorTime` implementer
:ivar _f: The function to call.
:ivar _deferred: Deferred returned by :meth:`.start()`.
:ivar _call: `IDelayedCall` when waiting for the next poll period.
Otherwise `None`.
:ivar bool _poked: `True` when the function should be immediately invoked
again after it completes.
:ivar bool _stopped: `True` once `stop()` has been called.
"""
_deferred = None
_call = None
_poked = False
_stopped = False
def __init__(self, clock, f):
"""
:param clock: :class:`IReactorTime` provider to use when scheduling
calls.
:param f: The function to call when the loop is started. It must return
the number of seconds to wait before calling it again, or
a deferred for the same.
"""
self._clock = clock
self._f = f
def start(self):
"""
Call the function immediately, and schedule future calls according to
its result.
:returns:
:class:`Deferred` which will succeed when :meth:`stop()` is called
and the loop cleanly exits, or fail when the function produces
a failure.
"""
assert self._deferred is None
assert self._call is None
assert not self._stopped
self._deferred = d = defer.Deferred()
self._callIt()
return d
def stop(self):
self._stopped = True
if self._call:
self._call.cancel()
self._deferred.callback(self)
def poke(self):
"""
Run the function as soon as possible: either immediately or once it has
finished any current execution. This is a no-op if the service has been
stopped. Pokes coalesce if received while the function is executing.
"""
if self._stopped or self._poked:
return
if self._call:
self._call.cancel()
self._callIt()
else:
self._poked = True
def _callIt(self):
self._call = None
d = defer.maybeDeferred(self._f)
d.addCallback(self._schedule)
d.addErrback(self._failLoop)
def _schedule(self, seconds):
"""
Schedule the next call.
"""
assert isinstance(seconds, (int, float))
if self._stopped:
d, self._deferred = self._deferred, None
d.callback(self)
elif self._poked:
self._poked = False
self._callIt()
else:
self._call = self._clock.callLater(seconds, self._callIt)
def _failLoop(self, failure):
"""
Terminate the loop due to an unhandled failure.
"""
d, self._deferred = self._deferred, None
d.errback(failure)
def run():
from twisted.internet import reactor
root = logging.getLogger()
logging.getLogger("django").setLevel(logging.INFO)
logging.raiseExceptions = settings.DEBUG
logging._srcfile = None # Disable expensive collection of location information.
root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
root.addHandler(TwistedLoggerLogHandler())
observer = FilteringLogObserver(
FileLogObserver(sys.stdout, formatForSystemd),
[dropUnhandledHTTP2Shutdown],
)
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
log.info("Yarrharr {version} starting", version=__version__)
factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)
updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
loopEndD = updateLoop.start()
loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))
@receiver(schedule_changed)
def threadPollNow(sender, **kwargs):
"""
When the `schedule_changed` signal is sent poke the polling loop. If it
is sleeping this will cause it to poll immediately. Otherwise this will
cause it to run the poll function immediately once it returns (running
it again protects against races).
"""
log.debug("Immediate poll triggered by {sender}", sender=sender)
reactor.callFromThread(updateLoop.poke)
def stopUpdateLoop():
updateLoop.stop()
return loopEndD
reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)
reactor.run()
| SPReport( | identifier_name |
application.py | # Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class CSPReport(object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
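# Illustration (assumed report payload, not from the codebase): a minimal
# violation such as {"document-uri": "https://example.invalid/", "referrer": "",
# "blocked-uri": "eval", "original-policy": "script-src 'self'", "status-code": 200}
# maps through fromJSON() to CSPReport(url="https://example.invalid/",
# referrer=None, resource="eval", policy="script-src 'self'", status=200,
# source=None, sample=None, ...), since the optional keys fall back to None.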
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even though the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
return b""
self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
Serve up Yarrharr's static assets directory. The files in this directory
have names like::
In development, the files are served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in EBNF
# so are definitely case-insensitive. For content-coding every implementer
# seems to agree on lowercase, so I'm not going to worry about it.
_brToken = re.compile(rb"(:?^|[\s,])br(:?$|[\s,;])")
_gzToken = re.compile(rb"(:?^|[\s,])(:?x-)?gzip(:?$|[\s,;])")
_contentTypes = {
b".js": "application/javascript",
b".css": "text/css",
b".map": "application/octet-stream",
b".ico": "image/x-icon",
b".svg": "image/svg+xml",
b".png": "image/png",
}
def _file(self, path, type, encoding=None):
"""
Construct a `twisted.web.static.File` customized to serve Yarrharr
static assets.
:param path: `twisted.internet.filepath.FilePath` instance
:returns: `twisted.web.resource.IResource`
"""
f = File(path.path)
f.type = type
f.encoding = encoding
return f
def getChild(self, path, request):
"""
Serve a file for the given path.
The Content-Type header is set based on the file extension.
A limited form of content negotiation is done based on the
Accept-Encoding header and the files on disk. Apart from the default of
``identity``, two encodings are supported:
* ``br``, which selects any Brotli-compressed ``.br`` variant of
the file.
* ``gzip``, which selects any gzip-compressed ``.gz`` variant of the
file. ``x-gzip`` is also supported.
qvalues are ignored as browsers don't use them. This may produce an
incorrect response if a variant is disabled like ``identity;q=0``.
"""
if not self._validName.match(path):
return NoResource("Not found.")
ext = path[path.rindex(b".") :]
try:
type = self._contentTypes[ext]
except KeyError:
return NoResource("Unknown type.")
acceptEncoding = request.getHeader(b"accept-encoding") or b"*"
file = None
if self._brToken.search(acceptEncoding):
br = self._dir.child(path + b".br")
if br.isfile():
file = self._file(br, type, "br")
if file is None and self._gzToken.search(acceptEncoding):
gz = self._dir.child(path + b".gz")
if gz.isfile():
file = self._file(gz, type, "gzip")
if file is None:
file = self._file(self._dir.child(path), type)
request.setHeader(b"Vary", b"accept-encoding")
request.setHeader(b"Cache-Control", b"public, max-age=31536000, immutable")
return file
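# Worked example (hypothetical request): with "Accept-Encoding: gzip, br" and
# both main-abc123.js.br and main-abc123.js.gz present, the Brotli branch wins
# because it is checked first, so the .br file is served as application/javascript
# with Content-Encoding: br, Vary: accept-encoding and the immutable Cache-Control.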
class Root(FallbackResource):
"""
Root of the Yarrharr URL hierarchy.
"""
def __init__(self, reactor, threadpool):
wsgi = WSGIResource(reactor, threadpool, application)
FallbackResource.__init__(self, wsgi)
self.putChild(b"csp-report", CSPReportLogger())
self.putChild(b"static", Static())
# Handle requests for /favicon.ico and paths hit by script kiddies at
# the Twisted level so that they don't make it down to Django, which
# logs 404s as errors:
a404 = ErrorPage(404, "Not Found", "")
for path in (b"favicon.ico", b"index.php", b"wp-login.php"):
self.putChild(path, a404)
def getChildWithDefault(self, name, request):
# Disable the Referer header in some browsers. This is complemented by
# the injection of rel="noopener noreferrer" on all links by the HTML
# sanitizer.
request.setHeader(b"Referrer-Policy", b"same-origin")
request.setHeader(b"X-Content-Type-Options", b"nosniff")
request.setHeader(b"Cross-Origin-Opener-Policy", b"same-origin")
script_nonce = b64encode(os.urandom(32))
request.requestHeaders.setRawHeaders(b"Yarrharr-Script-Nonce", [script_nonce])
request.setHeader(
b"Content-Security-Policy",
(
# b"default-src 'none'; "
b"img-src *; "
b"script-src 'self' 'nonce-%s'; "
b"style-src 'self'; "
b"frame-ancestors 'none'; "
b"form-action 'self'; "
b"report-uri /csp-report"
)
% (script_nonce,),
)
return super().getChildWithDefault(name, request)
def updateFeeds(reactor, max_fetch=5):
"""
Poll any feeds due for a check.
"""
from .fetch import poll
def _failed(reason):
"""
Log unexpected errors and schedule a retry in one second.
"""
log.failure("Unexpected failure polling feeds", failure=reason)
return 1.0 # seconds until next poll
d = poll(reactor, max_fetch)
# Last gasp error handler to avoid terminating the LoopingCall.
d.addErrback(_failed)
return d
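# Behaviour note: poll() is expected to resolve to the number of seconds until
# the next fetch is due; the errback above turns an unexpected failure into a
# logged error plus a 1.0-second retry instead of killing the polling loop.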
_txLevelToPriority = {
LogLevel.debug: "<7>",
LogLevel.info: "<6>",
LogLevel.warn: "<4>",
LogLevel.error: "<3>",
LogLevel.critical: "<2>",
}
def formatForSystemd(event):
# Events generated by twisted.python.log have a "system", while ones
# generated with twisted.logger have a "namespace" with similar
# meaning.
#
s = "[{}] ".format(event.get("log_system") or event.get("log_namespace") or "-")
s += formatEvent(event)
if not s:
return None
if "log_failure" in event:
try:
s += "\n" + event["log_failure"].getTraceback().rstrip("\n")
except: # noqa
pass
prefix = _txLevelToPriority.get(event.get("log_level")) or "<6>"
return prefix + s.replace("\n", "\n" + prefix + " ") + "\n"
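# Worked example (illustrative values): an info-level event from namespace
# "yarrharr.application" whose text renders as "Polling" becomes
#   "<6>[yarrharr.application] Polling\n"
# and any embedded newlines (e.g. a traceback) get the "<6> " prefix re-applied.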
@implementer(ILogFilterPredicate)
def dropUnhandledHTTP2Shutdown(event):
""" | class TwistedLoggerLogHandler(logging.Handler):
publisher = globalLogPublisher
def _mapLevel(self, levelno):
"""
Convert a stdlib logging level into a Twisted :class:`LogLevel`.
"""
if levelno <= logging.DEBUG:
return LogLevel.debug
elif levelno <= logging.INFO:
return LogLevel.info
elif levelno <= logging.WARNING:
return LogLevel.warn
elif levelno <= logging.ERROR:
return LogLevel.error
return LogLevel.critical
def emit(self, record):
self.publisher(
{
"log_level": self._mapLevel(record.levelno),
"log_namespace": record.name,
"log_format": "{msg}",
"msg": self.format(record),
}
)
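# Mapping sketch: a stdlib record at logging.WARNING (30) satisfies
# "levelno <= logging.WARNING", so logging.warning(...) is re-published to the
# Twisted log publisher as LogLevel.warn with the formatted text under "msg".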
class AdaptiveLoopingCall(object):
"""
:class:`AdaptiveLoopingCall` invokes a function periodically. Each time it
is called it returns the time to wait until the next invocation.
:ivar _clock: :class:`IReactorTime` implementer
:ivar _f: The function to call.
:ivar _deferred: Deferred returned by :meth:`.start()`.
:ivar _call: `IDelayedCall` when waiting for the next poll period.
Otherwise `None`.
:ivar bool _poked: `True` when the function should be immediately invoked
again after it completes.
:ivar bool _stopped: `True` once `stop()` has been called.
"""
_deferred = None
_call = None
_poked = False
_stopped = False
def __init__(self, clock, f):
"""
:param clock: :class:`IReactorTime` provider to use when scheduling
calls.
:param f: The function to call when the loop is started. It must return
the number of seconds to wait before calling it again, or
a deferred for the same.
"""
self._clock = clock
self._f = f
def start(self):
"""
Call the function immediately, and schedule future calls according to
its result.
:returns:
:class:`Deferred` which will succeed when :meth:`stop()` is called
and the loop cleanly exits, or fail when the function produces
a failure.
"""
assert self._deferred is None
assert self._call is None
assert not self._stopped
self._deferred = d = defer.Deferred()
self._callIt()
return d
def stop(self):
self._stopped = True
if self._call:
self._call.cancel()
self._deferred.callback(self)
def poke(self):
"""
Run the function as soon as possible: either immediately or once it has
finished any current execution. This is a no-op if the service has been
stopped. Pokes coalesce if received while the function is executing.
"""
if self._stopped or self._poked:
return
if self._call:
self._call.cancel()
self._callIt()
else:
self._poked = True
def _callIt(self):
self._call = None
d = defer.maybeDeferred(self._f)
d.addCallback(self._schedule)
d.addErrback(self._failLoop)
def _schedule(self, seconds):
"""
Schedule the next call.
"""
assert isinstance(seconds, (int, float))
if self._stopped:
d, self._deferred = self._deferred, None
d.callback(self)
elif self._poked:
self._poked = False
self._callIt()
else:
self._call = self._clock.callLater(seconds, self._callIt)
def _failLoop(self, failure):
"""
Terminate the loop due to an unhandled failure.
"""
d, self._deferred = self._deferred, None
d.errback(failure)
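# Note on poke(): while the function is mid-run, _poked is set and _schedule()
# re-invokes it as soon as it returns; while the loop is sleeping, the pending
# callLater is cancelled and the function runs immediately. Sketch (hypothetical):
#   loop = AdaptiveLoopingCall(reactor, lambda: 300.0)
#   loop.start()
#   loop.poke()   # run now instead of waiting out the 300-second delay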
def run():
from twisted.internet import reactor
root = logging.getLogger()
logging.getLogger("django").setLevel(logging.INFO)
logging.raiseExceptions = settings.DEBUG
logging._srcfile = None # Disable expensive collection of location information.
root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
root.addHandler(TwistedLoggerLogHandler())
observer = FilteringLogObserver(
FileLogObserver(sys.stdout, formatForSystemd),
[dropUnhandledHTTP2Shutdown],
)
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
log.info("Yarrharr {version} starting", version=__version__)
factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)
updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
loopEndD = updateLoop.start()
loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))
@receiver(schedule_changed)
def threadPollNow(sender, **kwargs):
"""
When the `schedule_changed` signal is sent poke the polling loop. If it
is sleeping this will cause it to poll immediately. Otherwise this will
cause it to run the poll function immediately once it returns (running
it again protects against races).
"""
log.debug("Immediate poll triggered by {sender}", sender=sender)
reactor.callFromThread(updateLoop.poke)
def stopUpdateLoop():
updateLoop.stop()
return loopEndD
reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)
reactor.run()
|
Suppress the log messages which result from an unhandled error in HTTP/2
connection shutdown. See #282 and Twisted #9462.
This log message is relayed from the :mod:`twisted.python.log` so the
fields are a little odd:
* ``'log_namespace'`` is ``'log_legacy'``, and there is a ``'system'``
field with a value of ``'-'``.
* ``'log_text'`` contains the actual log text, including a pre-formatted
traceback.
* ``'failure'`` used instead of ``'log_failure'``.
"""
if event.get("log_namespace") != "log_legacy":
return PredicateResult.maybe
if event.get("log_level") != LogLevel.critical:
return PredicateResult.maybe
if "failure" not in event or not event["failure"].check(AttributeError):
return PredicateResult.maybe
if event["log_text"].startswith("Unhandled Error") and "no attribute 'shutdown'" in event["log_text"]:
return PredicateResult.no
return PredicateResult.maybe
| identifier_body |
utils.py | import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower offset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15) or 0.75
stop : Offset from highest point in the colormap's range.
Defaults to 1.0 (no upper offset). Should be between
`midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
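# Example (numbers from the docstring above): for data spanning -15.0 to +5.0,
# pinning zero to the centre needs midpoint = 1 - 5/(5 + 15) = 0.75, e.g.
#   shifted = shift_colormap(plt.cm.seismic, midpoint=0.75, name='seismic_zero')
# ('seismic_zero' is an arbitrary illustrative name.)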
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
cmap : Matplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n))) | def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
'''
Function that creates comparative visualization of GWR and MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
path to save the figure. Default is None. Not to save figure.
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
#Otherwise, there are positive and negative values so the colormap so zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
if (gwr_t == 0).any():
data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
if (mgwr_t == 0).any():
data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show() | return new_cmap
@requires('matplotlib')
@requires('geopandas') | random_line_split |
utils.py | import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower offset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15) or 0.75
stop : Offset from highest point in the colormap's range.
Defaults to 1.0 (no upper offset). Should be between
`midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
cmap : Matplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
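# Example: keeping only the lower (cool) half of a diverging map for data that
# are entirely negative, as compare_surfaces() below does:
#   truncate_colormap(plt.cm.seismic, 0.0, 0.5)
# which builds a new 100-step colormap named 'trunc(seismic,0.00,0.50)'.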
@requires('matplotlib')
@requires('geopandas')
def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
'''
Function that creates comparative visualization of GWR and MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
path to save the figure. Default is None. Not to save figure.
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
#Otherwise, there are positive and negative values so the colormap so zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
if (gwr_t == 0).any():
|
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
if (mgwr_t == 0).any():
data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show()
| data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2) | conditional_block |
utils.py | import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower offset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15) or 0.75
stop : Offset from highest point in the colormap's range.
Defaults to 1.0 (no upper offset). Should be between
`midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
cmap : Matplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
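# Branching sketch for the function below: with, say, vmin = -2.3 and vmax = 1.1
# the mixed-sign branch applies, giving midpoint = 1 - 1.1/(1.1 + 2.3) ~= 0.68,
# so zero maps to the same colour on both the GWR and MGWR panels.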
@requires('matplotlib')
@requires('geopandas')
def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
| '''
Function that creates comparative visualization of GWR and MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
path to save the figure. Default is None. Not to save figure.
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
#Otherwise, there are positive and negative values so the colormap so zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
if (gwr_t == 0).any():
data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
if (mgwr_t == 0).any():
data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show() | identifier_body | |
utils.py | import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower offset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15) or 0.75
stop : Offset from highest point in the colormap's range.
Defaults to 1.0 (no upper offset). Should be between
`midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
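# Side effect worth noting: plt.register_cmap() above registers the shifted map
# under its name, so it can later be retrieved with plt.get_cmap('shiftedcmap')
# when the default name argument is used (behaviour assumed from the pyplot API).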
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
cmap : Matplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
@requires('matplotlib')
@requires('geopandas')
def | (data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
'''
Function that creates comparative visualization of GWR and MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
path to save the figure. Default is None. Not to save figure.
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
#Otherwise, there are positive and negative values so the colormap so zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
if (gwr_t == 0).any():
data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
if (mgwr_t == 0).any():
data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show()
| compare_surfaces | identifier_name |
extensions.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::auth::csrf_token;
use iml_wire_types::{GroupType, Session};
use seed::{fetch, prelude::*, *};
/// Extension methods for the Session API object.
pub(crate) trait SessionExt {
/// Does the user need to login?
fn needs_login(&self) -> bool;
/// Does a logged in user exist?
fn has_user(&self) -> bool;
/// Does the user fall within the group?
fn group_allowed(&self, group: GroupType) -> bool;
}
impl SessionExt for Session {
fn needs_login(&self) -> bool {
self.user.is_none() && !self.read_enabled
}
fn has_user(&self) -> bool {
self.user.is_some()
}
fn group_allowed(&self, group: GroupType) -> bool {
self.user
.as_ref()
.and_then(|x| x.groups.as_ref())
.and_then(|xs| {
xs.iter().find(|y| {
//Superusers can do everything.
if y.name == GroupType::Superusers {
return true;
}
//Filesystem administrators can do everything a filesystem user can do.
if y.name == GroupType::FilesystemAdministrators && group == GroupType::FilesystemUsers {
return true;
}
// Fallback to matching on names.
y.name == group
})
})
.is_some()
}
}
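// Illustrative expectations (hypothetical session values, not from this crate):
// a user whose only group is FilesystemAdministrators also passes a check for
// GroupType::FilesystemUsers, a Superusers member passes every group_allowed()
// check, and an anonymous session with read_enabled == false reports
// needs_login() == true.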
/// Extension methods for `fetch::Request`
pub(crate) trait RequestExt: Sized {
fn api_call(path: impl ToString) -> Self;
fn api_query(path: impl ToString, args: impl serde::Serialize) -> Result<Self, serde_urlencoded::ser::Error>;
fn api_item(path: impl ToString, item: impl ToString) -> Self;
fn graphql_query<T: serde::Serialize>(x: &T) -> Self;
fn with_auth(self: Self) -> Self;
}
impl RequestExt for fetch::Request {
fn api_call(path: impl ToString) -> Self {
Self::new(format!("/api/{}/", path.to_string()))
}
fn api_query(path: impl ToString, args: impl serde::Serialize) -> Result<Self, serde_urlencoded::ser::Error> {
let qs = format!("?{}", serde_urlencoded::to_string(args)?);
Ok(Self::new(format!("/api/{}/{}", path.to_string(), qs)))
}
fn api_item(path: impl ToString, item: impl ToString) -> Self {
Self::api_call(format!("{}/{}", path.to_string(), item.to_string()))
}
fn graphql_query<T: serde::Serialize>(x: &T) -> Self {
Self::new("/graphql")
.with_auth()
.method(fetch::Method::Post)
.send_json(x)
}
fn with_auth(self) -> Self |
}
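// Usage sketch (illustrative endpoint and body only):
//   let req = fetch::Request::api_item("host", 42);   // -> "/api/host/42/"
//   let gql = fetch::Request::graphql_query(
//       &serde_json::json!({ "query": "{ hosts { id } }" }));  // POST /graphql
// graphql_query attaches the CSRF header only when csrf_token() returns Some;
// serde_json is assumed here purely for the example.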
/// Allows for merging attributes onto an existing item
pub(crate) trait MergeAttrs {
fn merge_attrs(self, attrs: Attrs) -> Self;
}
impl MergeAttrs for Attrs {
fn merge_attrs(mut self, attrs: Attrs) -> Self {
self.merge(attrs);
self
}
}
impl<T> MergeAttrs for Node<T> {
fn merge_attrs(self, attrs: Attrs) -> Self {
if let Self::Element(mut el) = self {
el.attrs.merge(attrs);
Self::Element(el)
} else {
self
}
}
}
pub(crate) trait NodeExt<T> {
fn with_listener(self, event_handler: EventHandler<T>) -> Self;
fn with_style(self, key: impl Into<St>, val: impl Into<CSSValue>) -> Self;
}
impl<T> NodeExt<T> for Node<T> {
fn with_listener(mut self, event_handler: EventHandler<T>) -> Self {
self.add_listener(event_handler);
self
}
fn with_style(mut self, key: impl Into<St>, val: impl Into<CSSValue>) -> Self {
self.add_style(key, val);
self
}
}
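// Builder-style sketch: both helpers hand the node back, so calls chain, e.g.
//   div!["hello"].merge_attrs(attrs![At::Title => "hint"]).with_style("color", "red")
// (attribute values are placeholders; merge_attrs is a no-op on non-element nodes).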
/// Extension methods for `fetch::FailReason`
pub(crate) trait FailReasonExt {
fn message(&self) -> String;
}
impl<T> FailReasonExt for fetch::FailReason<T> {
fn message(&self) -> String {
match self {
Self::RequestError(err, _) => match err {
fetch::RequestError::DomException(e) => e.message(),
},
Self::Status(status, _) => format!("Status: {}", status.code),
Self::DataError(err, _) => match err {
fetch::DataError::DomException(e) => e.message(),
fetch::DataError::SerdeError(e, _) => format!("Serde error: {}", e),
},
}
}
}
/// Extension methods for `seed::browser::url::Url`
pub(crate) trait UrlExt {
/// Returns the path of the `Url`.
/// This fn will account for
/// the base (via the `base`) tag
/// and remove it from the path
fn get_path(&self) -> Vec<String>;
}
impl UrlExt for Url {
fn get_path(&self) -> Vec<String> {
let mut path = self.path.clone();
let base = match crate::UI_BASE.as_ref() {
Some(x) => x,
None => return path,
};
let has_base = path.get(0).filter(|x| x == &base).is_some();
if has_base {
path.remove(0);
}
path
}
}
| {
match csrf_token() {
Some(csrf) => self.header("X-CSRFToken", &csrf),
None => self,
}
} | identifier_body |
extensions.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::auth::csrf_token;
use iml_wire_types::{GroupType, Session};
use seed::{fetch, prelude::*, *};
/// Extension methods for the Session API object.
pub(crate) trait SessionExt {
/// Does the user need to login?
fn needs_login(&self) -> bool;
/// Does a logged in user exist?
fn has_user(&self) -> bool;
/// Does the user fall within the group?
fn group_allowed(&self, group: GroupType) -> bool;
}
impl SessionExt for Session {
fn needs_login(&self) -> bool {
self.user.is_none() && !self.read_enabled
}
fn has_user(&self) -> bool {
self.user.is_some()
}
fn group_allowed(&self, group: GroupType) -> bool {
self.user
.as_ref()
.and_then(|x| x.groups.as_ref())
.and_then(|xs| {
xs.iter().find(|y| {
//Superusers can do everything.
if y.name == GroupType::Superusers {
return true;
}
//Filesystem administrators can do everything a filesystem user can do.
if y.name == GroupType::FilesystemAdministrators && group == GroupType::FilesystemUsers {
return true;
}
// Fallback to matching on names.
y.name == group
})
})
.is_some()
}
}
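// Note on the impl above: the and_then/find chain short-circuits on the first
// matching group, and a session without a user (or whose user has no groups)
// simply falls through to false rather than panicking.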
/// Extension methods for`fetch::Request` | fn graphql_query<T: serde::Serialize>(x: &T) -> Self;
fn with_auth(self: Self) -> Self;
}
impl RequestExt for fetch::Request {
fn api_call(path: impl ToString) -> Self {
Self::new(format!("/api/{}/", path.to_string()))
}
fn api_query(path: impl ToString, args: impl serde::Serialize) -> Result<Self, serde_urlencoded::ser::Error> {
let qs = format!("?{}", serde_urlencoded::to_string(args)?);
Ok(Self::new(format!("/api/{}/{}", path.to_string(), qs)))
}
fn api_item(path: impl ToString, item: impl ToString) -> Self {
Self::api_call(format!("{}/{}", path.to_string(), item.to_string()))
}
fn graphql_query<T: serde::Serialize>(x: &T) -> Self {
Self::new("/graphql")
.with_auth()
.method(fetch::Method::Post)
.send_json(x)
}
fn with_auth(self) -> Self {
match csrf_token() {
Some(csrf) => self.header("X-CSRFToken", &csrf),
None => self,
}
}
}
/// Allows for merging attributes onto an existing item
pub(crate) trait MergeAttrs {
fn merge_attrs(self, attrs: Attrs) -> Self;
}
impl MergeAttrs for Attrs {
fn merge_attrs(mut self, attrs: Attrs) -> Self {
self.merge(attrs);
self
}
}
impl<T> MergeAttrs for Node<T> {
fn merge_attrs(self, attrs: Attrs) -> Self {
if let Self::Element(mut el) = self {
el.attrs.merge(attrs);
Self::Element(el)
} else {
self
}
}
}
pub(crate) trait NodeExt<T> {
fn with_listener(self, event_handler: EventHandler<T>) -> Self;
fn with_style(self, key: impl Into<St>, val: impl Into<CSSValue>) -> Self;
}
impl<T> NodeExt<T> for Node<T> {
fn with_listener(mut self, event_handler: EventHandler<T>) -> Self {
self.add_listener(event_handler);
self
}
fn with_style(mut self, key: impl Into<St>, val: impl Into<CSSValue>) -> Self {
self.add_style(key, val);
self
}
}
/// Extension methods for`fetch::Request`
pub(crate) trait FailReasonExt {
fn message(&self) -> String;
}
impl<T> FailReasonExt for fetch::FailReason<T> {
fn message(&self) -> String {
match self {
Self::RequestError(err, _) => match err {
fetch::RequestError::DomException(e) => e.message(),
},
Self::Status(status, _) => format!("Status: {}", status.code),
Self::DataError(err, _) => match err {
fetch::DataError::DomException(e) => e.message(),
fetch::DataError::SerdeError(e, _) => format!("Serde error: {}", e),
},
}
}
}
/// Extension methods for`seed::browser::url::Url`
pub(crate) trait UrlExt {
/// Returns the path of the `Url`.
/// This fn will account for
/// the base (via the `base`) tag
/// and remove it from the path
fn get_path(&self) -> Vec<String>;
}
impl UrlExt for Url {
fn get_path(&self) -> Vec<String> {
let mut path = self.path.clone();
let base = match crate::UI_BASE.as_ref() {
Some(x) => x,
None => return path,
};
let has_base = path.get(0).filter(|x| x == &base).is_some();
if has_base {
path.remove(0);
}
path
}
} | pub(crate) trait RequestExt: Sized {
fn api_call(path: impl ToString) -> Self;
fn api_query(path: impl ToString, args: impl serde::Serialize) -> Result<Self, serde_urlencoded::ser::Error>;
fn api_item(path: impl ToString, item: impl ToString) -> Self; | random_line_split |
extensions.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::auth::csrf_token;
use iml_wire_types::{GroupType, Session};
use seed::{fetch, prelude::*, *};
/// Extension methods for the Session API object.
pub(crate) trait SessionExt {
/// Does the user need to login?
fn needs_login(&self) -> bool;
/// Does a logged in user exist?
fn has_user(&self) -> bool;
/// Does the user fall within the group?
fn group_allowed(&self, group: GroupType) -> bool;
}
impl SessionExt for Session {
fn needs_login(&self) -> bool {
self.user.is_none() && !self.read_enabled
}
fn has_user(&self) -> bool {
self.user.is_some()
}
fn group_allowed(&self, group: GroupType) -> bool {
self.user
.as_ref()
.and_then(|x| x.groups.as_ref())
.and_then(|xs| {
xs.iter().find(|y| {
//Superusers can do everything.
if y.name == GroupType::Superusers {
return true;
}
//Filesystem administrators can do everything a filesystem user can do.
if y.name == GroupType::FilesystemAdministrators && group == GroupType::FilesystemUsers {
return true;
}
// Fallback to matching on names.
y.name == group
})
})
.is_some()
}
}
/// Extension methods for`fetch::Request`
pub(crate) trait RequestExt: Sized {
fn api_call(path: impl ToString) -> Self;
fn api_query(path: impl ToString, args: impl serde::Serialize) -> Result<Self, serde_urlencoded::ser::Error>;
fn api_item(path: impl ToString, item: impl ToString) -> Self;
fn graphql_query<T: serde::Serialize>(x: &T) -> Self;
fn with_auth(self: Self) -> Self;
}
impl RequestExt for fetch::Request {
fn api_call(path: impl ToString) -> Self {
Self::new(format!("/api/{}/", path.to_string()))
}
fn api_query(path: impl ToString, args: impl serde::Serialize) -> Result<Self, serde_urlencoded::ser::Error> {
let qs = format!("?{}", serde_urlencoded::to_string(args)?);
Ok(Self::new(format!("/api/{}/{}", path.to_string(), qs)))
}
fn api_item(path: impl ToString, item: impl ToString) -> Self {
Self::api_call(format!("{}/{}", path.to_string(), item.to_string()))
}
fn graphql_query<T: serde::Serialize>(x: &T) -> Self {
Self::new("/graphql")
.with_auth()
.method(fetch::Method::Post)
.send_json(x)
}
fn with_auth(self) -> Self {
match csrf_token() {
Some(csrf) => self.header("X-CSRFToken", &csrf),
None => self,
}
}
}
/// Allows for merging attributes onto an existing item
pub(crate) trait MergeAttrs {
fn merge_attrs(self, attrs: Attrs) -> Self;
}
impl MergeAttrs for Attrs {
fn | (mut self, attrs: Attrs) -> Self {
self.merge(attrs);
self
}
}
impl<T> MergeAttrs for Node<T> {
fn merge_attrs(self, attrs: Attrs) -> Self {
if let Self::Element(mut el) = self {
el.attrs.merge(attrs);
Self::Element(el)
} else {
self
}
}
}
pub(crate) trait NodeExt<T> {
fn with_listener(self, event_handler: EventHandler<T>) -> Self;
fn with_style(self, key: impl Into<St>, val: impl Into<CSSValue>) -> Self;
}
impl<T> NodeExt<T> for Node<T> {
fn with_listener(mut self, event_handler: EventHandler<T>) -> Self {
self.add_listener(event_handler);
self
}
fn with_style(mut self, key: impl Into<St>, val: impl Into<CSSValue>) -> Self {
self.add_style(key, val);
self
}
}
/// Extension methods for`fetch::Request`
pub(crate) trait FailReasonExt {
fn message(&self) -> String;
}
impl<T> FailReasonExt for fetch::FailReason<T> {
fn message(&self) -> String {
match self {
Self::RequestError(err, _) => match err {
fetch::RequestError::DomException(e) => e.message(),
},
Self::Status(status, _) => format!("Status: {}", status.code),
Self::DataError(err, _) => match err {
fetch::DataError::DomException(e) => e.message(),
fetch::DataError::SerdeError(e, _) => format!("Serde error: {}", e),
},
}
}
}
/// Extension methods for`seed::browser::url::Url`
pub(crate) trait UrlExt {
/// Returns the path of the `Url`.
/// This fn will account for
/// the base (via the `base`) tag
/// and remove it from the path
fn get_path(&self) -> Vec<String>;
}
impl UrlExt for Url {
fn get_path(&self) -> Vec<String> {
let mut path = self.path.clone();
let base = match crate::UI_BASE.as_ref() {
Some(x) => x,
None => return path,
};
let has_base = path.get(0).filter(|x| x == &base).is_some();
if has_base {
path.remove(0);
}
path
}
}
| merge_attrs | identifier_name |
Ol.ts | /*
* Copyright (c) 2014 Jose Carlos Lama. www.typedom.org
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the \"Software\"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
* OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* The {{#crossLink "Ol"}}{{/crossLink}} Defines an ordered list
*
* @class Ol
* @extends Container
* @constructor
**/
class Ol extends Container<Ol, HTMLOListElement>
{
public static OL: string = 'ol';
constructor();
constructor(id: string)
constructor(attributes: Object)
constructor(element: HTMLOListElement)
| (idOrAttributesOrElement?: any) {
super(idOrAttributesOrElement, Ol.OL);
}
} | constructor | identifier_name |
Ol.ts | /* | *
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the \"Software\"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
* OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* The {{#crossLink "Ol"}}{{/crossLink}} Defines an ordered list
*
* @class Ol
* @extends Container
* @constructor
**/
class Ol extends Container<Ol, HTMLOListElement>
{
public static OL: string = 'ol';
constructor();
constructor(id: string)
constructor(attributes: Object)
constructor(element: HTMLOListElement)
constructor(idOrAttributesOrElement?: any) {
super(idOrAttributesOrElement, Ol.OL);
}
} | * Copyright (c) 2014 Jose Carlos Lama. www.typedom.org | random_line_split |
Markers.js | /* Copyright (c) 2006-2010 by OpenLayers Contributors (see authors.txt for
* full list of contributors). Published under the Clear BSD license.
* See http://svn.openlayers.org/trunk/openlayers/license.txt for the
* full text of the license. */
/**
* @requires OpenLayers/Layer.js
*/
/**
* Class: OpenLayers.Layer.Markers
*
* Inherits from:
* - <OpenLayers.Layer>
*/
OpenLayers.Layer.Markers = OpenLayers.Class(OpenLayers.Layer, {
/**
* APIProperty: isBaseLayer
* {Boolean} Markers layer is never a base layer.
*/
isBaseLayer: false,
/**
* APIProperty: markers
* {Array(<OpenLayers.Marker>)} internal marker list
*/
markers: null,
/**
* Property: drawn
* {Boolean} internal state of drawing. This is a workaround for the fact
* that the map does not call moveTo with a zoomChanged when the map is
* first starting up. This lets us catch the case where we have *never*
* drawn the layer, and draw it even if the zoom hasn't changed.
*/
drawn: false,
/**
* Constructor: OpenLayers.Layer.Markers
* Create a Markers layer.
*
* Parameters:
* name - {String}
* options - {Object} Hashtable of extra options to tag onto the layer
*/
initialize: function(name, options) {
OpenLayers.Layer.prototype.initialize.apply(this, arguments);
this.markers = [];
},
/**
* APIMethod: destroy
*/
destroy: function() {
this.clearMarkers();
this.markers = null;
OpenLayers.Layer.prototype.destroy.apply(this, arguments);
},
/**
* APIMethod: setOpacity
* Sets the opacity for all the markers.
*
* Parameter:
* opacity - {Float}
*/
setOpacity: function(opacity) {
if (opacity != this.opacity) {
this.opacity = opacity;
for (var i=0, len=this.markers.length; i<len; i++) {
this.markers[i].setOpacity(this.opacity);
}
}
},
/**
* Method: moveTo
*
* Parameters:
* bounds - {<OpenLayers.Bounds>}
* zoomChanged - {Boolean}
* dragging - {Boolean}
*/
moveTo:function(bounds, zoomChanged, dragging) {
OpenLayers.Layer.prototype.moveTo.apply(this, arguments);
if (zoomChanged || !this.drawn) {
for(var i=0, len=this.markers.length; i<len; i++) {
this.drawMarker(this.markers[i]);
}
this.drawn = true;
}
},
/**
* APIMethod: addMarker
*
* Parameters:
* marker - {<OpenLayers.Marker>}
*/
addMarker: function(marker) {
this.markers.push(marker);
if (this.opacity != null) {
marker.setOpacity(this.opacity);
}
if (this.map && this.map.getExtent()) {
marker.map = this.map;
this.drawMarker(marker);
}
},
/**
* APIMethod: removeMarker
*
* Parameters:
* marker - {<OpenLayers.Marker>}
*/
removeMarker: function(marker) {
if (this.markers && this.markers.length) {
OpenLayers.Util.removeItem(this.markers, marker);
marker.erase();
}
},
/**
* Method: clearMarkers
* This method removes all markers from a layer. The markers are not
* destroyed by this function, but are removed from the list of markers.
*/
clearMarkers: function() {
if (this.markers != null) {
while(this.markers.length > 0) {
this.removeMarker(this.markers[0]);
}
}
},
/**
* Method: drawMarker
* Calculate the pixel location for the marker, create it, and
* add it to the layer's div
*
* Parameters:
* marker - {<OpenLayers.Marker>}
*/
drawMarker: function(marker) {
var px = this.map.getLayerPxFromLonLat(marker.lonlat);
if (px == null) {
marker.display(false);
} else {
if (!marker.isDrawn()) | else if(marker.icon) {
marker.icon.moveTo(px);
}
}
},
/**
* APIMethod: getDataExtent
* Calculates the max extent which includes all of the markers.
*
* Returns:
* {<OpenLayers.Bounds>}
*/
getDataExtent: function () {
var maxExtent = null;
if ( this.markers && (this.markers.length > 0)) {
var maxExtent = new OpenLayers.Bounds();
for(var i=0, len=this.markers.length; i<len; i++) {
var marker = this.markers[i];
maxExtent.extend(marker.lonlat);
}
}
return maxExtent;
},
CLASS_NAME: "OpenLayers.Layer.Markers"
});
| {
var markerImg = marker.draw(px);
this.div.appendChild(markerImg);
} | conditional_block |
Markers.js | /* Copyright (c) 2006-2010 by OpenLayers Contributors (see authors.txt for
* full list of contributors). Published under the Clear BSD license.
* See http://svn.openlayers.org/trunk/openlayers/license.txt for the
* full text of the license. */
/**
* @requires OpenLayers/Layer.js
*/
/**
* Class: OpenLayers.Layer.Markers
*
* Inherits from:
* - <OpenLayers.Layer>
*/
OpenLayers.Layer.Markers = OpenLayers.Class(OpenLayers.Layer, {
/**
* APIProperty: isBaseLayer
* {Boolean} Markers layer is never a base layer.
*/
isBaseLayer: false,
/**
* APIProperty: markers
* {Array(<OpenLayers.Marker>)} internal marker list
*/
markers: null,
/**
* Property: drawn
* {Boolean} internal state of drawing. This is a workaround for the fact
* that the map does not call moveTo with a zoomChanged when the map is
* first starting up. This lets us catch the case where we have *never*
* drawn the layer, and draw it even if the zoom hasn't changed.
*/
drawn: false,
/**
* Constructor: OpenLayers.Layer.Markers
* Create a Markers layer.
*
* Parameters:
* name - {String}
* options - {Object} Hashtable of extra options to tag onto the layer
*/
initialize: function(name, options) {
OpenLayers.Layer.prototype.initialize.apply(this, arguments);
this.markers = [];
},
/**
* APIMethod: destroy
*/
destroy: function() {
this.clearMarkers();
this.markers = null;
OpenLayers.Layer.prototype.destroy.apply(this, arguments);
},
/**
* APIMethod: setOpacity
* Sets the opacity for all the markers.
*
* Parameter:
| setOpacity: function(opacity) {
if (opacity != this.opacity) {
this.opacity = opacity;
for (var i=0, len=this.markers.length; i<len; i++) {
this.markers[i].setOpacity(this.opacity);
}
}
},
/**
* Method: moveTo
*
* Parameters:
* bounds - {<OpenLayers.Bounds>}
* zoomChanged - {Boolean}
* dragging - {Boolean}
*/
moveTo:function(bounds, zoomChanged, dragging) {
OpenLayers.Layer.prototype.moveTo.apply(this, arguments);
if (zoomChanged || !this.drawn) {
for(var i=0, len=this.markers.length; i<len; i++) {
this.drawMarker(this.markers[i]);
}
this.drawn = true;
}
},
/**
* APIMethod: addMarker
*
* Parameters:
* marker - {<OpenLayers.Marker>}
*/
addMarker: function(marker) {
this.markers.push(marker);
if (this.opacity != null) {
marker.setOpacity(this.opacity);
}
if (this.map && this.map.getExtent()) {
marker.map = this.map;
this.drawMarker(marker);
}
},
/**
* APIMethod: removeMarker
*
* Parameters:
* marker - {<OpenLayers.Marker>}
*/
removeMarker: function(marker) {
if (this.markers && this.markers.length) {
OpenLayers.Util.removeItem(this.markers, marker);
marker.erase();
}
},
/**
* Method: clearMarkers
* This method removes all markers from a layer. The markers are not
* destroyed by this function, but are removed from the list of markers.
*/
clearMarkers: function() {
if (this.markers != null) {
while(this.markers.length > 0) {
this.removeMarker(this.markers[0]);
}
}
},
/**
* Method: drawMarker
* Calculate the pixel location for the marker, create it, and
* add it to the layer's div
*
* Parameters:
* marker - {<OpenLayers.Marker>}
*/
drawMarker: function(marker) {
var px = this.map.getLayerPxFromLonLat(marker.lonlat);
if (px == null) {
marker.display(false);
} else {
if (!marker.isDrawn()) {
var markerImg = marker.draw(px);
this.div.appendChild(markerImg);
} else if(marker.icon) {
marker.icon.moveTo(px);
}
}
},
/**
* APIMethod: getDataExtent
* Calculates the max extent which includes all of the markers.
*
* Returns:
* {<OpenLayers.Bounds>}
*/
getDataExtent: function () {
var maxExtent = null;
if ( this.markers && (this.markers.length > 0)) {
var maxExtent = new OpenLayers.Bounds();
for(var i=0, len=this.markers.length; i<len; i++) {
var marker = this.markers[i];
maxExtent.extend(marker.lonlat);
}
}
return maxExtent;
},
CLASS_NAME: "OpenLayers.Layer.Markers"
}); | * opacity - {Float}
*/
| random_line_split |
gen_dist_from_U.py | # encoding: utf8
'''
随机变量的积分变换
下面的基于U(0,1)实现了几个常用的连续随机变量概率分布的随机数模拟
U(0,1) 在0,1区间的连续均匀分布的随机数模拟
CDF: cumulate distribution function
PMF: probility mass function
PDF: probility density function
对任意随机变量X, 它有连续的的CDF F(x), 定义随机变量 Y=F(X), 则Y为 [0,1]上的均匀分布,即有 P(Y<=y) = y
box-muller方法并没有了解原理
'''
import numpy as np
import matplotlib.pyplot as plt
import bisect
import pdb
import standard_normal_dist
'''
根据标准正态分布表生成gauss分布。即:基于面积生成
X = Z*sigma + mu
'''
def gen_gauss_distribute_pdf_from_dist_table(mu, sigma, color='bo'):
nt = standard_normal_dist.normal_dist_table
std_x = list(standard_normal_dist.normal_dist_table_x)
gauss_x = np.zeros(len(std_x))
for id in range(len(std_x)):
#Z = (X - mu)/sigma, X = Z * sigma + mu
x = std_x[id] * sigma + mu
gauss_x[id] = x
gauss_y = np.zeros(len(gauss_x))
step = (gauss_x[-1] - gauss_x[0]) / len(gauss_x)
pre_p = 0
id = 0
for x,p in nt:
y = (p - pre_p) / step
pre_p = p
gauss_y[id] = y
id += 1
return gauss_x, gauss_y
'''
生成高斯分布的x及其对应的pdf
f = 1/(sqrt(2*pi) * sigma) * exp(-(x-mu)^2/(2*sigma^2))
'''
def gen_gauss_distribute_pdf(mu, sigma, sample_count, max_x):
gauss_x = np.linspace(-max_x, max_x, sample_count)
gauss_y = map(lambda x:1/(np.sqrt(2 * np.pi) * sigma) * np.exp(-(x-mu)**2/(2*sigma**2)), gauss_x)
return gauss_x, gauss_y
'''
画直方图
a, 样本值
'''
def plt_hist(a, color='r', normed=False):
sample_count = len(a)
a.sort()
group_count = int(np.sqrt(sample_count) + 0.9999)
width = (a[-1] - a[0]) / group_count
#直方图间隔
bins = np.linspace(a[0], a[-1], group_count+1)
hist = np.zeros(group_count)
for x in a:
index = bisect.bisect_right(bins - 0.00001, x) #减去最小的测量单位的一半,因为是浮点数,取个小的数
#边界上的数归到上一个区间. 即x in [begin, end), 最后一个区间除外
# 一般下界为 起始值 - 最小测量单位 * 0.5. 这样包含了最小元素,但上界值归到下一个区间了
index -= 1
if index >= group_count:
index -= 1 #边界上的数。真实测量上不会发生
hist[index] += 1
if normed == True:
total = np.sum(hist) * 1.0
hist /= total
hist /= width #概率是面积。除以这个得到高度
plt.bar(bins[0:group_count], hist, width = width, facecolor = 'lightskyblue',edgecolor = 'white', align='edge')
'''
#也可以直接用这个来汇制直方图
plt.hist(a, bins = group_count, normed=normed)
'''
'''
样本数一般要 > 100
使用均匀分布随机数,生成符合正态分布的随机数
'''
def gen_gauss_samples_byU(sample_count):
sim_gauss_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
x = standard_normal_dist.guess_x(u)
sim_gauss_num[i] = x
sim_u[i] = u
return sim_u, sim_gauss_num
'''
正态随机变量分布
box muller方法:
U0, U1独立,且U0, U1 ~ Uniform[0,1], 则
Z0 = sqrt(-2ln(U0))cos(2*pi*U1)
Z1 = sqrt(-2ln(U1))sin(2*pi*U0)
且Z0, Z1独立, 且服从正态分布
'''
def gen_gauss_samples_by_BoxMuller(sample_count):
U0 = np.random.uniform(0, 1, size=sample_count)
U1 = np.random.uniform(0, 1, size=sample_count)
Z0 = np.sqrt(-2 * np.log(U0)) * np.cos(2 * np.pi * U1)
Z1 = np.sqrt(-2 * np.log(U1)) * np.sin(2 * np.pi * U0)
return Z0, Z1
'''
使用均匀分布随机数,生成符合指数分布的随机数
F(x) = 1 - exp(-beta*x)
x = -1/beta * log(1 - F)
'''
def gen_exp_samples_byU(sample_count, beta):
sim_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
if u == 1:
x = 0.
else:
x = -1./beta * np.log(1. - u)
sim_u[i] = u
sim_num[i] = x
return sim_u, sim_num
'''
pdf = 1/beta * exp(-x/beta)
'''
def gen_exp_distribute_pdf(sample_count, beta, max_x):
x = np.linspace(0, max_x, sample_count)
y = map(lambda k:1/beta * np.exp(-k/beta), x)
return x,y
'''
分组的数量在5-12之间较为适宜(本程序用的是下一条)
一般取总样本数开平方取上界. 上一个分组算法一般认为不合理
分得越细,和曲线偏差越大,但好看;分得粗,不好看
'''
sample_count = 1000
group_count = int(np.sqrt(sample_count) + 0.9999)
sim_u, sim_gauss_num = gen_gauss_samples_byU(sample_count)
mean_val = np.mean(sim_gauss_num)
std_val = np.std(sim_gauss_num)
print "mean, std:", mean_val, std_val
#绘制高斯采样的直方图
plt_hist(sim_gauss_num, normed=True)
#叠加均匀分布采样直方图
plt.hist((sim_u-0.5)*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加正态分布图
gauss_x, gauss_y = gen_gauss_distribute_pdf(0, 1, sample_count, np.max(sim_gauss_num))
plt.p | 布图.两种生成方法,对比一下误差
plt.clf()
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 1)
plt.plot(gauss_x, gauss_y, 'g-.')
plt.plot(gauss_x2, gauss_y2, 'r--')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 2)
plt.plot(gauss_x2, gauss_y2, 'b-')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 4)
plt.plot(gauss_x2, gauss_y2, 'y-')
plt.savefig('images/norm_distribute.png', format='png')
#gauss dist, by box-muller method
plt.clf()
X1, X2 = gen_gauss_samples_by_BoxMuller(sample_count)
plt.hist(X2, bins = group_count, normed=True, color='r')
plt.hist(X1, bins = group_count, normed=True, color='b', alpha=0.5)
plt.savefig('images/norm_distribute_by_boxmuller.png', format='png')
coef = np.dot(X1-X1.mean(), X2-X2.mean()) /sample_count/(X1.std() * X2.std())
print 'gen gauss independent rv by box muller method.'
print 'std of X1: %.4f; std of X2: %.4f; coefficient between X1 and X2: %.4f' % (X1.std(), X2.std(), coef)
#指数分布相关
plt.clf()
sim_u, sim_num = gen_exp_samples_byU(sample_count, 1)
#绘制指数采样直方图
plt.hist(sim_num, bins = group_count, normed=True)
#叠加均匀分布采样直方图
plt.hist(sim_u*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加标准指数分布图
exp_x, exp_y = gen_exp_distribute_pdf(sample_count, 1, np.max(sim_num))
plt.plot(exp_x, exp_y, color='g')
plt.savefig('images/norm_exp_gen_by_U.png', format='png')
| lot(gauss_x, gauss_y, color='g')
plt.savefig('images/norm_distribute_gen_by_U.png', format='png')
#绘制正态分 | identifier_body |
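The module docstring in the row above states the probability integral transform: if X has a continuous CDF F, then Y = F(X) is uniform on [0, 1], which is what justifies sampling X as F^{-1}(U). Below is a minimal stand-alone check of that claim — my own sketch, not part of the dataset row; it assumes only NumPy plus the standard library, and the helper name normal_cdf is mine.

```python
# Probability integral transform: if X has continuous CDF F, then F(X) ~ U(0, 1).
# Sketch only; X is standard normal, F is written with math.erf.
import math
import numpy as np

def normal_cdf(x):
    """CDF of the standard normal distribution."""
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

rng = np.random.default_rng(0)
x = rng.standard_normal(100_000)            # X ~ N(0, 1)
y = np.array([normal_cdf(v) for v in x])    # Y = F(X)

print("mean of Y:", y.mean())   # ~0.5 for a U(0, 1) variable
print("var  of Y:", y.var())    # ~1/12 = 0.0833 for a U(0, 1) variable
```

Read backwards, the same identity is what the row's gen_gauss_samples_byU relies on: draw u ~ U(0,1) and push it through an approximate, table-based inverse normal CDF.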
gen_dist_from_U.py | # encoding: utf8
'''
随机变量的积分变换
下面的基于U(0,1)实现了几个常用的连续随机变量概率分布的随机数模拟
U(0,1) 在0,1区间的连续均匀分布的随机数模拟
CDF: cumulate distribution function
PMF: probility mass function
PDF: probility density function
对任意随机变量X, 它有连续的的CDF F(x), 定义随机变量 Y=F(X), 则Y为 [0,1]上的均匀分布,即有 P(Y<=y) = y
box-muller方法并没有了解原理
'''
import numpy as np
import matplotlib.pyplot as plt
import bisect
import pdb
import standard_normal_dist
'''
根据标准正态分布表生成gauss分布。即:基于面积生成
X = Z*sigma + mu
'''
def gen_gauss_distribute_pdf_from_dist_table(mu, sigma, color='bo'):
nt = standard_normal_dist.normal_dist_table
std_x = list(standard_normal_dist.normal_dist_table_x)
gauss_x = np.zeros(len(std_x))
for id in range(len(std_x)):
#Z = (X - mu)/sigma, X = Z * sigma + mu
x = std_x[id] * sigma + mu
gauss_x[id] = x
gauss_y = np.zeros(len(gauss_x))
step = (gauss_x[-1] - gauss_x[0]) / len(gauss_x)
pre_p = 0
id = 0
for x,p in nt:
y = (p - pre_p) / step
pre_p = p
gauss_y[id] = y
id += 1
return gauss_x, gauss_y
'''
生成高斯分布的x及其对应的pdf
f = 1/(sqrt(2*pi) * sigma) * exp(-(x-mu)^2/(2*sigma^2))
'''
def gen_gauss_distribute_pdf(mu, sigma, sample_count, max_x):
gauss_x = np.linspace(-max_x, max_x, sample_count)
gauss_y = map(lambda x:1/(np.sqrt(2 * np.pi) * sigma) * np.exp(-(x-mu)**2/(2*sigma**2)), gauss_x)
return gauss_x, gauss_y
'''
画直方图
a, 样本值
'''
def plt_hist(a, color='r', normed=False):
sample_count = len(a)
a.sort()
group_count = int(np.sqrt(sample_count) + 0.9999)
width = (a[-1] - a[0]) / group_count
#直方图间隔
bins = np.linspace(a[0], a[-1], group_count+1)
hist = np.zeros(group_count)
for x in a:
index = bisect.bisect_right(bins - 0.00001, x) #减去最小的测量单位的一半,因为是浮点数,取个小的数
#边界上的数归到上一个区间. 即x in [begin, end), 最后一个区间除外
# 一般下界为 起始值 - 最小测量单位 * 0.5. 这样包含了最小元素,但上界值归到下一个区间了
index -= 1
if index >= group_count:
index -= 1 #边界上的数。真实测量上不会发生
hist[index] += 1
if normed == True:
total = np.sum(hist) * 1.0
hist /= total
hist /= width #概率是面积。除以这个得到高度
plt.bar(bins[0:group_count], hist, width = width, facecolor = 'lightskyblue',edgecolor = 'white', align='edge')
'''
#也可以直接用这个来汇制直方图
plt.hist(a, bins = group_count, normed=normed)
'''
'''
样本数一般要 > 100
使用均匀分布随机数,生成符合正态分布的随机数
'''
def gen_gauss_samples_byU(sample_count):
sim_gauss_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
x = standard_normal_dist.guess_x(u)
sim_gauss_num[i] = x
sim_u[i] = u
return sim_u, sim_gauss_num
'''
正态随机变量分布
box muller方法:
U0, U1独立,且U0, U1 ~ Uniform[0,1], 则
Z0 = sqrt(-2ln(U0))cos(2*pi*U1)
Z1 = sqrt(-2ln(U1))sin(2*pi*U0)
且Z0, Z1独立, 且服从正态分布
'''
def gen_gauss_samples_by_BoxMuller(sample_count):
U0 = np.random.uniform(0, 1, size=sample_count)
U1 = np.random.uniform(0, 1, size=sample_count)
Z0 = np.sqrt(-2 * np.log(U0)) * np.cos(2 * np.pi * U1)
Z1 = np.sqrt(-2 * np.log(U1)) * np.sin(2 * np | f gen_exp_samples_byU(sample_count, beta):
sim_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
if u == 1:
x = 0.
else:
x = -1./beta * np.log(1. - u)
sim_u[i] = u
sim_num[i] = x
return sim_u, sim_num
'''
pdf = 1/beta * exp(-x/beta)
'''
def gen_exp_distribute_pdf(sample_count, beta, max_x):
x = np.linspace(0, max_x, sample_count)
y = map(lambda k:1/beta * np.exp(-k/beta), x)
return x,y
'''
分组的数量在5-12之间较为适宜(本程序用的是下一条)
一般取总样本数开平方取上界. 上一个分组算法一般认为不合理
分得越细,和曲线偏差越大,但好看;分得粗,不好看
'''
sample_count = 1000
group_count = int(np.sqrt(sample_count) + 0.9999)
sim_u, sim_gauss_num = gen_gauss_samples_byU(sample_count)
mean_val = np.mean(sim_gauss_num)
std_val = np.std(sim_gauss_num)
print "mean, std:", mean_val, std_val
#绘制高斯采样的直方图
plt_hist(sim_gauss_num, normed=True)
#叠加均匀分布采样直方图
plt.hist((sim_u-0.5)*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加正态分布图
gauss_x, gauss_y = gen_gauss_distribute_pdf(0, 1, sample_count, np.max(sim_gauss_num))
plt.plot(gauss_x, gauss_y, color='g')
plt.savefig('images/norm_distribute_gen_by_U.png', format='png')
#绘制正态分布图.两种生成方法,对比一下误差
plt.clf()
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 1)
plt.plot(gauss_x, gauss_y, 'g-.')
plt.plot(gauss_x2, gauss_y2, 'r--')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 2)
plt.plot(gauss_x2, gauss_y2, 'b-')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 4)
plt.plot(gauss_x2, gauss_y2, 'y-')
plt.savefig('images/norm_distribute.png', format='png')
#gauss dist, by box-muller method
plt.clf()
X1, X2 = gen_gauss_samples_by_BoxMuller(sample_count)
plt.hist(X2, bins = group_count, normed=True, color='r')
plt.hist(X1, bins = group_count, normed=True, color='b', alpha=0.5)
plt.savefig('images/norm_distribute_by_boxmuller.png', format='png')
coef = np.dot(X1-X1.mean(), X2-X2.mean()) /sample_count/(X1.std() * X2.std())
print 'gen gauss independent rv by box muller method.'
print 'std of X1: %.4f; std of X2: %.4f; coefficient between X1 and X2: %.4f' % (X1.std(), X2.std(), coef)
#指数分布相关
plt.clf()
sim_u, sim_num = gen_exp_samples_byU(sample_count, 1)
#绘制指数采样直方图
plt.hist(sim_num, bins = group_count, normed=True)
#叠加均匀分布采样直方图
plt.hist(sim_u*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加标准指数分布图
exp_x, exp_y = gen_exp_distribute_pdf(sample_count, 1, np.max(sim_num))
plt.plot(exp_x, exp_y, color='g')
plt.savefig('images/norm_exp_gen_by_U.png', format='png')
| .pi * U0)
return Z0, Z1
'''
使用均匀分布随机数,生成符合指数分布的随机数
F(x) = 1 - exp(-beta*x)
x = -1/beta * log(1 - F)
'''
de | conditional_block |
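The Box-Muller docstring in the row above states the transform without a check. Here is a self-contained sketch — my own code, not from the dataset; it assumes NumPy and uses the classic form in which the same uniform feeds the logarithm for both outputs, whereas the row's code also swaps U0/U1 for Z1 (both choices give standard-normal marginals).

```python
# Box-Muller: with U0, U1 ~ U(0,1) independent and R = sqrt(-2 ln U0),
# Z0 = R*cos(2*pi*U1) and Z1 = R*sin(2*pi*U1) are independent N(0, 1).
import numpy as np

rng = np.random.default_rng(1)
n = 200_000
u0 = 1.0 - rng.uniform(size=n)   # in (0, 1], keeps the log finite
u1 = rng.uniform(size=n)

r = np.sqrt(-2.0 * np.log(u0))
z0 = r * np.cos(2.0 * np.pi * u1)
z1 = r * np.sin(2.0 * np.pi * u1)

print("std(z0):", z0.std())                    # expect ~1.0
print("std(z1):", z1.std())                    # expect ~1.0
print("corr   :", np.corrcoef(z0, z1)[0, 1])   # expect ~0.0
```

Standard deviations near 1 and a correlation near 0 are the same sanity checks the script prints for its own X1, X2.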
gen_dist_from_U.py | # encoding: utf8
''' |
下面的基于U(0,1)实现了几个常用的连续随机变量概率分布的随机数模拟
U(0,1) 在0,1区间的连续均匀分布的随机数模拟
CDF: cumulate distribution function
PMF: probility mass function
PDF: probility density function
对任意随机变量X, 它有连续的的CDF F(x), 定义随机变量 Y=F(X), 则Y为 [0,1]上的均匀分布,即有 P(Y<=y) = y
box-muller方法并没有了解原理
'''
import numpy as np
import matplotlib.pyplot as plt
import bisect
import pdb
import standard_normal_dist
'''
根据标准正态分布表生成gauss分布。即:基于面积生成
X = Z*sigma + mu
'''
def gen_gauss_distribute_pdf_from_dist_table(mu, sigma, color='bo'):
nt = standard_normal_dist.normal_dist_table
std_x = list(standard_normal_dist.normal_dist_table_x)
gauss_x = np.zeros(len(std_x))
for id in range(len(std_x)):
#Z = (X - mu)/sigma, X = Z * sigma + mu
x = std_x[id] * sigma + mu
gauss_x[id] = x
gauss_y = np.zeros(len(gauss_x))
step = (gauss_x[-1] - gauss_x[0]) / len(gauss_x)
pre_p = 0
id = 0
for x,p in nt:
y = (p - pre_p) / step
pre_p = p
gauss_y[id] = y
id += 1
return gauss_x, gauss_y
'''
生成高斯分布的x及其对应的pdf
f = 1/(sqrt(2*pi) * sigma) * exp(-(x-mu)^2/(2*sigma^2))
'''
def gen_gauss_distribute_pdf(mu, sigma, sample_count, max_x):
gauss_x = np.linspace(-max_x, max_x, sample_count)
gauss_y = map(lambda x:1/(np.sqrt(2 * np.pi) * sigma) * np.exp(-(x-mu)**2/(2*sigma**2)), gauss_x)
return gauss_x, gauss_y
'''
画直方图
a, 样本值
'''
def plt_hist(a, color='r', normed=False):
sample_count = len(a)
a.sort()
group_count = int(np.sqrt(sample_count) + 0.9999)
width = (a[-1] - a[0]) / group_count
#直方图间隔
bins = np.linspace(a[0], a[-1], group_count+1)
hist = np.zeros(group_count)
for x in a:
index = bisect.bisect_right(bins - 0.00001, x) #减去最小的测量单位的一半,因为是浮点数,取个小的数
#边界上的数归到上一个区间. 即x in [begin, end), 最后一个区间除外
# 一般下界为 起始值 - 最小测量单位 * 0.5. 这样包含了最小元素,但上界值归到下一个区间了
index -= 1
if index >= group_count:
index -= 1 #边界上的数。真实测量上不会发生
hist[index] += 1
if normed == True:
total = np.sum(hist) * 1.0
hist /= total
hist /= width #概率是面积。除以这个得到高度
plt.bar(bins[0:group_count], hist, width = width, facecolor = 'lightskyblue',edgecolor = 'white', align='edge')
'''
#也可以直接用这个来汇制直方图
plt.hist(a, bins = group_count, normed=normed)
'''
'''
样本数一般要 > 100
使用均匀分布随机数,生成符合正态分布的随机数
'''
def gen_gauss_samples_byU(sample_count):
sim_gauss_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
x = standard_normal_dist.guess_x(u)
sim_gauss_num[i] = x
sim_u[i] = u
return sim_u, sim_gauss_num
'''
正态随机变量分布
box muller方法:
U0, U1独立,且U0, U1 ~ Uniform[0,1], 则
Z0 = sqrt(-2ln(U0))cos(2*pi*U1)
Z1 = sqrt(-2ln(U1))sin(2*pi*U0)
且Z0, Z1独立, 且服从正态分布
'''
def gen_gauss_samples_by_BoxMuller(sample_count):
U0 = np.random.uniform(0, 1, size=sample_count)
U1 = np.random.uniform(0, 1, size=sample_count)
Z0 = np.sqrt(-2 * np.log(U0)) * np.cos(2 * np.pi * U1)
Z1 = np.sqrt(-2 * np.log(U1)) * np.sin(2 * np.pi * U0)
return Z0, Z1
'''
使用均匀分布随机数,生成符合指数分布的随机数
F(x) = 1 - exp(-beta*x)
x = -1/beta * log(1 - F)
'''
def gen_exp_samples_byU(sample_count, beta):
sim_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
if u == 1:
x = 0.
else:
x = -1./beta * np.log(1. - u)
sim_u[i] = u
sim_num[i] = x
return sim_u, sim_num
'''
pdf = 1/beta * exp(-x/beta)
'''
def gen_exp_distribute_pdf(sample_count, beta, max_x):
x = np.linspace(0, max_x, sample_count)
y = map(lambda k:1/beta * np.exp(-k/beta), x)
return x,y
'''
分组的数量在5-12之间较为适宜(本程序用的是下一条)
一般取总样本数开平方取上界. 上一个分组算法一般认为不合理
分得越细,和曲线偏差越大,但好看;分得粗,不好看
'''
sample_count = 1000
group_count = int(np.sqrt(sample_count) + 0.9999)
sim_u, sim_gauss_num = gen_gauss_samples_byU(sample_count)
mean_val = np.mean(sim_gauss_num)
std_val = np.std(sim_gauss_num)
print "mean, std:", mean_val, std_val
#绘制高斯采样的直方图
plt_hist(sim_gauss_num, normed=True)
#叠加均匀分布采样直方图
plt.hist((sim_u-0.5)*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加正态分布图
gauss_x, gauss_y = gen_gauss_distribute_pdf(0, 1, sample_count, np.max(sim_gauss_num))
plt.plot(gauss_x, gauss_y, color='g')
plt.savefig('images/norm_distribute_gen_by_U.png', format='png')
#绘制正态分布图.两种生成方法,对比一下误差
plt.clf()
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 1)
plt.plot(gauss_x, gauss_y, 'g-.')
plt.plot(gauss_x2, gauss_y2, 'r--')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 2)
plt.plot(gauss_x2, gauss_y2, 'b-')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 4)
plt.plot(gauss_x2, gauss_y2, 'y-')
plt.savefig('images/norm_distribute.png', format='png')
#gauss dist, by box-muller method
plt.clf()
X1, X2 = gen_gauss_samples_by_BoxMuller(sample_count)
plt.hist(X2, bins = group_count, normed=True, color='r')
plt.hist(X1, bins = group_count, normed=True, color='b', alpha=0.5)
plt.savefig('images/norm_distribute_by_boxmuller.png', format='png')
coef = np.dot(X1-X1.mean(), X2-X2.mean()) /sample_count/(X1.std() * X2.std())
print 'gen gauss independent rv by box muller method.'
print 'std of X1: %.4f; std of X2: %.4f; coefficient between X1 and X2: %.4f' % (X1.std(), X2.std(), coef)
#指数分布相关
plt.clf()
sim_u, sim_num = gen_exp_samples_byU(sample_count, 1)
#绘制指数采样直方图
plt.hist(sim_num, bins = group_count, normed=True)
#叠加均匀分布采样直方图
plt.hist(sim_u*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加标准指数分布图
exp_x, exp_y = gen_exp_distribute_pdf(sample_count, 1, np.max(sim_num))
plt.plot(exp_x, exp_y, color='g')
plt.savefig('images/norm_exp_gen_by_U.png', format='png') | 随机变量的积分变换 | random_line_split |
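The row above draws exponential samples by inverting the CDF. Reading beta as a rate — which is what the code line `x = -1./beta * np.log(1. - u)` implements — the recipe is F(x) = 1 - exp(-beta*x), hence x = -ln(1 - u)/beta. A small sketch under that assumption follows; it is my own code, assumes NumPy, and `lam` and `sample_exponential` are names I introduced.

```python
# Inverse-CDF sampling for Exponential(rate=lam): F(x) = 1 - exp(-lam*x),
# hence F^{-1}(u) = -ln(1 - u) / lam.
import numpy as np

def sample_exponential(n, lam, rng):
    u = rng.uniform(size=n)           # u in [0, 1)
    return -np.log(1.0 - u) / lam     # 1 - u in (0, 1], log stays finite

rng = np.random.default_rng(2)
lam = 2.0
x = sample_exponential(100_000, lam, rng)

print("sample mean:", x.mean())   # expect ~1/lam = 0.5
print("sample std :", x.std())    # expect ~1/lam = 0.5
```

With beta = 1, as the script uses, the rate and scale conventions coincide, which is why its sampled histogram still matches the plotted pdf 1/beta * exp(-x/beta).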
gen_dist_from_U.py | # encoding: utf8
'''
随机变量的积分变换
下面的基于U(0,1)实现了几个常用的连续随机变量概率分布的随机数模拟
U(0,1) 在0,1区间的连续均匀分布的随机数模拟
CDF: cumulate distribution function
PMF: probility mass function
PDF: probility density function
对任意随机变量X, 它有连续的的CDF F(x), 定义随机变量 Y=F(X), 则Y为 [0,1]上的均匀分布,即有 P(Y<=y) = y
box-muller方法并没有了解原理
'''
import numpy as np
import matplotlib.pyplot as plt
import bisect
import pdb
import standard_normal_dist
'''
根据标准正态分布表生成gauss分布。即:基于面积生成
X = Z*sigma + mu
'''
def gen_gauss_distribute_pdf_from_dist_table(mu, sigma, color='bo'):
nt = standard_normal_dist.normal_dist_table
std_x = list(standard_normal_dist.normal_dist_table_x)
gauss_x = np.zeros(len(std_x))
for id in range(len(std_x)):
#Z = (X - mu)/sigma, X = Z * sigma + mu
x = std_x[id] * sigma + mu
gauss_x[id] = x
gauss_y = np.zeros(len(gauss_x))
step = (gauss_x[-1] - gauss_x[0]) / len(gauss_x)
pre_p = 0
id = 0
for x,p in nt:
y = (p - pre_p) / step
pre_p = p
gauss_y[id] = y
id += 1
return gauss_x, gauss_y
'''
生成高斯分布的x及其对应的pdf
f = 1/(sqrt(2*pi) * sigma) * exp(-(x-mu)^2/(2*sigma^2))
'''
def gen_gauss_distribute_pdf(mu, sigma, sample_count, max_x):
gauss_x = np.linspace(-max_x, max_x, sample_count)
gauss_y = map(lambda x:1/(np.sqrt(2 * np.pi) * sigma) * np.exp(-(x-mu)**2/(2*sigma**2)), gauss_x)
return gauss_x, gauss_y
'''
画直方图
a, 样本值
'''
def plt_hist(a, color='r', normed=False):
sample_count = len(a)
a.sort()
group_count = int(np.sqrt(sample_count) + 0.9999)
width = (a[-1] - a[0]) / group_count
#直方图间隔
bins = np.linspace(a[0], a[-1], group_count+1)
hist = np.zeros(group_count)
for x in a:
index = bisect.bisect_right(bins - 0.00001, x) #减去最小的测量单位的一半,因为是浮点数,取个小的数
#边界上的数归到上一个区间. 即x in [begin, end), 最后一个区间除外
# 一般下界为 起始值 - 最小测量单位 * 0.5. 这样包含了最小元素,但上界值归到下一个区间了
index -= 1
if index >= group_count:
index -= 1 #边界上的数。真实测量上不会发生
hist[index] += 1
if normed == True:
total = np.sum(hist) * 1.0
hist /= total
hist /= width #概率是面积。除以这个得到高度
plt.bar(bins[0:group_count], hist, width = width, facecolor = 'lightskyblue',edgecolor = 'white', align='edge')
'''
#也可以直接用这个来汇制直方图
plt.hist(a, bins = group_count, normed=normed)
'''
'''
样本数一般要 > 100
使用均匀分布随机数,生成符合正态分布的随机数
'''
def gen_gauss_samples_byU(sample_count):
sim_gauss_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
x = standard_normal_dist.guess_x(u)
sim_gauss_num[i] = x
sim_u[i] = u
return sim_u, sim_gauss_num
'''
正态随机变量分布
box muller方法:
U0, U1独立,且U0, U1 ~ Uniform[0,1], 则
Z0 = sqrt(-2ln(U0))cos(2*pi*U1)
Z1 = sqrt(-2ln(U1))sin(2*pi*U0)
且Z0, Z1独立, 且服从正态分布
'''
def gen_gauss_samples_by_BoxMuller(sample_count):
U0 = np.random.uniform(0, 1, size=sample_count)
U1 = np.random.uniform(0, 1, size=sample_count)
Z0 = np.sqrt(-2 * np.log(U0)) * np.cos(2 * np.pi * U1)
Z1 = np.sqrt(-2 * np.log(U1)) * np.sin(2 * np.pi * U0)
return Z0, Z1
'''
使用均匀分布随机数,生成符合指数分布的随机数
F(x) = 1 - exp(-beta*x)
x = -1/beta * log(1 - F)
'''
def gen_exp_samples_byU(sample_count, beta):
sim_num = np.zeros(sample_count)
sim_u = np.zeros(sample_count)
for i in range(0, sample_count):
u = np.random.uniform(0, 1)
if u == 1:
x = 0.
else:
x = -1./beta * np.log(1. - u)
sim_u[i] = u
sim_num[i] = x
return sim_u, sim_num
'''
pdf = 1/beta * exp(-x/beta)
'''
def gen_exp_distribute_pdf(sample_count, beta, max_x):
x = np.linspace(0, max_x, sample_count)
y = map(lambda k:1/beta * np.exp(-k/beta), x)
return x,y
'''
分组的数量在5-12之间较为适宜(本程序用的是下一条)
一般取总样本数开平方取上界. 上一个分组算法一般认为不合理
分得越细,和曲线偏差越大,但好看;分得粗,不好看
'''
sample_count | = int(np.sqrt(sample_count) + 0.9999)
sim_u, sim_gauss_num = gen_gauss_samples_byU(sample_count)
mean_val = np.mean(sim_gauss_num)
std_val = np.std(sim_gauss_num)
print "mean, std:", mean_val, std_val
#绘制高斯采样的直方图
plt_hist(sim_gauss_num, normed=True)
#叠加均匀分布采样直方图
plt.hist((sim_u-0.5)*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加正态分布图
gauss_x, gauss_y = gen_gauss_distribute_pdf(0, 1, sample_count, np.max(sim_gauss_num))
plt.plot(gauss_x, gauss_y, color='g')
plt.savefig('images/norm_distribute_gen_by_U.png', format='png')
#绘制正态分布图.两种生成方法,对比一下误差
plt.clf()
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 1)
plt.plot(gauss_x, gauss_y, 'g-.')
plt.plot(gauss_x2, gauss_y2, 'r--')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 2)
plt.plot(gauss_x2, gauss_y2, 'b-')
gauss_x2, gauss_y2 = gen_gauss_distribute_pdf_from_dist_table(0, 4)
plt.plot(gauss_x2, gauss_y2, 'y-')
plt.savefig('images/norm_distribute.png', format='png')
#gauss dist, by box-muller method
plt.clf()
X1, X2 = gen_gauss_samples_by_BoxMuller(sample_count)
plt.hist(X2, bins = group_count, normed=True, color='r')
plt.hist(X1, bins = group_count, normed=True, color='b', alpha=0.5)
plt.savefig('images/norm_distribute_by_boxmuller.png', format='png')
coef = np.dot(X1-X1.mean(), X2-X2.mean()) /sample_count/(X1.std() * X2.std())
print 'gen gauss independent rv by box muller method.'
print 'std of X1: %.4f; std of X2: %.4f; coefficient between X1 and X2: %.4f' % (X1.std(), X2.std(), coef)
#指数分布相关
plt.clf()
sim_u, sim_num = gen_exp_samples_byU(sample_count, 1)
#绘制指数采样直方图
plt.hist(sim_num, bins = group_count, normed=True)
#叠加均匀分布采样直方图
plt.hist(sim_u*8, bins = group_count, normed=True, color='r', alpha=0.6)
#叠加标准指数分布图
exp_x, exp_y = gen_exp_distribute_pdf(sample_count, 1, np.max(sim_num))
plt.plot(exp_x, exp_y, color='g')
plt.savefig('images/norm_exp_gen_by_U.png', format='png')
| = 1000
group_count | identifier_name |
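The comments in the row above size the histogram at roughly sqrt(N) bins and turn counts into a density by dividing by total count times bin width, so the bar areas integrate to one. A compact NumPy-only sketch of that rule — my own, not the file's plt_hist:

```python
# ceil(sqrt(N)) bins, counts normalized so the bar areas sum to one.
import numpy as np

rng = np.random.default_rng(3)
samples = rng.standard_normal(1000)

n_bins = int(np.ceil(np.sqrt(len(samples))))      # ~sqrt(N) rule
counts, edges = np.histogram(samples, bins=n_bins)

width = edges[1] - edges[0]
density = counts / (counts.sum() * width)

print("bins:", n_bins)
print("total area:", float((density * width).sum()))   # ~1.0
```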
location_strategy.ts | /**
* `LocationStrategy` is responsible for representing and reading route state
* from the browser's URL. Angular provides two strategies:
* {@link HashLocationStrategy} (default) and {@link PathLocationStrategy}.
*
* This is used under the hood of the {@link Location} service.
*
* Applications should use the {@link Router} or {@link Location} services to
* interact with application route state.
*
* For instance, {@link HashLocationStrategy} produces URLs like
* `http://example.com#/foo`, and {@link PathLocationStrategy} produces
* `http://example.com/foo` as an equivalent URL.
*
* See these two classes for more.
*/
export abstract class LocationStrategy {
abstract path(): string;
abstract pushState(state: any, title: string, url: string, queryParams: string): void;
abstract forward(): void;
abstract back(): void;
abstract onPopState(fn: (_: any) => any): void;
abstract getBaseHref(): string;
}
export function normalizeQueryParams(params: string): string | {
return (params.length > 0 && params.substring(0, 1) != '?') ? ('?' + params) : params;
} | identifier_body | |
location_strategy.ts | /**
* `LocationStrategy` is responsible for representing and reading route state
* from the browser's URL. Angular provides two strategies:
* {@link HashLocationStrategy} (default) and {@link PathLocationStrategy}.
*
* This is used under the hood of the {@link Location} service.
*
* Applications should use the {@link Router} or {@link Location} services to
* interact with application route state.
*
* For instance, {@link HashLocationStrategy} produces URLs like
* `http://example.com#/foo`, and {@link PathLocationStrategy} produces
* `http://example.com/foo` as an equivalent URL.
*
* See these two classes for more.
*/
export abstract class LocationStrategy {
abstract path(): string;
abstract pushState(state: any, title: string, url: string, queryParams: string): void;
abstract forward(): void;
abstract back(): void;
abstract onPopState(fn: (_: any) => any): void;
abstract getBaseHref(): string;
}
export function | (params: string): string {
return (params.length > 0 && params.substring(0, 1) != '?') ? ('?' + params) : params;
}
| normalizeQueryParams | identifier_name |
location_strategy.ts | /**
* `LocationStrategy` is responsible for representing and reading route state
* from the browser's URL. Angular provides two strategies:
* {@link HashLocationStrategy} (default) and {@link PathLocationStrategy}.
*
* This is used under the hood of the {@link Location} service.
*
* Applications should use the {@link Router} or {@link Location} services to | * interact with application route state.
*
* For instance, {@link HashLocationStrategy} produces URLs like
* `http://example.com#/foo`, and {@link PathLocationStrategy} produces
* `http://example.com/foo` as an equivalent URL.
*
* See these two classes for more.
*/
export abstract class LocationStrategy {
abstract path(): string;
abstract pushState(state: any, title: string, url: string, queryParams: string): void;
abstract forward(): void;
abstract back(): void;
abstract onPopState(fn: (_: any) => any): void;
abstract getBaseHref(): string;
}
export function normalizeQueryParams(params: string): string {
return (params.length > 0 && params.substring(0, 1) != '?') ? ('?' + params) : params;
} | random_line_split | |
Update.py | from openflow.optin_manager.sfa.util.method import Method
from openflow.optin_manager.sfa.trust.credential import Credential
from openflow.optin_manager.sfa.util.parameter import Parameter
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def call(self, record_dict, creds):
# validate the cred
| valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions
hrn = record_dict.get('hrn', '')
self.api.auth.verify_object_permission(hrn)
# log
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.Update(self.api, record_dict) | identifier_body | |
Update.py | from openflow.optin_manager.sfa.util.method import Method
from openflow.optin_manager.sfa.trust.credential import Credential
from openflow.optin_manager.sfa.util.parameter import Parameter
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def | (self, record_dict, creds):
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions
hrn = record_dict.get('hrn', '')
self.api.auth.verify_object_permission(hrn)
# log
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.Update(self.api, record_dict)
| call | identifier_name |
Update.py | from openflow.optin_manager.sfa.util.method import Method
from openflow.optin_manager.sfa.trust.credential import Credential
from openflow.optin_manager.sfa.util.parameter import Parameter
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def call(self, record_dict, creds):
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions | origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.Update(self.api, record_dict) | hrn = record_dict.get('hrn', '')
self.api.auth.verify_object_permission(hrn)
# log | random_line_split |
MicrosoftMailServiceReducer.js | import ServiceReducer from './ServiceReducer'
class MicrosoftMailServiceReducer extends ServiceReducer {
/* **************************************************************************/
// Class
/* **************************************************************************/
static get name () |
/* **************************************************************************/
// Reducers
/* **************************************************************************/
/**
* Sets the basic profile info for this account
* @param service: the service to update
* @param userId: the users id
* @param email: the user email address
* @param userFullName: the users full name
*/
static setProfileInfo (service, userId, email, userFullName) {
return service.changeData({
userId: userId,
email: email,
userFullName: userFullName
})
}
/**
* Sets the service avatar url
* @param service: the service to update
* @param avatarUrl: the url of the avatar
*/
static setServiceAvatarUrl (service, avatarUrl) {
return service.changeData({
serviceAvatarURL: avatarUrl
})
}
/**
* Sets the unread mode
* @param service: the service to update
* @param unreadMode: the new unread mode
*/
static setUnreadMode (service, unreadMode) {
if (service.unreadMode !== unreadMode) {
return service.changeData({ unreadMode: unreadMode })
}
}
}
export default MicrosoftMailServiceReducer
| { return 'MicrosoftMailServiceReducer' } | identifier_body |
MicrosoftMailServiceReducer.js | import ServiceReducer from './ServiceReducer'
class MicrosoftMailServiceReducer extends ServiceReducer {
/* **************************************************************************/
// Class
/* **************************************************************************/
static get name () { return 'MicrosoftMailServiceReducer' }
/* **************************************************************************/
// Reducers
/* **************************************************************************/
/**
* Sets the basic profile info for this account
* @param service: the service to update
* @param userId: the users id
* @param email: the user email address
* @param userFullName: the users full name
*/
static setProfileInfo (service, userId, email, userFullName) {
return service.changeData({
userId: userId,
email: email,
userFullName: userFullName
})
}
/**
* Sets the service avatar url
* @param service: the service to update
* @param avatarUrl: the url of the avatar
*/
static setServiceAvatarUrl (service, avatarUrl) {
return service.changeData({
serviceAvatarURL: avatarUrl
})
}
/**
* Sets the unread mode
* @param service: the service to update
* @param unreadMode: the new unread mode
*/
static setUnreadMode (service, unreadMode) {
if (service.unreadMode !== unreadMode) |
}
}
export default MicrosoftMailServiceReducer
| {
return service.changeData({ unreadMode: unreadMode })
} | conditional_block |
MicrosoftMailServiceReducer.js | import ServiceReducer from './ServiceReducer'
class MicrosoftMailServiceReducer extends ServiceReducer {
/* **************************************************************************/
// Class
/* **************************************************************************/
static get name () { return 'MicrosoftMailServiceReducer' }
/* **************************************************************************/
// Reducers
/* **************************************************************************/
/**
* Sets the basic profile info for this account
* @param service: the service to update
* @param userId: the users id
* @param email: the user email address
* @param userFullName: the users full name
*/
static setProfileInfo (service, userId, email, userFullName) {
return service.changeData({
userId: userId,
email: email,
userFullName: userFullName
})
}
/**
* Sets the service avatar url
* @param service: the service to update
* @param avatarUrl: the url of the avatar
*/
static setServiceAvatarUrl (service, avatarUrl) {
return service.changeData({
serviceAvatarURL: avatarUrl
})
}
/**
* Sets the unread mode
* @param service: the service to update
* @param unreadMode: the new unread mode
*/
static | (service, unreadMode) {
if (service.unreadMode !== unreadMode) {
return service.changeData({ unreadMode: unreadMode })
}
}
}
export default MicrosoftMailServiceReducer
| setUnreadMode | identifier_name |
MicrosoftMailServiceReducer.js | import ServiceReducer from './ServiceReducer'
class MicrosoftMailServiceReducer extends ServiceReducer {
/* **************************************************************************/
// Class |
/* **************************************************************************/
// Reducers
/* **************************************************************************/
/**
* Sets the basic profile info for this account
* @param service: the service to update
* @param userId: the users id
* @param email: the user email address
* @param userFullName: the users full name
*/
static setProfileInfo (service, userId, email, userFullName) {
return service.changeData({
userId: userId,
email: email,
userFullName: userFullName
})
}
/**
* Sets the service avatar url
* @param service: the service to update
* @param avatarUrl: the url of the avatar
*/
static setServiceAvatarUrl (service, avatarUrl) {
return service.changeData({
serviceAvatarURL: avatarUrl
})
}
/**
* Sets the unread mode
* @param service: the service to update
* @param unreadMode: the new unread mode
*/
static setUnreadMode (service, unreadMode) {
if (service.unreadMode !== unreadMode) {
return service.changeData({ unreadMode: unreadMode })
}
}
}
export default MicrosoftMailServiceReducer | /* **************************************************************************/
static get name () { return 'MicrosoftMailServiceReducer' } | random_line_split |
input-demo.ts | import {Component} from '@angular/core';
import {FormGroup, FormBuilder, Validators, FormControl} from "@angular/forms";
let max = 5;
@Component({
moduleId: module.id,
selector: 'input-demo',
templateUrl: 'input-demo.html',
styleUrls: ['input-demo.css'],
})
export class InputDemo {
validationForm: FormGroup;
dividerColor: boolean;
requiredField: boolean;
floatingLabel: boolean;
name: string;
items: any[] = [
{ value: 10 },
{ value: 20 },
{ value: 30 },
{ value: 40 },
{ value: 50 },
];
rows = 8;
constructor(private fb: FormBuilder) {
this.validationForm = this.fb.group({
username: new FormControl({value: '', disabled: false}, Validators.minLength(5)),
password: new FormControl({value: '', disabled: false}, Validators.required),
}
);
}
addABunch(n: number) {
for (let x = 0; x < n; x++) |
}
}
| {
this.items.push({ value: ++max });
} | conditional_block |
input-demo.ts | import {Component} from '@angular/core';
import {FormGroup, FormBuilder, Validators, FormControl} from "@angular/forms";
let max = 5;
@Component({
moduleId: module.id,
selector: 'input-demo',
templateUrl: 'input-demo.html',
styleUrls: ['input-demo.css'],
})
export class InputDemo {
validationForm: FormGroup;
dividerColor: boolean;
requiredField: boolean;
floatingLabel: boolean;
name: string;
items: any[] = [
{ value: 10 },
{ value: 20 },
{ value: 30 },
{ value: 40 },
{ value: 50 },
];
rows = 8;
constructor(private fb: FormBuilder) {
this.validationForm = this.fb.group({
username: new FormControl({value: '', disabled: false}, Validators.minLength(5)),
password: new FormControl({value: '', disabled: false}, Validators.required),
}
);
}
| (n: number) {
for (let x = 0; x < n; x++) {
this.items.push({ value: ++max });
}
}
}
| addABunch | identifier_name |
input-demo.ts | import {Component} from '@angular/core';
import {FormGroup, FormBuilder, Validators, FormControl} from "@angular/forms";
let max = 5;
@Component({
moduleId: module.id,
selector: 'input-demo',
templateUrl: 'input-demo.html',
styleUrls: ['input-demo.css'],
})
export class InputDemo {
validationForm: FormGroup;
dividerColor: boolean;
requiredField: boolean;
floatingLabel: boolean; | { value: 20 },
{ value: 30 },
{ value: 40 },
{ value: 50 },
];
rows = 8;
constructor(private fb: FormBuilder) {
this.validationForm = this.fb.group({
username: new FormControl({value: '', disabled: false}, Validators.minLength(5)),
password: new FormControl({value: '', disabled: false}, Validators.required),
}
);
}
addABunch(n: number) {
for (let x = 0; x < n; x++) {
this.items.push({ value: ++max });
}
}
} | name: string;
items: any[] = [
{ value: 10 }, | random_line_split |
ast.rs | use std::cell::Cell;
use std::fmt;
use std::vec::Vec;
pub type Var = String;
pub type Atom = String;
pub enum TopLevel {
Fact(Term),
Query(Term)
}
#[derive(Clone, Copy)]
pub enum Level {
Shallow, Deep
}
impl fmt::Display for Level {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Level::Shallow => write!(f, "A"),
&Level::Deep => write!(f, "X")
}
}
}
#[derive(Clone, Copy)]
pub enum Reg {
ArgAndNorm(usize, usize),
Norm(usize)
}
impl Reg {
pub fn has_arg(&self) -> bool {
match self {
&Reg::ArgAndNorm(_, _) => true,
_ => false
}
}
pub fn | (&self) -> usize {
match self {
&Reg::ArgAndNorm(_, norm) | &Reg::Norm(norm) => norm
}
}
}
pub enum Term {
Atom(Cell<usize>, Atom),
Clause(Cell<usize>, Atom, Vec<Box<Term>>),
Var(Cell<Reg>, Var)
}
pub enum TermRef<'a> {
Atom(Level, &'a Cell<usize>, &'a Atom),
Clause(Level, &'a Cell<usize>, &'a Atom, &'a Vec<Box<Term>>),
Var(Level, &'a Cell<Reg>, &'a Var)
}
#[derive(Clone)]
pub enum FactInstruction {
GetStructure(Level, Atom, usize, usize),
GetValue(usize, usize),
GetVariable(usize, usize),
Proceed,
UnifyVariable(usize),
UnifyValue(usize)
}
pub enum QueryInstruction {
Call(Atom, usize),
PutStructure(Level, Atom, usize, usize),
PutValue(usize, usize),
PutVariable(usize, usize),
SetVariable(usize),
SetValue(usize),
}
pub type CompiledFact = Vec<FactInstruction>;
pub type CompiledQuery = Vec<QueryInstruction>;
#[derive(Clone, Copy, PartialEq)]
pub enum Addr {
HeapCell(usize),
RegNum(usize)
}
#[derive(Clone)]
pub enum HeapCellValue {
NamedStr(usize, Atom),
Ref(usize),
Str(usize),
}
pub type Heap = Vec<HeapCellValue>;
pub type Registers = Vec<HeapCellValue>;
impl Term {
pub fn subterms(&self) -> usize {
match self {
&Term::Clause(_, _, ref terms) => terms.len(),
_ => 1
}
}
pub fn name(&self) -> &Atom {
match self {
&Term::Atom(_, ref atom)
| &Term::Var(_, ref atom)
| &Term::Clause(_, ref atom, _) => atom
}
}
pub fn arity(&self) -> usize {
match self {
&Term::Atom(_, _) | &Term::Var(_, _) => 0,
&Term::Clause(_, _, ref child_terms) => child_terms.len()
}
}
}
| norm | identifier_name |