file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
fasta.rs | // Copyright 2014-2016 Johannes Köster, Christopher Schröder.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! FASTA format reading and writing.
//!
//! # Example
//!
//! ```
//! use std::io;
//! use bio::io::fasta;
//! let reader = fasta::Reader::new(io::stdin());
//! ```
use std::io;
use std::io::prelude::*;
use std::ascii::AsciiExt;
use std::collections;
use std::fs;
use std::path::Path;
use std::convert::AsRef;
use csv;
use utils::{TextSlice, Text};
/// A FASTA reader.
pub struct Reader<R: io::Read> {
reader: io::BufReader<R>,
line: String,
}
impl Reader<fs::File> {
/// Read FASTA from given file path.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::open(path).map(Reader::new)
}
}
impl<R: io::Read> Reader<R> {
/// Create a new Fasta reader given an instance of `io::Read`.
pub fn new(reader: R) -> Self {
Reader {
reader: io::BufReader::new(reader),
line: String::new(),
}
}
/// Read next FASTA record into the given `Record`.
pub fn read(&mut self, record: &mut Record) -> io::Result<()> {
record.clear();
if self.line.is_empty() {
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() {
return Ok(());
}
}
if !self.line.starts_with('>') {
return Err(io::Error::new(io::ErrorKind::Other, "Expected > at record start."));
}
record.header.push_str(&self.line);
loop {
self.line.clear();
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() || self.line.starts_with('>') {
break;
}
record.seq.push_str(self.line.trim_right());
}
Ok(())
}
/// Return an iterator over the records of this FastQ file.
pub fn records(self) -> Records<R> {
Records { reader: self }
}
}
/// A FASTA index as created by SAMtools (.fai).
pub struct Index {
inner: collections::HashMap<String, IndexRecord>,
seqs: Vec<String>,
}
impl Index {
/// Open a FASTA index from a given `io::Read` instance.
pub fn new<R: io::Read>(fai: R) -> csv::Result<Self> {
let mut inner = collections::HashMap::new();
let mut seqs = vec![];
let mut fai_reader = csv::Reader::from_reader(fai).delimiter(b'\t').has_headers(false);
for row in fai_reader.decode() {
let (name, record): (String, IndexRecord) = try!(row);
seqs.push(name.clone());
inner.insert(name, record);
}
Ok(Index {
inner: inner,
seqs: seqs,
})
}
/// Open a FASTA index from a given file path.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
match fs::File::open(path) {
Ok(fai) => Self::new(fai),
Err(e) => Err(csv::Error::Io(e)),
}
}
/// Open a FASTA index given the corresponding FASTA file path (e.g. for ref.fasta we expect ref.fasta.fai).
pub fn with_fasta_file<P: AsRef<Path>>(fasta_path: &P) -> csv::Result<Self> {
let mut ext = fasta_path.as_ref().extension().unwrap().to_str().unwrap().to_owned();
ext.push_str(".fai");
let fai_path = fasta_path.as_ref().with_extension(ext);
Self::from_file(&fai_path)
}
/// Return a vector of sequences described in the index.
pub fn sequences(&self) -> Vec<Sequence> {
self.seqs
.iter()
.map(|name| {
Sequence {
name: name.clone(),
len: self.inner.get(name).unwrap().len,
}
})
.collect()
}
}
/// A FASTA reader with an index as created by SAMtools (.fai).
pub struct IndexedReader<R: io::Read + io::Seek> {
reader: io::BufReader<R>,
pub index: Index,
}
impl IndexedReader<fs::File> {
/// Read from a given file path. This assumes the index ref.fasta.fai to be present for FASTA ref.fasta.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
let index = try!(Index::with_fasta_file(path));
match fs::File::open(path) {
Ok(fasta) => Ok(IndexedReader::with_index(fasta, index)),
Err(e) => Err(csv::Error::Io(e)),
}
}
}
impl<R: io::Read + io::Seek> IndexedReader<R> {
/// Read from a FASTA and its index, both given as `io::Read`. FASTA has to be `io::Seek` in addition.
pub fn new<I: io::Read>(fasta: R, fai: I) -> csv::Result<Self> {
let index = try!(Index::new(fai));
Ok(IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
})
}
/// Read from a FASTA and its index, the first given as `io::Read`, the second given as index object.
pub fn with_index(fasta: R, index: Index) -> Self {
IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
}
}
/// For a given seqname, read the whole sequence into the given vector.
pub fn read_all(&mut self, seqname: &str, seq: &mut Text) -> io::Result<()> {
match self.index.inner.get(seqname) {
Some(&idx) => self.read(seqname, 0, idx.len, seq),
None => Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name.")),
}
}
/// Read the given interval of the given seqname into the given vector (stop position is exclusive).
pub fn read(&mut self,
seqname: &str,
start: u64,
stop: u64,
seq: &mut Text)
-> io::Result<()> {
if let Some(idx) = self.index.inner.get(seqname) {
seq.clear();
if stop > idx.len {
return Err(io::Error::new(io::ErrorKind::Other, "FASTA read interval was out of bounds"));
}
if start > stop {
return Err(io::Error::new(io::ErrorKind::Other, "Invalid query interval"));
}
let mut line_offset = start % idx.line_bases;
let line_start = start / idx.line_bases * idx.line_bytes;
let offset = idx.offset + line_start + line_offset;
try!(self.reader.seek(io::SeekFrom::Start(offset)));
let length = stop - start as u64;
let mut buf = vec![0u8; idx.line_bytes as usize];
while (seq.len() as u64) < length {
let bases_left = length - seq.len() as u64;
let bases_on_line = idx.line_bases - line_offset;
let (bytes_to_read, bytes_to_keep) = if bases_on_line < bases_left {
(idx.line_bytes - line_offset, bases_on_line)
} else {
(bases_left, bases_left)
};
try!(self.reader.read_exact(&mut buf[..bytes_to_read as usize]));
seq.extend_from_slice(&buf[..bytes_to_keep as usize]);
line_offset = 0;
}
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name."))
}
}
}
/// Record of a FASTA index.
#[derive(RustcDecodable, Debug, Copy, Clone)]
struct IndexRecord {
len: u64,
offset: u64,
line_bases: u64,
line_bytes: u64,
}
/// A sequence record returned by the FASTA index.
pub struct Sequence {
pub name: String,
pub len: u64,
}
/// A Fasta writer.
pub struct Writer<W: io::Write> {
writer: io::BufWriter<W>,
}
impl Writer<fs::File> {
/// Write to the given file path.
pub fn to_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::create(path).map(Writer::new)
}
}
impl<W: io::Write> Writer<W> {
/// Create a new Fasta writer.
pub fn new(writer: W) -> Self {
Writer { writer: io::BufWriter::new(writer) }
}
/// Directly write a Fasta record.
pub fn write_record(&mut self, record: &Record) -> io::Result<()> {
self.write(record.id().unwrap_or(""), record.desc(), record.seq())
}
/// Write a Fasta record with given id, optional description and sequence.
pub fn write(&mut self, id: &str, desc: Option<&str>, seq: TextSlice) -> io::Result<()> {
try!(self.writer.write(b">"));
try!(self.writer.write(id.as_bytes()));
if desc.is_some() {
try!(self.writer.write(b" "));
try!(self.writer.write(desc.unwrap().as_bytes()));
}
try!(self.writer.write(b"\n"));
try!(self.writer.write(seq));
try!(self.writer.write(b"\n"));
Ok(())
}
/// Flush the writer, ensuring that everything is written.
pub fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
/// A FASTA record.
#[derive(Default)]
pub struct Record {
header: String,
seq: String,
}
impl Record {
/// Create a new instance.
pub fn new() -> Self {
Record {
header: String::new(),
seq: String::new(),
}
}
/// Check if record is empty.
pub fn is_empty(&self) -> bool {
self.header.is_empty() && self.seq.is_empty()
}
/// Check validity of Fasta record.
pub fn check(&self) -> Result<(), &str> {
| /// Return the id of the record.
pub fn id(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2, ' ').next()
}
/// Return descriptions if present.
pub fn desc(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2, ' ').skip(1).next()
}
/// Return the sequence of the record.
pub fn seq(&self) -> TextSlice {
self.seq.as_bytes()
}
/// Clear the record.
fn clear(&mut self) {
self.header.clear();
self.seq.clear();
}
}
/// An iterator over the records of a Fasta file.
pub struct Records<R: io::Read> {
reader: Reader<R>,
}
impl<R: io::Read> Iterator for Records<R> {
type Item = io::Result<Record>;
fn next(&mut self) -> Option<io::Result<Record>> {
let mut record = Record::new();
match self.reader.read(&mut record) {
Ok(()) if record.is_empty() => None,
Ok(()) => Some(Ok(record)),
Err(err) => Some(Err(err)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io;
const FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
CCGTAGGCTGAA
CGTAGGCTGAAA
GTAGGCTGAAAA
CCCC
>id2
ATTGTTGTTTTA
ATTGTTGTTTTA
ATTGTTGTTTTA
GGGG
";
const FAI_FILE: &'static [u8] = b"id\t52\t9\t12\t13
id2\t40\t71\t12\t13
";
const FASTA_FILE_CRLF: &'static [u8] = b">id desc\r
ACCGTAGGCTGA\r
CCGTAGGCTGAA\r
CGTAGGCTGAAA\r
GTAGGCTGAAAA\r
CCCC\r
>id2\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
GGGG\r
";
const FAI_FILE_CRLF: &'static [u8] = b"id\t52\t10\t12\t14\r
id2\t40\t78\t12\t14\r
";
const FASTA_FILE_NO_TRAILING_LF : &'static [u8] = b">id desc
GTAGGCTGAAAA
CCCC";
const FAI_FILE_NO_TRAILING_LF: &'static [u8] = b"id\t16\t9\t12\t13";
const WRITE_FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
>id2
ATTGTTGTTTTA
";
#[test]
fn test_reader() {
let reader = Reader::new(FASTA_FILE);
let ids = [Some("id"), Some("id2")];
let descs = [Some("desc"), None];
let seqs: [&[u8]; 2] = [b"ACCGTAGGCTGACCGTAGGCTGAACGTAGGCTGAAAGTAGGCTGAAAACCCC",
b"ATTGTTGTTTTAATTGTTGTTTTAATTGTTGTTTTAGGGG"];
for (i, r) in reader.records().enumerate() {
let record = r.ok().expect("Error reading record");
assert_eq!(record.check(), Ok(()));
assert_eq!(record.id(), ids[i]);
assert_eq!(record.desc(), descs[i]);
assert_eq!(record.seq(), seqs[i]);
}
// let record = records.ok().nth(1).unwrap();
}
#[test]
fn test_indexed_reader() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE), FAI_FILE)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
#[test]
fn test_indexed_reader_crlf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_CRLF), FAI_FILE_CRLF)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
fn _test_indexed_reader<T: Seek + Read>(reader: &mut IndexedReader<T>) {
let mut seq = Vec::new();
// Test reading various substrings of the sequence
reader.read("id", 1, 5, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGT");
reader.read("id", 1, 31, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGTAGGCTGACCGTAGGCTGAACGTAGGC");
reader.read("id", 13, 23, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CGTAGGCTGA");
reader.read("id", 36, 52, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
reader.read("id2", 12, 40, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"ATTGTTGTTTTAATTGTTGTTTTAGGGG");
reader.read("id2", 12, 12, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"");
reader.read("id2", 12, 13, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"A");
assert!(reader.read("id2", 12, 11, &mut seq).is_err());
assert!(reader.read("id2", 12, 1000, &mut seq).is_err());
}
#[test]
fn test_indexed_reader_no_trailing_lf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_NO_TRAILING_LF),
FAI_FILE_NO_TRAILING_LF)
.ok()
.expect("Error reading index");
let mut seq = Vec::new();
reader.read("id", 0, 16, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
}
#[test]
fn test_writer() {
let mut writer = Writer::new(Vec::new());
writer.write("id", Some("desc"), b"ACCGTAGGCTGA").ok().expect("Expected successful write");
writer.write("id2", None, b"ATTGTTGTTTTA").ok().expect("Expected successful write");
writer.flush().ok().expect("Expected successful write");
assert_eq!(writer.writer.get_ref(), &WRITE_FASTA_FILE);
}
}
| if self.id().is_none() {
return Err("Expecting id for FastQ record.");
}
if !self.seq.is_ascii() {
return Err("Non-ascii character found in sequence.");
}
Ok(())
}
| identifier_body |
Svg.ts | import h from './h'
function | (tagName: string) {
return h.bind(null, tagName)
}
export default Object.freeze({
a : f('a'),
altGlyph : f('altGlyph'),
altGlyphDef : f('altGlyphDef'),
altGlyphItem : f('altGlyphItem'),
animate : f('animate'),
animateColor : f('animateColor'),
animateMotion : f('animateMotion'),
animateTransform : f('animateTransform'),
circle : f('circle'),
clipPath : f('clipPath'),
colorProfile : f('color-profile'),
cursor : f('cursor'),
defs : f('defs'),
desc : f('desc'),
ellipse : f('ellipse'),
feBlend : f('feBlend'),
feColorMatrix : f('feColorMatrix'),
feComponentTransfer : f('feComponentTransfer'),
feComposite : f('feComposite'),
feConvolveMatrix : f('feConvolveMatrix'),
feDiffuseLighting : f('feDiffuseLighting'),
feDisplacementMap : f('feDisplacementMap'),
feDistantLight : f('feDistantLight'),
feFlood : f('feFlood'),
feFuncA : f('feFuncA'),
feFuncB : f('feFuncB'),
feFuncG : f('feFuncG'),
feFuncR : f('feFuncR'),
feGaussianBlur : f('feGaussianBlur'),
feImage : f('feImage'),
feMerge : f('feMerge'),
feMergeNode : f('feMergeNode'),
feMorphology : f('feMorphology'),
feOffset : f('feOffset'),
fePointLight : f('fePointLight'),
feSpecularLighting : f('feSpecularLighting'),
feSpotLight : f('feSpotLight'),
feTile : f('feTile'),
feTurbulence : f('feTurbulence'),
filter : f('filter'),
font : f('font'),
foreignObject : f('foreignObject'),
g : f('g'),
glyph : f('glyph'),
glyphRef : f('glyphRef'),
hkern : f('hkern'),
image : f('image'),
line : f('line'),
linearGradient : f('linearGradient'),
marker : f('marker'),
mask : f('mask'),
metadata : f('metadata'),
mpath : f('mpath'),
path : f('path'),
pattern : f('pattern'),
polygon : f('polygon'),
polyline : f('polyline'),
radialGradient : f('radialGradient'),
rect : f('rect'),
script : f('script'),
set : f('set'),
stop : f('stop'),
style : f('style'),
svg : f('svg'),
switch: f('switch'),
symbol : f('symbol'),
text : f('text'),
textPath : f('textPath'),
title : f('title'),
tref : f('tref'),
tspan : f('tspan'),
use : f('use'),
view : f('view'),
vkern : f('vkern')
})
| f | identifier_name |
Svg.ts | import h from './h'
function f(tagName: string) {
return h.bind(null, tagName)
}
export default Object.freeze({
a : f('a'),
altGlyph : f('altGlyph'),
altGlyphDef : f('altGlyphDef'),
altGlyphItem : f('altGlyphItem'),
animate : f('animate'),
animateColor : f('animateColor'),
animateMotion : f('animateMotion'),
animateTransform : f('animateTransform'),
circle : f('circle'),
clipPath : f('clipPath'),
colorProfile : f('color-profile'),
cursor : f('cursor'),
defs : f('defs'),
desc : f('desc'),
ellipse : f('ellipse'),
feBlend : f('feBlend'),
feColorMatrix : f('feColorMatrix'),
feComponentTransfer : f('feComponentTransfer'),
feComposite : f('feComposite'),
feConvolveMatrix : f('feConvolveMatrix'), | feDiffuseLighting : f('feDiffuseLighting'),
feDisplacementMap : f('feDisplacementMap'),
feDistantLight : f('feDistantLight'),
feFlood : f('feFlood'),
feFuncA : f('feFuncA'),
feFuncB : f('feFuncB'),
feFuncG : f('feFuncG'),
feFuncR : f('feFuncR'),
feGaussianBlur : f('feGaussianBlur'),
feImage : f('feImage'),
feMerge : f('feMerge'),
feMergeNode : f('feMergeNode'),
feMorphology : f('feMorphology'),
feOffset : f('feOffset'),
fePointLight : f('fePointLight'),
feSpecularLighting : f('feSpecularLighting'),
feSpotLight : f('feSpotLight'),
feTile : f('feTile'),
feTurbulence : f('feTurbulence'),
filter : f('filter'),
font : f('font'),
foreignObject : f('foreignObject'),
g : f('g'),
glyph : f('glyph'),
glyphRef : f('glyphRef'),
hkern : f('hkern'),
image : f('image'),
line : f('line'),
linearGradient : f('linearGradient'),
marker : f('marker'),
mask : f('mask'),
metadata : f('metadata'),
mpath : f('mpath'),
path : f('path'),
pattern : f('pattern'),
polygon : f('polygon'),
polyline : f('polyline'),
radialGradient : f('radialGradient'),
rect : f('rect'),
script : f('script'),
set : f('set'),
stop : f('stop'),
style : f('style'),
svg : f('svg'),
switch: f('switch'),
symbol : f('symbol'),
text : f('text'),
textPath : f('textPath'),
title : f('title'),
tref : f('tref'),
tspan : f('tspan'),
use : f('use'),
view : f('view'),
vkern : f('vkern')
}) | random_line_split | |
Svg.ts | import h from './h'
function f(tagName: string) |
export default Object.freeze({
a : f('a'),
altGlyph : f('altGlyph'),
altGlyphDef : f('altGlyphDef'),
altGlyphItem : f('altGlyphItem'),
animate : f('animate'),
animateColor : f('animateColor'),
animateMotion : f('animateMotion'),
animateTransform : f('animateTransform'),
circle : f('circle'),
clipPath : f('clipPath'),
colorProfile : f('color-profile'),
cursor : f('cursor'),
defs : f('defs'),
desc : f('desc'),
ellipse : f('ellipse'),
feBlend : f('feBlend'),
feColorMatrix : f('feColorMatrix'),
feComponentTransfer : f('feComponentTransfer'),
feComposite : f('feComposite'),
feConvolveMatrix : f('feConvolveMatrix'),
feDiffuseLighting : f('feDiffuseLighting'),
feDisplacementMap : f('feDisplacementMap'),
feDistantLight : f('feDistantLight'),
feFlood : f('feFlood'),
feFuncA : f('feFuncA'),
feFuncB : f('feFuncB'),
feFuncG : f('feFuncG'),
feFuncR : f('feFuncR'),
feGaussianBlur : f('feGaussianBlur'),
feImage : f('feImage'),
feMerge : f('feMerge'),
feMergeNode : f('feMergeNode'),
feMorphology : f('feMorphology'),
feOffset : f('feOffset'),
fePointLight : f('fePointLight'),
feSpecularLighting : f('feSpecularLighting'),
feSpotLight : f('feSpotLight'),
feTile : f('feTile'),
feTurbulence : f('feTurbulence'),
filter : f('filter'),
font : f('font'),
foreignObject : f('foreignObject'),
g : f('g'),
glyph : f('glyph'),
glyphRef : f('glyphRef'),
hkern : f('hkern'),
image : f('image'),
line : f('line'),
linearGradient : f('linearGradient'),
marker : f('marker'),
mask : f('mask'),
metadata : f('metadata'),
mpath : f('mpath'),
path : f('path'),
pattern : f('pattern'),
polygon : f('polygon'),
polyline : f('polyline'),
radialGradient : f('radialGradient'),
rect : f('rect'),
script : f('script'),
set : f('set'),
stop : f('stop'),
style : f('style'),
svg : f('svg'),
switch: f('switch'),
symbol : f('symbol'),
text : f('text'),
textPath : f('textPath'),
title : f('title'),
tref : f('tref'),
tspan : f('tspan'),
use : f('use'),
view : f('view'),
vkern : f('vkern')
})
| {
return h.bind(null, tagName)
} | identifier_body |
tier.js | 'use strict';
var mongoose = require('mongoose');
var mongoose_uuid = require('mongoose-uuid');
var mongoose_relationship = require('mongoose-relationship');
function tierModel () {
var tierSchema = mongoose.Schema({
environment: { type: String, ref: 'Environment', childPath: 'tiers', index: true },
platform: { type: String, ref: 'Platform', childPath: 'tiers', index: true },
machines: [{ type: String, ref: 'Machine' }],
cfpersonas: { type: String, ref: 'CfPersonas', index: true },
name: { type: String },
system_name: { type: String },
user_script: { type: String },
base_image: { type: String },
base_package: { type: String },
home_network: { type: String },
networks: [{ type: String }]
}, { _id: false, versionKey: false });
tierSchema.plugin(mongoose_uuid.plugin, 'Tier');
tierSchema.plugin(mongoose_relationship, {
relationshipPathName: [ 'environment', 'platform' ]
});
tierSchema.pre('save', function (next) {
var tier = this;
if (tier.system_name && tier.system_name.length > 0) |
tier.system_name = tier.name.toLowerCase().replace(/\s+/g, '_');
next();
});
return mongoose.model('Tier', tierSchema);
}
module.exports = new tierModel();
| {
next();
return;
} | conditional_block |
tier.js | 'use strict';
var mongoose = require('mongoose');
var mongoose_uuid = require('mongoose-uuid');
var mongoose_relationship = require('mongoose-relationship');
function tierModel () {
var tierSchema = mongoose.Schema({
environment: { type: String, ref: 'Environment', childPath: 'tiers', index: true },
platform: { type: String, ref: 'Platform', childPath: 'tiers', index: true },
machines: [{ type: String, ref: 'Machine' }],
cfpersonas: { type: String, ref: 'CfPersonas', index: true },
name: { type: String },
system_name: { type: String },
user_script: { type: String },
base_image: { type: String },
base_package: { type: String }, | networks: [{ type: String }]
}, { _id: false, versionKey: false });
tierSchema.plugin(mongoose_uuid.plugin, 'Tier');
tierSchema.plugin(mongoose_relationship, {
relationshipPathName: [ 'environment', 'platform' ]
});
tierSchema.pre('save', function (next) {
var tier = this;
if (tier.system_name && tier.system_name.length > 0) {
next();
return;
}
tier.system_name = tier.name.toLowerCase().replace(/\s+/g, '_');
next();
});
return mongoose.model('Tier', tierSchema);
}
module.exports = new tierModel(); | home_network: { type: String }, | random_line_split |
tier.js | 'use strict';
var mongoose = require('mongoose');
var mongoose_uuid = require('mongoose-uuid');
var mongoose_relationship = require('mongoose-relationship');
function tierModel () |
module.exports = new tierModel();
| {
var tierSchema = mongoose.Schema({
environment: { type: String, ref: 'Environment', childPath: 'tiers', index: true },
platform: { type: String, ref: 'Platform', childPath: 'tiers', index: true },
machines: [{ type: String, ref: 'Machine' }],
cfpersonas: { type: String, ref: 'CfPersonas', index: true },
name: { type: String },
system_name: { type: String },
user_script: { type: String },
base_image: { type: String },
base_package: { type: String },
home_network: { type: String },
networks: [{ type: String }]
}, { _id: false, versionKey: false });
tierSchema.plugin(mongoose_uuid.plugin, 'Tier');
tierSchema.plugin(mongoose_relationship, {
relationshipPathName: [ 'environment', 'platform' ]
});
tierSchema.pre('save', function (next) {
var tier = this;
if (tier.system_name && tier.system_name.length > 0) {
next();
return;
}
tier.system_name = tier.name.toLowerCase().replace(/\s+/g, '_');
next();
});
return mongoose.model('Tier', tierSchema);
} | identifier_body |
tier.js | 'use strict';
var mongoose = require('mongoose');
var mongoose_uuid = require('mongoose-uuid');
var mongoose_relationship = require('mongoose-relationship');
function | () {
var tierSchema = mongoose.Schema({
environment: { type: String, ref: 'Environment', childPath: 'tiers', index: true },
platform: { type: String, ref: 'Platform', childPath: 'tiers', index: true },
machines: [{ type: String, ref: 'Machine' }],
cfpersonas: { type: String, ref: 'CfPersonas', index: true },
name: { type: String },
system_name: { type: String },
user_script: { type: String },
base_image: { type: String },
base_package: { type: String },
home_network: { type: String },
networks: [{ type: String }]
}, { _id: false, versionKey: false });
tierSchema.plugin(mongoose_uuid.plugin, 'Tier');
tierSchema.plugin(mongoose_relationship, {
relationshipPathName: [ 'environment', 'platform' ]
});
tierSchema.pre('save', function (next) {
var tier = this;
if (tier.system_name && tier.system_name.length > 0) {
next();
return;
}
tier.system_name = tier.name.toLowerCase().replace(/\s+/g, '_');
next();
});
return mongoose.model('Tier', tierSchema);
}
module.exports = new tierModel();
| tierModel | identifier_name |
HTPGeoprocessor.py | """
/***************************************************************************
Name : HTP Geoprocessor
Description : Tools for processing HTP geospatial data
Date : 29/Mar/12
copyright : (C) 2012 by Dr. Kelly Thorp, USDA-ARS
email : kelly.thorp@ars.usda.gov
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import os
import sys
# Import the code for the dialogs
from MapCreatorDlg import MapCreatorDlg
from GeoprocessorDlg import GeoprocessorDlg
from PreprocessorDlg import PreprocessorDlg
class HTPGeoprocessor:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'htpgeoprocessor_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
def initGui(self):
# Create action that will start plugin configuration
icon = QIcon(":/plugins/htpgeoprocessor/icon.png")
self.createmap = QAction(icon,u"Map Creator", self.iface.mainWindow())
self.preprocess = QAction(icon,u"Preprocessor", self.iface.mainWindow())
self.geoprocess = QAction(icon,u"Geoprocessor", self.iface.mainWindow())
self.helpme = QAction(icon, u"Help", self.iface.mainWindow())
# connect the action to a method
self.createmap.triggered.connect(self.CreateMap)
self.preprocess.triggered.connect(self.Preprocess)
self.geoprocess.triggered.connect(self.Geoprocess)
self.helpme.triggered.connect(self.Help)
# Add toolbar button and menu item
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.helpme)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.helpme)
# run methods that perform all the real work
def CreateMap(self):
dlg = MapCreatorDlg(self.iface)
dlg.exec_()
def Preprocess(self):
dlg = PreprocessorDlg()
dlg.exec_()
def Geoprocess(self):
# create and show the dialog
dlg = GeoprocessorDlg(self.iface)
# show the dialog
#dlg.show() #Modeless dialog
dlg.exec_() #Modal dialog
def Help(self):
path = os.path.dirname(sys.modules[__name__].__file__)
if sys.platform == 'linux':
os.system(path+"//HTP Geoprocessor README.pdf")
elif sys.platform == 'win32':
|
else:
QMessageBox.critical(self.iface.mainWindow(),'Help','Error opening document. Look in plug-in install directory for PDF.')
| os.startfile(path+"\\HTP Geoprocessor README.pdf") | conditional_block |
HTPGeoprocessor.py | """
/***************************************************************************
Name : HTP Geoprocessor
Description : Tools for processing HTP geospatial data
Date : 29/Mar/12
copyright : (C) 2012 by Dr. Kelly Thorp, USDA-ARS
email : kelly.thorp@ars.usda.gov
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import os
import sys
# Import the code for the dialogs
from MapCreatorDlg import MapCreatorDlg
from GeoprocessorDlg import GeoprocessorDlg
from PreprocessorDlg import PreprocessorDlg
class HTPGeoprocessor:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'htpgeoprocessor_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
def initGui(self):
# Create action that will start plugin configuration
icon = QIcon(":/plugins/htpgeoprocessor/icon.png")
self.createmap = QAction(icon,u"Map Creator", self.iface.mainWindow())
self.preprocess = QAction(icon,u"Preprocessor", self.iface.mainWindow())
self.geoprocess = QAction(icon,u"Geoprocessor", self.iface.mainWindow())
self.helpme = QAction(icon, u"Help", self.iface.mainWindow())
# connect the action to a method
self.createmap.triggered.connect(self.CreateMap)
self.preprocess.triggered.connect(self.Preprocess)
self.geoprocess.triggered.connect(self.Geoprocess)
self.helpme.triggered.connect(self.Help)
# Add toolbar button and menu item
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.helpme)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.helpme)
# run methods that perform all the real work
def CreateMap(self):
dlg = MapCreatorDlg(self.iface)
dlg.exec_()
def Preprocess(self):
| def Geoprocess(self):
# create and show the dialog
dlg = GeoprocessorDlg(self.iface)
# show the dialog
#dlg.show() #Modeless dialog
dlg.exec_() #Modal dialog
def Help(self):
path = os.path.dirname(sys.modules[__name__].__file__)
if sys.platform == 'linux':
os.system(path+"//HTP Geoprocessor README.pdf")
elif sys.platform == 'win32':
os.startfile(path+"\\HTP Geoprocessor README.pdf")
else:
QMessageBox.critical(self.iface.mainWindow(),'Help','Error opening document. Look in plug-in install directory for PDF.') | dlg = PreprocessorDlg()
dlg.exec_()
| random_line_split |
HTPGeoprocessor.py | """
/***************************************************************************
Name : HTP Geoprocessor
Description : Tools for processing HTP geospatial data
Date : 29/Mar/12
copyright : (C) 2012 by Dr. Kelly Thorp, USDA-ARS
email : kelly.thorp@ars.usda.gov
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import os
import sys
# Import the code for the dialogs
from MapCreatorDlg import MapCreatorDlg
from GeoprocessorDlg import GeoprocessorDlg
from PreprocessorDlg import PreprocessorDlg
class HTPGeoprocessor:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'htpgeoprocessor_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
def initGui(self):
# Create action that will start plugin configuration
icon = QIcon(":/plugins/htpgeoprocessor/icon.png")
self.createmap = QAction(icon,u"Map Creator", self.iface.mainWindow())
self.preprocess = QAction(icon,u"Preprocessor", self.iface.mainWindow())
self.geoprocess = QAction(icon,u"Geoprocessor", self.iface.mainWindow())
self.helpme = QAction(icon, u"Help", self.iface.mainWindow())
# connect the action to a method
self.createmap.triggered.connect(self.CreateMap)
self.preprocess.triggered.connect(self.Preprocess)
self.geoprocess.triggered.connect(self.Geoprocess)
self.helpme.triggered.connect(self.Help)
# Add toolbar button and menu item
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.helpme)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.helpme)
# run methods that perform all the real work
def CreateMap(self):
dlg = MapCreatorDlg(self.iface)
dlg.exec_()
def Preprocess(self):
|
def Geoprocess(self):
# create and show the dialog
dlg = GeoprocessorDlg(self.iface)
# show the dialog
#dlg.show() #Modeless dialog
dlg.exec_() #Modal dialog
def Help(self):
path = os.path.dirname(sys.modules[__name__].__file__)
if sys.platform == 'linux':
os.system(path+"//HTP Geoprocessor README.pdf")
elif sys.platform == 'win32':
os.startfile(path+"\\HTP Geoprocessor README.pdf")
else:
QMessageBox.critical(self.iface.mainWindow(),'Help','Error opening document. Look in plug-in install directory for PDF.')
| dlg = PreprocessorDlg()
dlg.exec_() | identifier_body |
HTPGeoprocessor.py | """
/***************************************************************************
Name : HTP Geoprocessor
Description : Tools for processing HTP geospatial data
Date : 29/Mar/12
copyright : (C) 2012 by Dr. Kelly Thorp, USDA-ARS
email : kelly.thorp@ars.usda.gov
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import os
import sys
# Import the code for the dialogs
from MapCreatorDlg import MapCreatorDlg
from GeoprocessorDlg import GeoprocessorDlg
from PreprocessorDlg import PreprocessorDlg
class HTPGeoprocessor:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'htpgeoprocessor_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
def initGui(self):
# Create action that will start plugin configuration
icon = QIcon(":/plugins/htpgeoprocessor/icon.png")
self.createmap = QAction(icon,u"Map Creator", self.iface.mainWindow())
self.preprocess = QAction(icon,u"Preprocessor", self.iface.mainWindow())
self.geoprocess = QAction(icon,u"Geoprocessor", self.iface.mainWindow())
self.helpme = QAction(icon, u"Help", self.iface.mainWindow())
# connect the action to a method
self.createmap.triggered.connect(self.CreateMap)
self.preprocess.triggered.connect(self.Preprocess)
self.geoprocess.triggered.connect(self.Geoprocess)
self.helpme.triggered.connect(self.Help)
# Add toolbar button and menu item
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.helpme)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.createmap)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.preprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.geoprocess)
self.iface.removePluginMenu(u"&HTP Geoprocessor", self.helpme)
# run methods that perform all the real work
def CreateMap(self):
dlg = MapCreatorDlg(self.iface)
dlg.exec_()
def Preprocess(self):
dlg = PreprocessorDlg()
dlg.exec_()
def Geoprocess(self):
# create and show the dialog
dlg = GeoprocessorDlg(self.iface)
# show the dialog
#dlg.show() #Modeless dialog
dlg.exec_() #Modal dialog
def | (self):
path = os.path.dirname(sys.modules[__name__].__file__)
if sys.platform == 'linux':
os.system(path+"//HTP Geoprocessor README.pdf")
elif sys.platform == 'win32':
os.startfile(path+"\\HTP Geoprocessor README.pdf")
else:
QMessageBox.critical(self.iface.mainWindow(),'Help','Error opening document. Look in plug-in install directory for PDF.')
| Help | identifier_name |
private-match.js | (function(){
'use strict';
var _ = require('lodash');
var PrivateMatch = function() {
this.waitingText = null; | this.nextDot = 0;
};
PrivateMatch.prototype.create = function() {
this.game.socketHandler.startPrivateMatch();
this.game.stage.backgroundColor = '#C8F7C5';
this.sendLinkText = this.add.text(
this.world.centerX,
this.world.centerY - 150,
'Send this link to your opponent:',
{
font: '45pt hallo_sansblack',
fill: '#e67e22',
align: 'center'
}
);
this.sendLinkText.anchor.setTo(0.5, 0.5);
this.waitingText = this.add.text(
-999,
this.world.centerY+250,
'Waiting for a player to join',
{
font: '30pt hallo_sansblack',
fill: '#f1c40f',
align: 'center'
}
);
// Can't anchor it as it would move around when the dots are added to its string.
this.waitingText.x = this.world.width/2 - this.waitingText.width/2;
this.matchInput = document.querySelector('#match-input');
this.matchInput.style.display = 'inline';
this.matchInput.focus();
this.smallMenuButton = this.add.button(this.world.centerX, this.world.centerY+170,
'menuButton', this.startMenu, this, 1, 0, 0);
this.smallMenuButton.scale.setTo(0.6, 0,6);
this.smallMenuButton.anchor.setTo(0.5, 0.5);
};
PrivateMatch.prototype.setLink = function(matchID) {
window.location.hash = matchID;
this.matchInput.value = window.location.href;
};
PrivateMatch.prototype.update = function() {
if (this.time.now > this.nextDot) {
if (this.dots === 3) {
this.dots = 0;
}
this.dots++;
var dotString = '';
for (var i = 0; i < this.dots; i++) {
dotString += '.';
}
this.waitingText.setText('Waiting for a player to join' + dotString);
this.nextDot = this.time.now + 500;
}
};
PrivateMatch.prototype.shutdown = function() {
this.matchInput.style.display = 'none';
};
PrivateMatch.prototype.startMenu = function() {
this.game.socketHandler.stopPrivateMatch();
window.location.hash = '';
this.game.state.start('Menu');
};
module.exports = PrivateMatch;
})(); |
this.dots = 0; | random_line_split |
private-match.js | (function(){
'use strict';
var _ = require('lodash');
var PrivateMatch = function() {
this.waitingText = null;
this.dots = 0;
this.nextDot = 0;
};
PrivateMatch.prototype.create = function() {
this.game.socketHandler.startPrivateMatch();
this.game.stage.backgroundColor = '#C8F7C5';
this.sendLinkText = this.add.text(
this.world.centerX,
this.world.centerY - 150,
'Send this link to your opponent:',
{
font: '45pt hallo_sansblack',
fill: '#e67e22',
align: 'center'
}
);
this.sendLinkText.anchor.setTo(0.5, 0.5);
this.waitingText = this.add.text(
-999,
this.world.centerY+250,
'Waiting for a player to join',
{
font: '30pt hallo_sansblack',
fill: '#f1c40f',
align: 'center'
}
);
// Can't anchor it as it would move around when the dots are added to its string.
this.waitingText.x = this.world.width/2 - this.waitingText.width/2;
this.matchInput = document.querySelector('#match-input');
this.matchInput.style.display = 'inline';
this.matchInput.focus();
this.smallMenuButton = this.add.button(this.world.centerX, this.world.centerY+170,
'menuButton', this.startMenu, this, 1, 0, 0);
this.smallMenuButton.scale.setTo(0.6, 0,6);
this.smallMenuButton.anchor.setTo(0.5, 0.5);
};
PrivateMatch.prototype.setLink = function(matchID) {
window.location.hash = matchID;
this.matchInput.value = window.location.href;
};
PrivateMatch.prototype.update = function() {
if (this.time.now > this.nextDot) |
};
PrivateMatch.prototype.shutdown = function() {
this.matchInput.style.display = 'none';
};
PrivateMatch.prototype.startMenu = function() {
this.game.socketHandler.stopPrivateMatch();
window.location.hash = '';
this.game.state.start('Menu');
};
module.exports = PrivateMatch;
})();
| {
if (this.dots === 3) {
this.dots = 0;
}
this.dots++;
var dotString = '';
for (var i = 0; i < this.dots; i++) {
dotString += '.';
}
this.waitingText.setText('Waiting for a player to join' + dotString);
this.nextDot = this.time.now + 500;
} | conditional_block |
menudata.js | /*
@licstart The following is the entire license notice for the
JavaScript code in this file.
Copyright (C) 1997-2019 by Dimitri van Heesch | This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
@licend The above is the entire license notice
for the JavaScript code in this file
*/
var menudata={children:[
{text:"Introduction",url:"index.html"},
{text:"Tutorial",url:"quick_guide.html"},
{text:"Guides",url:"pages.html"},
{text:"Reference",url:"modules.html"},
{text:"Files",url:"files.html"}]} |
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as published by
the Free Software Foundation
| random_line_split |
poly.py | #!/usr/bin/env python
from sys import path
import os.path
thisrep = os.path.dirname(os.path.abspath(__file__))
path.append(os.path.dirname(thisrep))
from random import randint
from pygame import *
from pygame import gfxdraw
from EasyGame import pathgetter,confirm
controls = """hold the left mouse button to draw
d = undo
s = save"""
scr = display.set_mode((800,800))
confirm(controls,fontsize=14,mode=1)
a = []
c = []
color = [randint(0,255) for i in (1,2,3)]+[50]
while 1:
ev = event.wait()
if ev.type == MOUSEBUTTONDOWN and ev.button == 1:
a.append([ev.pos])
c.append(color)
if ev.type == MOUSEMOTION and ev.buttons[0]:
|
if ev.type == MOUSEBUTTONUP and ev.button == 1:
if len(a[-1]) >= 2:
draw.aaline(scr,color,a[-1][0],a[-1][-1],1)
gfxdraw.filled_polygon(scr,a[-1],color)
display.flip()
color = [randint(0,255) for i in (1,2,3)]+[50]
if ev.type == QUIT: break
if ev.type == KEYDOWN and ev.key == K_s:
p = pathgetter()
if p: image.save(scr,p)
if ev.type == KEYDOWN and ev.key == K_d and a:
a.pop()
c.pop()
scr.fill(0)
for lines,color in zip(a,c):
draw.aalines(scr,color,1,lines,1)
gfxdraw.filled_polygon(scr,lines,color)
display.flip()
if ev.type == KEYDOWN and ev.key == K_p:
a = [[(x//10*10,y//10*10) for x,y in i] for i in a]
scr.fill(0)
for lines,col in zip(a,c):
if len(lines) > 1:
draw.aalines(scr,col,1,lines,1)
gfxdraw.filled_polygon(scr,lines,col)
display.flip()
| a[-1].append(ev.pos)
if len(a[-1]) >= 2:
draw.aaline(scr,color,a[-1][-1],a[-1][-2],1)
display.flip() | conditional_block |
poly.py | #!/usr/bin/env python
from sys import path
import os.path
thisrep = os.path.dirname(os.path.abspath(__file__))
path.append(os.path.dirname(thisrep))
from random import randint
from pygame import *
from pygame import gfxdraw
from EasyGame import pathgetter,confirm
controls = """hold the left mouse button to draw
d = undo
s = save"""
scr = display.set_mode((800,800))
confirm(controls,fontsize=14,mode=1)
a = []
c = []
color = [randint(0,255) for i in (1,2,3)]+[50]
while 1:
ev = event.wait()
if ev.type == MOUSEBUTTONDOWN and ev.button == 1:
a.append([ev.pos])
c.append(color)
if ev.type == MOUSEMOTION and ev.buttons[0]:
a[-1].append(ev.pos)
if len(a[-1]) >= 2: | display.flip()
if ev.type == MOUSEBUTTONUP and ev.button == 1:
if len(a[-1]) >= 2:
draw.aaline(scr,color,a[-1][0],a[-1][-1],1)
gfxdraw.filled_polygon(scr,a[-1],color)
display.flip()
color = [randint(0,255) for i in (1,2,3)]+[50]
if ev.type == QUIT: break
if ev.type == KEYDOWN and ev.key == K_s:
p = pathgetter()
if p: image.save(scr,p)
if ev.type == KEYDOWN and ev.key == K_d and a:
a.pop()
c.pop()
scr.fill(0)
for lines,color in zip(a,c):
draw.aalines(scr,color,1,lines,1)
gfxdraw.filled_polygon(scr,lines,color)
display.flip()
if ev.type == KEYDOWN and ev.key == K_p:
a = [[(x//10*10,y//10*10) for x,y in i] for i in a]
scr.fill(0)
for lines,col in zip(a,c):
if len(lines) > 1:
draw.aalines(scr,col,1,lines,1)
gfxdraw.filled_polygon(scr,lines,col)
display.flip() | draw.aaline(scr,color,a[-1][-1],a[-1][-2],1) | random_line_split |
add_multi_asset_responsive_display_ad.py | #!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a responsive display ad to an ad group.
Image assets are uploaded using AssetService. To get ad groups, run
get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
import requests
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def UploadImageAsset(client, url):
"""Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId']
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')
# Create the ad.
multi_asset_responsive_display_ad = {
'xsi_type': 'MultiAssetResponsiveDisplayAd',
'headlines': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Mars'
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Jupiter',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Pluto'
}
}],
'descriptions': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'See the planet in style.',
}
}],
'businessName': 'Galactic Luxury Cruises',
'longHeadline': {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
},
# This ad format does not allow the creation of an image asset by setting
# the asset.imageData field. An image asset must first be created using
# the AssetService, and asset.assetId must be populated when creating
# the ad.
'marketingImages': [{ | }],
'squareMarketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}],
# Optional values
'finalUrls': ['http://www.example.com'],
'callToActionText': 'Shop Now',
# Set color settings using hexadecimal values. Set allowFlexibleColor to
# false if you want your ads to render by always using your colors
# strictly.
'mainColor': '#0000ff',
'accentColor': '#ffff00',
'allowFlexibleColor': False,
'formatSetting': 'NON_NATIVE',
# Set dynamic display ad settings, composed of landscape logo image,
# promotion text, and price prefix.
'dynamicSettingsPricePrefix': 'as low as',
'dynamicSettingsPromoText': 'Free shipping!',
'logoImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}]
}
# Create ad group ad.
ad_group_ad = {
'adGroupId': ad_group_id,
'ad': multi_asset_responsive_display_ad,
# Optional.
'status': 'PAUSED'
}
# Add ad.
ads = ad_group_ad_service.mutate([
{'operator': 'ADD', 'operand': ad_group_ad}
])
# Display results.
if 'value' in ads:
for ad in ads['value']:
print ('Added new responsive display ad ad with ID "%d" '
'and long headline "%s".'
% (ad['ad']['id'], ad['ad']['longHeadline']['asset']['assetText']))
else:
print 'No ads were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID) | 'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/3b9Wfh')
} | random_line_split |
add_multi_asset_responsive_display_ad.py | #!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a responsive display ad to an ad group.
Image assets are uploaded using AssetService. To get ad groups, run
get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
import requests
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def UploadImageAsset(client, url):
"""Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId']
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')
# Create the ad.
multi_asset_responsive_display_ad = {
'xsi_type': 'MultiAssetResponsiveDisplayAd',
'headlines': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Mars'
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Jupiter',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Pluto'
}
}],
'descriptions': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'See the planet in style.',
}
}],
'businessName': 'Galactic Luxury Cruises',
'longHeadline': {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
},
# This ad format does not allow the creation of an image asset by setting
# the asset.imageData field. An image asset must first be created using
# the AssetService, and asset.assetId must be populated when creating
# the ad.
'marketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/3b9Wfh')
}
}],
'squareMarketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}],
# Optional values
'finalUrls': ['http://www.example.com'],
'callToActionText': 'Shop Now',
# Set color settings using hexadecimal values. Set allowFlexibleColor to
# false if you want your ads to render by always using your colors
# strictly.
'mainColor': '#0000ff',
'accentColor': '#ffff00',
'allowFlexibleColor': False,
'formatSetting': 'NON_NATIVE',
# Set dynamic display ad settings, composed of landscape logo image,
# promotion text, and price prefix.
'dynamicSettingsPricePrefix': 'as low as',
'dynamicSettingsPromoText': 'Free shipping!',
'logoImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}]
}
# Create ad group ad.
ad_group_ad = {
'adGroupId': ad_group_id,
'ad': multi_asset_responsive_display_ad,
# Optional.
'status': 'PAUSED'
}
# Add ad.
ads = ad_group_ad_service.mutate([
{'operator': 'ADD', 'operand': ad_group_ad}
])
# Display results.
if 'value' in ads:
for ad in ads['value']:
print ('Added new responsive display ad ad with ID "%d" '
'and long headline "%s".'
% (ad['ad']['id'], ad['ad']['longHeadline']['asset']['assetText']))
else:
|
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| print 'No ads were added.' | conditional_block |
add_multi_asset_responsive_display_ad.py | #!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a responsive display ad to an ad group.
Image assets are uploaded using AssetService. To get ad groups, run
get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
import requests
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def UploadImageAsset(client, url):
"""Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId']
def | (client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')
# Create the ad.
multi_asset_responsive_display_ad = {
'xsi_type': 'MultiAssetResponsiveDisplayAd',
'headlines': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Mars'
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Jupiter',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Pluto'
}
}],
'descriptions': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'See the planet in style.',
}
}],
'businessName': 'Galactic Luxury Cruises',
'longHeadline': {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
},
# This ad format does not allow the creation of an image asset by setting
# the asset.imageData field. An image asset must first be created using
# the AssetService, and asset.assetId must be populated when creating
# the ad.
'marketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/3b9Wfh')
}
}],
'squareMarketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}],
# Optional values
'finalUrls': ['http://www.example.com'],
'callToActionText': 'Shop Now',
# Set color settings using hexadecimal values. Set allowFlexibleColor to
# false if you want your ads to render by always using your colors
# strictly.
'mainColor': '#0000ff',
'accentColor': '#ffff00',
'allowFlexibleColor': False,
'formatSetting': 'NON_NATIVE',
# Set dynamic display ad settings, composed of landscape logo image,
# promotion text, and price prefix.
'dynamicSettingsPricePrefix': 'as low as',
'dynamicSettingsPromoText': 'Free shipping!',
'logoImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}]
}
# Create ad group ad.
ad_group_ad = {
'adGroupId': ad_group_id,
'ad': multi_asset_responsive_display_ad,
# Optional.
'status': 'PAUSED'
}
# Add ad.
ads = ad_group_ad_service.mutate([
{'operator': 'ADD', 'operand': ad_group_ad}
])
# Display results.
if 'value' in ads:
for ad in ads['value']:
print ('Added new responsive display ad ad with ID "%d" '
'and long headline "%s".'
% (ad['ad']['id'], ad['ad']['longHeadline']['asset']['assetText']))
else:
print 'No ads were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| main | identifier_name |
add_multi_asset_responsive_display_ad.py | #!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a responsive display ad to an ad group.
Image assets are uploaded using AssetService. To get ad groups, run
get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
import requests
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def UploadImageAsset(client, url):
|
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')
# Create the ad.
multi_asset_responsive_display_ad = {
'xsi_type': 'MultiAssetResponsiveDisplayAd',
'headlines': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Mars'
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Jupiter',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Pluto'
}
}],
'descriptions': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'See the planet in style.',
}
}],
'businessName': 'Galactic Luxury Cruises',
'longHeadline': {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
},
# This ad format does not allow the creation of an image asset by setting
# the asset.imageData field. An image asset must first be created using
# the AssetService, and asset.assetId must be populated when creating
# the ad.
'marketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/3b9Wfh')
}
}],
'squareMarketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}],
# Optional values
'finalUrls': ['http://www.example.com'],
'callToActionText': 'Shop Now',
# Set color settings using hexadecimal values. Set allowFlexibleColor to
# false if you want your ads to render by always using your colors
# strictly.
'mainColor': '#0000ff',
'accentColor': '#ffff00',
'allowFlexibleColor': False,
'formatSetting': 'NON_NATIVE',
# Set dynamic display ad settings, composed of landscape logo image,
# promotion text, and price prefix.
'dynamicSettingsPricePrefix': 'as low as',
'dynamicSettingsPromoText': 'Free shipping!',
'logoImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}]
}
# Create ad group ad.
ad_group_ad = {
'adGroupId': ad_group_id,
'ad': multi_asset_responsive_display_ad,
# Optional.
'status': 'PAUSED'
}
# Add ad.
ads = ad_group_ad_service.mutate([
{'operator': 'ADD', 'operand': ad_group_ad}
])
# Display results.
if 'value' in ads:
for ad in ads['value']:
print ('Added new responsive display ad ad with ID "%d" '
'and long headline "%s".'
% (ad['ad']['id'], ad['ad']['longHeadline']['asset']['assetText']))
else:
print 'No ads were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| """Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId'] | identifier_body |
content-object-list.component.spec.ts | import { Observable, of } from 'rxjs';
import { ContentService, FileUrlParameters } from '../shared/content.service';
import { ContentItem } from '../shared/model/content-item';
import { Config, ContentConfig } from '../../core/shared/model/config';
import { ContentObjectListComponent } from './content-object-list.component';
import { ComponentFixture, TestBed } from '@angular/core/testing';
import { Field } from '../../core/shared/model/field';
import { User } from '../../user/shared/user';
import { MatButtonModule } from '@angular/material/button';
import { MatSnackBar } from '@angular/material/snack-bar';
import { MatTooltipModule } from '@angular/material/tooltip';
import { FormBuilder, FormGroup, FormsModule, ReactiveFormsModule } from '@angular/forms';
import { ActivatedRoute } from '@angular/router';
import { UserService } from '../../user/shared/user.service';
import { Title } from '@angular/platform-browser';
import { LiveAnnouncer } from '@angular/cdk/a11y';
import { MaterialConfigModule } from '../../routing/material-config.module';
import { FileUploadComponent } from '../../shared/widgets/file-upload/file-upload.component';
import { TruncatePipe } from '../../shared/pipes/truncate.pipe';
import { ActivatedRouteStub } from '../../../testing/router-stubs';
import { RouterTestingModule } from '@angular/router/testing';
import { ContentPageConfig } from '../../core/shared/model/content-page-config';
import { NoopAnimationsModule } from '@angular/platform-browser/animations';
import { HttpClientModule } from '@angular/common/http';
import { FocusDirective } from '../../shared/directives/focus/focus.directive';
import { FieldOption } from '../../core/shared/model/field/field-option';
import { NotificationService } from '../../shared/providers/notification.service';
import { first } from 'rxjs/operators';
const testFile = new File(['This is a test file'], 'test.pdf', { type: 'application/pdf' });
const testFormData = {
metadata: {
1: 'asdf',
},
};
class | extends ContentService {
constructor() {
super(null, null, null);
}
create(contentItem: ContentItem, file: File): Observable<ContentItem> {
return of(contentItem);
}
getFileUrl({ itemId, webViewable, useOriginalFilename, disposition }: FileUrlParameters): string {
return 'testUrl/' + itemId;
}
}
class MockUserService extends UserService {
constructor() {
super(null, null, null);
}
getUser(): User {
return new User('testUser');
}
}
describe('ContentObjectList', () => {
let mockContentService: ContentService;
let mockUserService: MockUserService;
let activatedRoute: ActivatedRouteStub;
let component: ContentObjectListComponent;
let fixture: ComponentFixture<ContentObjectListComponent>;
const sourceItem: ContentItem = null;
const config = new Config();
config.contentConfig = new ContentConfig();
config.contentConfig.profile = 'testProfile';
const formModel = {
metadata: {
1: 'test',
},
};
const field = new Field();
field.key = '1';
const fields = new Array<Field>();
fields.push(field);
const user = new User('testUser');
beforeEach(() => {
activatedRoute = new ActivatedRouteStub();
mockContentService = new MockContentService();
mockUserService = new MockUserService();
TestBed.configureTestingModule({
imports: [
FormsModule,
HttpClientModule,
MaterialConfigModule,
MatButtonModule,
MatTooltipModule,
ReactiveFormsModule,
RouterTestingModule,
NoopAnimationsModule,
],
providers: [
{ provide: ActivatedRoute, useValue: activatedRoute },
{ provide: ContentService, useValue: mockContentService },
{ provide: UserService, useValue: mockUserService },
Title,
FormBuilder,
LiveAnnouncer,
MatSnackBar,
NotificationService,
],
declarations: [FileUploadComponent, ContentObjectListComponent, TruncatePipe, FocusDirective],
}).compileComponents();
fixture = TestBed.createComponent(ContentObjectListComponent);
component = fixture.componentInstance;
component.formGroup = new FormGroup({});
component.contentItem = undefined;
component.page = undefined;
const editPageConfig = new ContentPageConfig();
editPageConfig.pageName = 'test-edit-page';
editPageConfig.fieldsToDisplay = [
Object.assign(new Field(), { key: '1', label: 'First' }),
Object.assign(new Field(), { key: '2', label: 'Second' }),
Object.assign(new Field(), { key: '3', label: 'Third' }),
Object.assign(new Field(), { key: 'a', label: 'a' }),
Object.assign(new Field(), { key: 'd', label: 'd', displayType: 'date' }),
Object.assign(new Field(), {
key: 't',
label: 't',
displayType: 'autocomplete',
options: [new FieldOption('o1'), new FieldOption('o2'), new FieldOption('o3')],
}),
];
editPageConfig.viewPanel = false;
config.pages['edit'] = editPageConfig;
activatedRoute.testData = { config: config };
component.ngOnInit();
fixture.detectChanges();
});
it('should be created', () => {
expect(component).toBeTruthy();
});
it('should correctly populate the profileId when preparing to save', () => {
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user);
expect(contentItem.metadata['ProfileId']).toBe('testProfile');
});
it('should populate the account when preparing to save', () => {
config.contentConfig.account = 'testAccount';
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user);
expect(contentItem.metadata['Account']).toBe('testAccount');
});
it('should populate the account replacing user template when preparing to save', () => {
config.contentConfig.account = 'testAccount/${user}';
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user);
expect(contentItem.metadata['Account']).toBe('testAccount/testUser');
});
it('should add the specified metadata overrides when preparing to save', () => {
const metadataOverrides = [
{ name: 'PublishStatus', value: 'Published' },
{ name: 'AnotherOnSave', value: 'Value' },
];
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user, metadataOverrides);
expect(contentItem.metadata['PublishStatus']).toBe('Published');
expect(contentItem.metadata['AnotherOnSave']).toBe('Value');
});
it('should create a content object with an initialized contentItemUrl', () => {
const contentItem = new ContentItem();
contentItem.id = '123';
const index = component.addItem(contentItem);
const contentObject = component.contentObjects[index];
component.onDisplayType(contentObject, 'anyvalue');
expect(contentObject.url).toBe('testUrl/123');
});
it('should add a file to the transaction and select the object', () => {
const properties = {
type: 'application/pdf',
};
const file = new File(['This is a test file'], 'test.pdf', properties);
const index = component.addFile(file);
expect(component.contentObjects.length).toBe(1);
component.removeObject(undefined, index);
});
it('should replace a file on a persisted content item', () => {
const properties = {
type: 'application/pdf',
};
const item = new ContentItem();
item.id = '123';
item.metadata = new Map<string, string>();
item.metadata.set('MimeType', 'application/pdf');
item.metadata.set('FileSize', '2000');
item.metadata.set('OriginalFileName', 'file123.pdf');
const index = component.addItem(item);
const co = component.contentObjects[index];
const file = new File(['This is a test file'], 'test.pdf', properties);
component.replace(co, file);
expect(component.contentObjects.length).toBe(1);
expect(co.file).toBe(file);
expect(co.url).toBe('testUrl/123');
component.removeContentObject(index);
});
it('should create item on save', () => {
const properties = {
type: 'application/pdf',
};
const file = new File(['This is a test file'], 'test.pdf', properties);
component.addFile(file);
expect(component.contentObjects.length).toBe(1);
const formData = {
metadata: {
1: 'asdf',
},
};
const metadataOverrides = new Array<any>();
const mockContentServiceCreate = spyOn(mockContentService, 'create');
component.saveItem(fields, formData, metadataOverrides);
expect(mockContentServiceCreate).toHaveBeenCalled();
});
/**
* Addresses bug CAB-4036: New revision was created sometimes when only metadata was changed
*/
it('should update item and clear file on save', (done: DoneFn) => {
const contentItem = new ContentItem();
const metadataOverrides = new Array<any>();
const mockContentServiceUpdate = spyOn(mockContentService, 'update').and.returnValue(of(contentItem));
// Setup a document update by loading the content item and replacing the file.
component.addItem(contentItem);
component.replace(component.contentObjects[0], testFile);
// Update the document.
component.saveItem(fields, testFormData, metadataOverrides);
// Subscribe to event when saving is completed (can be done here because 'saveItem' is asynchronous).
const subscription = component.saving.pipe(first((val) => !val)).subscribe(() => {
subscription.unsubscribe();
// Verify the replaced file is supplied to service.
expect(mockContentServiceUpdate).toHaveBeenCalledWith(jasmine.anything(), testFile);
// Update document again, verify that file is NOT supplied to service.
mockContentServiceUpdate.calls.reset();
component.saveItem(fields, testFormData, metadataOverrides);
expect(mockContentServiceUpdate).toHaveBeenCalledWith(jasmine.anything(), null);
done();
});
});
it('should emit saving event on save', () => {
const formData = {
metadata: {
1: 'asdf',
},
};
const metadataOverrides = new Array<any>();
spyOn(component.saving, 'emit');
component.saveItem(fields, formData, metadataOverrides);
expect(component.saving.emit).toHaveBeenCalledWith(true);
});
});
| MockContentService | identifier_name |
content-object-list.component.spec.ts | import { Observable, of } from 'rxjs';
import { ContentService, FileUrlParameters } from '../shared/content.service';
import { ContentItem } from '../shared/model/content-item';
import { Config, ContentConfig } from '../../core/shared/model/config';
import { ContentObjectListComponent } from './content-object-list.component';
import { ComponentFixture, TestBed } from '@angular/core/testing';
import { Field } from '../../core/shared/model/field';
import { User } from '../../user/shared/user';
import { MatButtonModule } from '@angular/material/button';
import { MatSnackBar } from '@angular/material/snack-bar';
import { MatTooltipModule } from '@angular/material/tooltip';
import { FormBuilder, FormGroup, FormsModule, ReactiveFormsModule } from '@angular/forms';
import { ActivatedRoute } from '@angular/router';
import { UserService } from '../../user/shared/user.service';
import { Title } from '@angular/platform-browser';
import { LiveAnnouncer } from '@angular/cdk/a11y';
import { MaterialConfigModule } from '../../routing/material-config.module';
import { FileUploadComponent } from '../../shared/widgets/file-upload/file-upload.component';
import { TruncatePipe } from '../../shared/pipes/truncate.pipe';
import { ActivatedRouteStub } from '../../../testing/router-stubs';
import { RouterTestingModule } from '@angular/router/testing';
import { ContentPageConfig } from '../../core/shared/model/content-page-config';
import { NoopAnimationsModule } from '@angular/platform-browser/animations';
import { HttpClientModule } from '@angular/common/http';
import { FocusDirective } from '../../shared/directives/focus/focus.directive';
import { FieldOption } from '../../core/shared/model/field/field-option';
import { NotificationService } from '../../shared/providers/notification.service';
import { first } from 'rxjs/operators';
const testFile = new File(['This is a test file'], 'test.pdf', { type: 'application/pdf' });
const testFormData = {
metadata: {
1: 'asdf',
},
};
class MockContentService extends ContentService {
constructor() {
super(null, null, null);
}
create(contentItem: ContentItem, file: File): Observable<ContentItem> {
return of(contentItem);
} | }
class MockUserService extends UserService {
constructor() {
super(null, null, null);
}
getUser(): User {
return new User('testUser');
}
}
describe('ContentObjectList', () => {
let mockContentService: ContentService;
let mockUserService: MockUserService;
let activatedRoute: ActivatedRouteStub;
let component: ContentObjectListComponent;
let fixture: ComponentFixture<ContentObjectListComponent>;
const sourceItem: ContentItem = null;
const config = new Config();
config.contentConfig = new ContentConfig();
config.contentConfig.profile = 'testProfile';
const formModel = {
metadata: {
1: 'test',
},
};
const field = new Field();
field.key = '1';
const fields = new Array<Field>();
fields.push(field);
const user = new User('testUser');
beforeEach(() => {
activatedRoute = new ActivatedRouteStub();
mockContentService = new MockContentService();
mockUserService = new MockUserService();
TestBed.configureTestingModule({
imports: [
FormsModule,
HttpClientModule,
MaterialConfigModule,
MatButtonModule,
MatTooltipModule,
ReactiveFormsModule,
RouterTestingModule,
NoopAnimationsModule,
],
providers: [
{ provide: ActivatedRoute, useValue: activatedRoute },
{ provide: ContentService, useValue: mockContentService },
{ provide: UserService, useValue: mockUserService },
Title,
FormBuilder,
LiveAnnouncer,
MatSnackBar,
NotificationService,
],
declarations: [FileUploadComponent, ContentObjectListComponent, TruncatePipe, FocusDirective],
}).compileComponents();
fixture = TestBed.createComponent(ContentObjectListComponent);
component = fixture.componentInstance;
component.formGroup = new FormGroup({});
component.contentItem = undefined;
component.page = undefined;
const editPageConfig = new ContentPageConfig();
editPageConfig.pageName = 'test-edit-page';
editPageConfig.fieldsToDisplay = [
Object.assign(new Field(), { key: '1', label: 'First' }),
Object.assign(new Field(), { key: '2', label: 'Second' }),
Object.assign(new Field(), { key: '3', label: 'Third' }),
Object.assign(new Field(), { key: 'a', label: 'a' }),
Object.assign(new Field(), { key: 'd', label: 'd', displayType: 'date' }),
Object.assign(new Field(), {
key: 't',
label: 't',
displayType: 'autocomplete',
options: [new FieldOption('o1'), new FieldOption('o2'), new FieldOption('o3')],
}),
];
editPageConfig.viewPanel = false;
config.pages['edit'] = editPageConfig;
activatedRoute.testData = { config: config };
component.ngOnInit();
fixture.detectChanges();
});
it('should be created', () => {
expect(component).toBeTruthy();
});
it('should correctly populate the profileId when preparing to save', () => {
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user);
expect(contentItem.metadata['ProfileId']).toBe('testProfile');
});
it('should populate the account when preparing to save', () => {
config.contentConfig.account = 'testAccount';
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user);
expect(contentItem.metadata['Account']).toBe('testAccount');
});
it('should populate the account replacing user template when preparing to save', () => {
config.contentConfig.account = 'testAccount/${user}';
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user);
expect(contentItem.metadata['Account']).toBe('testAccount/testUser');
});
it('should add the specified metadata overrides when preparing to save', () => {
const metadataOverrides = [
{ name: 'PublishStatus', value: 'Published' },
{ name: 'AnotherOnSave', value: 'Value' },
];
const contentItem = component.prepareItem(sourceItem, fields, formModel, config, user, metadataOverrides);
expect(contentItem.metadata['PublishStatus']).toBe('Published');
expect(contentItem.metadata['AnotherOnSave']).toBe('Value');
});
it('should create a content object with an initialized contentItemUrl', () => {
const contentItem = new ContentItem();
contentItem.id = '123';
const index = component.addItem(contentItem);
const contentObject = component.contentObjects[index];
component.onDisplayType(contentObject, 'anyvalue');
expect(contentObject.url).toBe('testUrl/123');
});
it('should add a file to the transaction and select the object', () => {
const properties = {
type: 'application/pdf',
};
const file = new File(['This is a test file'], 'test.pdf', properties);
const index = component.addFile(file);
expect(component.contentObjects.length).toBe(1);
component.removeObject(undefined, index);
});
it('should replace a file on a persisted content item', () => {
const properties = {
type: 'application/pdf',
};
const item = new ContentItem();
item.id = '123';
item.metadata = new Map<string, string>();
item.metadata.set('MimeType', 'application/pdf');
item.metadata.set('FileSize', '2000');
item.metadata.set('OriginalFileName', 'file123.pdf');
const index = component.addItem(item);
const co = component.contentObjects[index];
const file = new File(['This is a test file'], 'test.pdf', properties);
component.replace(co, file);
expect(component.contentObjects.length).toBe(1);
expect(co.file).toBe(file);
expect(co.url).toBe('testUrl/123');
component.removeContentObject(index);
});
it('should create item on save', () => {
const properties = {
type: 'application/pdf',
};
const file = new File(['This is a test file'], 'test.pdf', properties);
component.addFile(file);
expect(component.contentObjects.length).toBe(1);
const formData = {
metadata: {
1: 'asdf',
},
};
const metadataOverrides = new Array<any>();
const mockContentServiceCreate = spyOn(mockContentService, 'create');
component.saveItem(fields, formData, metadataOverrides);
expect(mockContentServiceCreate).toHaveBeenCalled();
});
/**
* Addresses bug CAB-4036: New revision was created sometimes when only metadata was changed
*/
it('should update item and clear file on save', (done: DoneFn) => {
const contentItem = new ContentItem();
const metadataOverrides = new Array<any>();
const mockContentServiceUpdate = spyOn(mockContentService, 'update').and.returnValue(of(contentItem));
// Setup a document update by loading the content item and replacing the file.
component.addItem(contentItem);
component.replace(component.contentObjects[0], testFile);
// Update the document.
component.saveItem(fields, testFormData, metadataOverrides);
// Subscribe to event when saving is completed (can be done here because 'saveItem' is asynchronous).
const subscription = component.saving.pipe(first((val) => !val)).subscribe(() => {
subscription.unsubscribe();
// Verify the replaced file is supplied to service.
expect(mockContentServiceUpdate).toHaveBeenCalledWith(jasmine.anything(), testFile);
// Update document again, verify that file is NOT supplied to service.
mockContentServiceUpdate.calls.reset();
component.saveItem(fields, testFormData, metadataOverrides);
expect(mockContentServiceUpdate).toHaveBeenCalledWith(jasmine.anything(), null);
done();
});
});
it('should emit saving event on save', () => {
const formData = {
metadata: {
1: 'asdf',
},
};
const metadataOverrides = new Array<any>();
spyOn(component.saving, 'emit');
component.saveItem(fields, formData, metadataOverrides);
expect(component.saving.emit).toHaveBeenCalledWith(true);
});
}); |
getFileUrl({ itemId, webViewable, useOriginalFilename, disposition }: FileUrlParameters): string {
return 'testUrl/' + itemId;
} | random_line_split |
sim.worker.jetstream.js | /*
6 sectors * 512 lines * 60 vertices, t=threads, m=stamps
jetstream t: 1 m: 10 ms: 15651
jetstream t: 2 m: 10 ms: 9348
jetstream t: 3 m: 10 ms: 8583
jetstream t: 4 m: 10 ms: 7969
jetstream t: 5 m: 10 ms: 7891
jetstream t: 6 m: 10 ms: 8037
jetstream t: 7 m: 10 ms: 7981
jetstream t: 8 m: 10 ms: 8594
jetstream t: 9 m: 10 ms: 9060
jetstream t: 10 m: 10 ms: 8797
*/
if( typeof importScripts === 'function') {
const
PI = Math.PI,
TAU = 2 * PI,
PI2 = PI / 2,
RADIUS = 1.0,
DEGRAD = PI / 180.0
;
var
name = 'jet.worker',
cfg, topics, doe, pool,
datagrams = {
ugrdprs: null,
vgrdprs: null
},
prelines = null, // pos, wid, col per sector
multilines = null, // lines per sector
sectors = null // with attributes per sector
;
function vec3toLat (v, radius) {return 90 - (Math.acos(v.y / radius)) * 180 / PI;}
function vec3toLon (v, radius) {return ((270 + (Math.atan2(v.x , v.z)) * 180 / PI) % 360);}
function vector3ToLatLong (v, radius) {
return {
lat: 90 - (Math.acos(v.y / radius)) * 180 / Math.PI,
lon: ((270 + (Math.atan2(v.x , v.z)) * 180 / Math.PI) % 360)
};
}
function latLonRadToVector3 (lat, lon, radius) {
var phi = lat * Math.PI / 180;
var theta = (lon - 180) * Math.PI / 180;
var x = -radius * Math.cos(phi) * Math.cos(theta);
var y = radius * Math.sin(phi);
var z = radius * Math.cos(phi) * Math.sin(theta);
return new Vector3(x, y, z);
}
function filterPool (sector, amount) {
var i, j = 0, coord, out = [], len = pool.length;
for (i=0; j<amount && i<len; i++) {
coord = pool[i];
if (
coord.lat < sector[0] &&
coord.lon > sector[1] &&
coord.lat > sector[2] &&
coord.lon < sector[3]
) {
out.push(coord);
j += 1;
}
}
return out;
}
function onmessage (event) {
var
id = event.data.id,
topic = event.data.topic,
payload = event.data.payload,
callback = function (id, result, transferables) {
postMessage({id, result}, transferables);
}
;
if (topics[topic]) {
topics[topic](id, payload, callback);
} else {
console.warn(name + ': unknown topic', topic);
}
}
topics = {
importScripts: function (id, payload, callback) {
importScripts.apply(null, payload.scripts);
callback(id, null);
},
retrieve: function (id, payload, callback) {
var datagramm;
cfg = payload.cfg;
doe = payload.doe;
pool = payload.pool;
RES.load({ urls: payload.urls, onFinish: function (err, responses) {
if (err) { throw err } else {
responses.forEach(function (response) {
datagramm = new SIM.Datagram(response.data);
datagrams[datagramm.vari] = datagramm;
});
topics.prepare(id, payload, function () {
topics.process(id, payload, function () {
topics.combine(id, payload, function (id, result, transferables) {
callback(id, result, transferables)
});
});
});
}
}});
},
prepare: function (id, payload, callback) {
var
t0 = Date.now(),
i, j, u, v, speed, width, pool, lat, lon, color, vec3, seeds, positions, widths, colors, seeds,
sat = 0.4,
spcl = new Spherical(),
length = cfg.length,
amount = NaN,
filler = () => [],
counter = (a, b) => a + b.positions.length
;
// over sectors
prelines = cfg.sim.sectors.map( sector => {
seeds = [];
pool = filterPool(sector, cfg.amount);
amount = pool.length;
positions = new Array(amount).fill(0).map(filler);
colors = new Array(amount).fill(0).map(filler);
widths = new Array(amount).fill(0).map(filler);
// over lines
for (i=0; i<amount; i++) {
lat = pool[i].lat;
lon = pool[i].lon;
vec3 = latLonRadToVector3(lat, lon, cfg.radius);
// keep start point
seeds.push(vec3.x, vec3.y, vec3.z);
// over vertices
for (j=0; j<length; j++) {
u = datagrams.ugrdprs.linearXY(doe, lat, lon);
v = datagrams.vgrdprs.linearXY(doe, lat, lon);
speed = Math.hypot(u, v);
u /= Math.cos(lat * DEGRAD);
color = new Color().setHSL(cfg.hue, sat, speed / 100);
width = H.clampScale(speed, 0, 50, 0.5, 2.0);
| widths[i].push(width);
spcl.setFromVector3(vec3);
spcl.theta += u * cfg.factor; // east-direction
spcl.phi -= v * cfg.factor; // north-direction
vec3 = vec3.setFromSpherical(spcl).clone();
lat = vec3toLat(vec3, cfg.radius);
lon = vec3toLon(vec3, cfg.radius);
}
}
return { seeds: new Float32Array(seeds), positions, colors, widths };
});
// debugger;
// console.log(name + ': prepare', id, Date.now() - t0, prelines.reduce(counter, 0));
callback(id, {}, [])
},
process: function (id, payload, callback) {
var
t0 = Date.now(),
counter = (a, b) => a + b.length
;
multilines = prelines.map(preline => {
var
idx = 0,
multiline = H.zip(
preline.positions,
preline.colors,
preline.widths,
(vectors, colors, widths) => new Multiline(idx++, vectors, colors, widths)
)
;
multiline.seeds = preline.seeds;
return multiline;
});
// debugger;
// console.log(name + ': process', id, Date.now() - t0, multilines.reduce(counter, 0));
callback(id, {}, []);
},
combine: function (id, payload, callback) {
var
transferables,
attributeTypes = {
colors: Float32Array,
index: Uint16Array,
lineIndex: Float32Array,
next: Float32Array,
position: Float32Array,
previous: Float32Array,
side: Float32Array,
width: Float32Array,
},
textures = {
u: datagrams.ugrdprs.data[doe],
v: datagrams.vgrdprs.data[doe],
}
;
// debugger;
// over sectors (n=6)
sectors = multilines.map( lines => {
var
length,
attributes = {},
uniforms = {
seeds: lines.seeds
}
;
delete lines.seeds;
// prepare attributes
H.each(attributeTypes, (name, type) => {
length = lines[0].attributes[name].length * lines.length;
attributes[name] = new type(length);
});
// over attributes (n=8)
H.each(attributeTypes, (name) => {
// debugger;
// if (name === 'seeds') { return; }
var
i, source, length,
pointer = 0,
indexOffset = 0,
positLength = lines[0].attributes['position'].length / 3,
target = attributes[name]
;
// over lines (n=512)
H.each(lines, (_, line) => {
source = line.attributes[name];
length = source.length;
if (name === 'index'){
for (i=0; i<length; i++) {
target[pointer + i] = source[i] + indexOffset;
}
} else if (name !== 'seeds') {
for (i=0; i<length; i++) {
target[pointer + i] = source[i];
}
}
pointer += length;
indexOffset += positLength;
});
});
return { attributes, uniforms };
});
// finish transferables
transferables = [textures.u.buffer, textures.v.buffer];
H.each(sectors, (_, sector) => {
transferables.push(sector.uniforms.seeds.buffer);
H.each(attributeTypes, (name) => {
transferables.push(sector.attributes[name].buffer);
});
});
// TODO: check edge + transferable
callback(id, {sectors, textures}, transferables);
}
};
}
function Multiline ( idx, vertices, colors, widths ) {
this.idx = idx;
this.index = [];
this.lineIndex = [];
this.next = [];
this.positions = [];
this.previous = [];
this.side = [];
this.widths = [];
this.colors = [];
this.length = vertices.length;
this.init(vertices, colors, widths);
this.process();
// TODO: Needed? ~15% faster
this.attributes = {
index: new Uint16Array( this.index ),
lineIndex: new Float32Array( this.lineIndex ),
next: new Float32Array( this.next ),
position: new Float32Array( this.positions ),
previous: new Float32Array( this.previous ),
side: new Float32Array( this.side ),
width: new Float32Array( this.widths ),
colors: new Float32Array( this.colors ),
}
};
Multiline.prototype = {
constructor: Multiline,
compareV3: function( a, b ) {
var aa = a * 6, ab = b * 6;
return (
( this.positions[ aa ] === this.positions[ ab ] ) &&
( this.positions[ aa + 1 ] === this.positions[ ab + 1 ] ) &&
( this.positions[ aa + 2 ] === this.positions[ ab + 2 ] )
);
},
copyV3: function( a ) {
var aa = a * 6;
return [ this.positions[ aa ], this.positions[ aa + 1 ], this.positions[ aa + 2 ] ];
},
init: function( vertices, colors, widths ) {
var j, ver, cnt, col, wid, n, len = this.length;
for( j = 0; j < len; j++ ) {
ver = vertices[ j ];
col = colors[ j ];
wid = widths[ j ];
cnt = j / vertices.length;
this.positions.push( ver.x, ver.y, ver.z );
this.positions.push( ver.x, ver.y, ver.z );
this.lineIndex.push(this.idx + cnt);
this.lineIndex.push(this.idx + cnt);
this.colors.push(col.r, col.g, col.b);
this.colors.push(col.r, col.g, col.b);
this.widths.push(wid);
this.widths.push(wid);
this.side.push( 1 );
this.side.push( -1 );
}
for( j = 0; j < len - 1; j++ ) {
n = j + j;
this.index.push( n, n + 1, n + 2 );
this.index.push( n + 2, n + 1, n + 3 );
}
},
process: function() {
var j, v, l = this.positions.length / 6;
v = this.compareV3( 0, l - 1 ) ? this.copyV3( l - 2 ) : this.copyV3( 0 ) ;
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
for( j = 0; j < l - 1; j++ ) {
v = this.copyV3( j );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
for( j = 1; j < l; j++ ) {
v = this.copyV3( j );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
v = this.compareV3( l - 1, 0 ) ? this.copyV3( 1 ) : this.copyV3( l - 1 ) ;
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
}; | positions[i].push(vec3);
colors[i].push(color); | random_line_split |
sim.worker.jetstream.js | /*
6 sectors * 512 lines * 60 vertices, t=threads, m=stamps
jetstream t: 1 m: 10 ms: 15651
jetstream t: 2 m: 10 ms: 9348
jetstream t: 3 m: 10 ms: 8583
jetstream t: 4 m: 10 ms: 7969
jetstream t: 5 m: 10 ms: 7891
jetstream t: 6 m: 10 ms: 8037
jetstream t: 7 m: 10 ms: 7981
jetstream t: 8 m: 10 ms: 8594
jetstream t: 9 m: 10 ms: 9060
jetstream t: 10 m: 10 ms: 8797
*/
if( typeof importScripts === 'function') {
const
PI = Math.PI,
TAU = 2 * PI,
PI2 = PI / 2,
RADIUS = 1.0,
DEGRAD = PI / 180.0
;
var
name = 'jet.worker',
cfg, topics, doe, pool,
datagrams = {
ugrdprs: null,
vgrdprs: null
},
prelines = null, // pos, wid, col per sector
multilines = null, // lines per sector
sectors = null // with attributes per sector
;
function vec3toLat (v, radius) {return 90 - (Math.acos(v.y / radius)) * 180 / PI;}
function vec3toLon (v, radius) {return ((270 + (Math.atan2(v.x , v.z)) * 180 / PI) % 360);}
function vector3ToLatLong (v, radius) {
return {
lat: 90 - (Math.acos(v.y / radius)) * 180 / Math.PI,
lon: ((270 + (Math.atan2(v.x , v.z)) * 180 / Math.PI) % 360)
};
}
function latLonRadToVector3 (lat, lon, radius) {
var phi = lat * Math.PI / 180;
var theta = (lon - 180) * Math.PI / 180;
var x = -radius * Math.cos(phi) * Math.cos(theta);
var y = radius * Math.sin(phi);
var z = radius * Math.cos(phi) * Math.sin(theta);
return new Vector3(x, y, z);
}
function filterPool (sector, amount) {
var i, j = 0, coord, out = [], len = pool.length;
for (i=0; j<amount && i<len; i++) {
coord = pool[i];
if (
coord.lat < sector[0] &&
coord.lon > sector[1] &&
coord.lat > sector[2] &&
coord.lon < sector[3]
) {
out.push(coord);
j += 1;
}
}
return out;
}
function onmessage (event) {
var
id = event.data.id,
topic = event.data.topic,
payload = event.data.payload,
callback = function (id, result, transferables) {
postMessage({id, result}, transferables);
}
;
if (topics[topic]) {
topics[topic](id, payload, callback);
} else {
console.warn(name + ': unknown topic', topic);
}
}
topics = {
importScripts: function (id, payload, callback) {
importScripts.apply(null, payload.scripts);
callback(id, null);
},
retrieve: function (id, payload, callback) {
var datagramm;
cfg = payload.cfg;
doe = payload.doe;
pool = payload.pool;
RES.load({ urls: payload.urls, onFinish: function (err, responses) {
if (err) { throw err } else {
responses.forEach(function (response) {
datagramm = new SIM.Datagram(response.data);
datagrams[datagramm.vari] = datagramm;
});
topics.prepare(id, payload, function () {
topics.process(id, payload, function () {
topics.combine(id, payload, function (id, result, transferables) {
callback(id, result, transferables)
});
});
});
}
}});
},
prepare: function (id, payload, callback) {
var
t0 = Date.now(),
i, j, u, v, speed, width, pool, lat, lon, color, vec3, seeds, positions, widths, colors, seeds,
sat = 0.4,
spcl = new Spherical(),
length = cfg.length,
amount = NaN,
filler = () => [],
counter = (a, b) => a + b.positions.length
;
// over sectors
prelines = cfg.sim.sectors.map( sector => {
seeds = [];
pool = filterPool(sector, cfg.amount);
amount = pool.length;
positions = new Array(amount).fill(0).map(filler);
colors = new Array(amount).fill(0).map(filler);
widths = new Array(amount).fill(0).map(filler);
// over lines
for (i=0; i<amount; i++) |
return { seeds: new Float32Array(seeds), positions, colors, widths };
});
// debugger;
// console.log(name + ': prepare', id, Date.now() - t0, prelines.reduce(counter, 0));
callback(id, {}, [])
},
process: function (id, payload, callback) {
var
t0 = Date.now(),
counter = (a, b) => a + b.length
;
multilines = prelines.map(preline => {
var
idx = 0,
multiline = H.zip(
preline.positions,
preline.colors,
preline.widths,
(vectors, colors, widths) => new Multiline(idx++, vectors, colors, widths)
)
;
multiline.seeds = preline.seeds;
return multiline;
});
// debugger;
// console.log(name + ': process', id, Date.now() - t0, multilines.reduce(counter, 0));
callback(id, {}, []);
},
combine: function (id, payload, callback) {
var
transferables,
attributeTypes = {
colors: Float32Array,
index: Uint16Array,
lineIndex: Float32Array,
next: Float32Array,
position: Float32Array,
previous: Float32Array,
side: Float32Array,
width: Float32Array,
},
textures = {
u: datagrams.ugrdprs.data[doe],
v: datagrams.vgrdprs.data[doe],
}
;
// debugger;
// over sectors (n=6)
sectors = multilines.map( lines => {
var
length,
attributes = {},
uniforms = {
seeds: lines.seeds
}
;
delete lines.seeds;
// prepare attributes
H.each(attributeTypes, (name, type) => {
length = lines[0].attributes[name].length * lines.length;
attributes[name] = new type(length);
});
// over attributes (n=8)
H.each(attributeTypes, (name) => {
// debugger;
// if (name === 'seeds') { return; }
var
i, source, length,
pointer = 0,
indexOffset = 0,
positLength = lines[0].attributes['position'].length / 3,
target = attributes[name]
;
// over lines (n=512)
H.each(lines, (_, line) => {
source = line.attributes[name];
length = source.length;
if (name === 'index'){
for (i=0; i<length; i++) {
target[pointer + i] = source[i] + indexOffset;
}
} else if (name !== 'seeds') {
for (i=0; i<length; i++) {
target[pointer + i] = source[i];
}
}
pointer += length;
indexOffset += positLength;
});
});
return { attributes, uniforms };
});
// finish transferables
transferables = [textures.u.buffer, textures.v.buffer];
H.each(sectors, (_, sector) => {
transferables.push(sector.uniforms.seeds.buffer);
H.each(attributeTypes, (name) => {
transferables.push(sector.attributes[name].buffer);
});
});
// TODO: check edge + transferable
callback(id, {sectors, textures}, transferables);
}
};
}
function Multiline ( idx, vertices, colors, widths ) {
this.idx = idx;
this.index = [];
this.lineIndex = [];
this.next = [];
this.positions = [];
this.previous = [];
this.side = [];
this.widths = [];
this.colors = [];
this.length = vertices.length;
this.init(vertices, colors, widths);
this.process();
// TODO: Needed? ~15% faster
this.attributes = {
index: new Uint16Array( this.index ),
lineIndex: new Float32Array( this.lineIndex ),
next: new Float32Array( this.next ),
position: new Float32Array( this.positions ),
previous: new Float32Array( this.previous ),
side: new Float32Array( this.side ),
width: new Float32Array( this.widths ),
colors: new Float32Array( this.colors ),
}
};
Multiline.prototype = {
constructor: Multiline,
compareV3: function( a, b ) {
var aa = a * 6, ab = b * 6;
return (
( this.positions[ aa ] === this.positions[ ab ] ) &&
( this.positions[ aa + 1 ] === this.positions[ ab + 1 ] ) &&
( this.positions[ aa + 2 ] === this.positions[ ab + 2 ] )
);
},
copyV3: function( a ) {
var aa = a * 6;
return [ this.positions[ aa ], this.positions[ aa + 1 ], this.positions[ aa + 2 ] ];
},
init: function( vertices, colors, widths ) {
var j, ver, cnt, col, wid, n, len = this.length;
for( j = 0; j < len; j++ ) {
ver = vertices[ j ];
col = colors[ j ];
wid = widths[ j ];
cnt = j / vertices.length;
this.positions.push( ver.x, ver.y, ver.z );
this.positions.push( ver.x, ver.y, ver.z );
this.lineIndex.push(this.idx + cnt);
this.lineIndex.push(this.idx + cnt);
this.colors.push(col.r, col.g, col.b);
this.colors.push(col.r, col.g, col.b);
this.widths.push(wid);
this.widths.push(wid);
this.side.push( 1 );
this.side.push( -1 );
}
for( j = 0; j < len - 1; j++ ) {
n = j + j;
this.index.push( n, n + 1, n + 2 );
this.index.push( n + 2, n + 1, n + 3 );
}
},
process: function() {
var j, v, l = this.positions.length / 6;
v = this.compareV3( 0, l - 1 ) ? this.copyV3( l - 2 ) : this.copyV3( 0 ) ;
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
for( j = 0; j < l - 1; j++ ) {
v = this.copyV3( j );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
for( j = 1; j < l; j++ ) {
v = this.copyV3( j );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
v = this.compareV3( l - 1, 0 ) ? this.copyV3( 1 ) : this.copyV3( l - 1 ) ;
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
};
| {
lat = pool[i].lat;
lon = pool[i].lon;
vec3 = latLonRadToVector3(lat, lon, cfg.radius);
// keep start point
seeds.push(vec3.x, vec3.y, vec3.z);
// over vertices
for (j=0; j<length; j++) {
u = datagrams.ugrdprs.linearXY(doe, lat, lon);
v = datagrams.vgrdprs.linearXY(doe, lat, lon);
speed = Math.hypot(u, v);
u /= Math.cos(lat * DEGRAD);
color = new Color().setHSL(cfg.hue, sat, speed / 100);
width = H.clampScale(speed, 0, 50, 0.5, 2.0);
positions[i].push(vec3);
colors[i].push(color);
widths[i].push(width);
spcl.setFromVector3(vec3);
spcl.theta += u * cfg.factor; // east-direction
spcl.phi -= v * cfg.factor; // north-direction
vec3 = vec3.setFromSpherical(spcl).clone();
lat = vec3toLat(vec3, cfg.radius);
lon = vec3toLon(vec3, cfg.radius);
}
} | conditional_block |
sim.worker.jetstream.js | /*
6 sectors * 512 lines * 60 vertices, t=threads, m=stamps
jetstream t: 1 m: 10 ms: 15651
jetstream t: 2 m: 10 ms: 9348
jetstream t: 3 m: 10 ms: 8583
jetstream t: 4 m: 10 ms: 7969
jetstream t: 5 m: 10 ms: 7891
jetstream t: 6 m: 10 ms: 8037
jetstream t: 7 m: 10 ms: 7981
jetstream t: 8 m: 10 ms: 8594
jetstream t: 9 m: 10 ms: 9060
jetstream t: 10 m: 10 ms: 8797
*/
if( typeof importScripts === 'function') {
const
PI = Math.PI,
TAU = 2 * PI,
PI2 = PI / 2,
RADIUS = 1.0,
DEGRAD = PI / 180.0
;
var
name = 'jet.worker',
cfg, topics, doe, pool,
datagrams = {
ugrdprs: null,
vgrdprs: null
},
prelines = null, // pos, wid, col per sector
multilines = null, // lines per sector
sectors = null // with attributes per sector
;
function vec3toLat (v, radius) {return 90 - (Math.acos(v.y / radius)) * 180 / PI;}
function vec3toLon (v, radius) {return ((270 + (Math.atan2(v.x , v.z)) * 180 / PI) % 360);}
function vector3ToLatLong (v, radius) {
return {
lat: 90 - (Math.acos(v.y / radius)) * 180 / Math.PI,
lon: ((270 + (Math.atan2(v.x , v.z)) * 180 / Math.PI) % 360)
};
}
function latLonRadToVector3 (lat, lon, radius) {
var phi = lat * Math.PI / 180;
var theta = (lon - 180) * Math.PI / 180;
var x = -radius * Math.cos(phi) * Math.cos(theta);
var y = radius * Math.sin(phi);
var z = radius * Math.cos(phi) * Math.sin(theta);
return new Vector3(x, y, z);
}
function | (sector, amount) {
var i, j = 0, coord, out = [], len = pool.length;
for (i=0; j<amount && i<len; i++) {
coord = pool[i];
if (
coord.lat < sector[0] &&
coord.lon > sector[1] &&
coord.lat > sector[2] &&
coord.lon < sector[3]
) {
out.push(coord);
j += 1;
}
}
return out;
}
function onmessage (event) {
var
id = event.data.id,
topic = event.data.topic,
payload = event.data.payload,
callback = function (id, result, transferables) {
postMessage({id, result}, transferables);
}
;
if (topics[topic]) {
topics[topic](id, payload, callback);
} else {
console.warn(name + ': unknown topic', topic);
}
}
topics = {
importScripts: function (id, payload, callback) {
importScripts.apply(null, payload.scripts);
callback(id, null);
},
retrieve: function (id, payload, callback) {
var datagramm;
cfg = payload.cfg;
doe = payload.doe;
pool = payload.pool;
RES.load({ urls: payload.urls, onFinish: function (err, responses) {
if (err) { throw err } else {
responses.forEach(function (response) {
datagramm = new SIM.Datagram(response.data);
datagrams[datagramm.vari] = datagramm;
});
topics.prepare(id, payload, function () {
topics.process(id, payload, function () {
topics.combine(id, payload, function (id, result, transferables) {
callback(id, result, transferables)
});
});
});
}
}});
},
prepare: function (id, payload, callback) {
var
t0 = Date.now(),
i, j, u, v, speed, width, pool, lat, lon, color, vec3, seeds, positions, widths, colors, seeds,
sat = 0.4,
spcl = new Spherical(),
length = cfg.length,
amount = NaN,
filler = () => [],
counter = (a, b) => a + b.positions.length
;
// over sectors
prelines = cfg.sim.sectors.map( sector => {
seeds = [];
pool = filterPool(sector, cfg.amount);
amount = pool.length;
positions = new Array(amount).fill(0).map(filler);
colors = new Array(amount).fill(0).map(filler);
widths = new Array(amount).fill(0).map(filler);
// over lines
for (i=0; i<amount; i++) {
lat = pool[i].lat;
lon = pool[i].lon;
vec3 = latLonRadToVector3(lat, lon, cfg.radius);
// keep start point
seeds.push(vec3.x, vec3.y, vec3.z);
// over vertices
for (j=0; j<length; j++) {
u = datagrams.ugrdprs.linearXY(doe, lat, lon);
v = datagrams.vgrdprs.linearXY(doe, lat, lon);
speed = Math.hypot(u, v);
u /= Math.cos(lat * DEGRAD);
color = new Color().setHSL(cfg.hue, sat, speed / 100);
width = H.clampScale(speed, 0, 50, 0.5, 2.0);
positions[i].push(vec3);
colors[i].push(color);
widths[i].push(width);
spcl.setFromVector3(vec3);
spcl.theta += u * cfg.factor; // east-direction
spcl.phi -= v * cfg.factor; // north-direction
vec3 = vec3.setFromSpherical(spcl).clone();
lat = vec3toLat(vec3, cfg.radius);
lon = vec3toLon(vec3, cfg.radius);
}
}
return { seeds: new Float32Array(seeds), positions, colors, widths };
});
// debugger;
// console.log(name + ': prepare', id, Date.now() - t0, prelines.reduce(counter, 0));
callback(id, {}, [])
},
process: function (id, payload, callback) {
var
t0 = Date.now(),
counter = (a, b) => a + b.length
;
multilines = prelines.map(preline => {
var
idx = 0,
multiline = H.zip(
preline.positions,
preline.colors,
preline.widths,
(vectors, colors, widths) => new Multiline(idx++, vectors, colors, widths)
)
;
multiline.seeds = preline.seeds;
return multiline;
});
// debugger;
// console.log(name + ': process', id, Date.now() - t0, multilines.reduce(counter, 0));
callback(id, {}, []);
},
combine: function (id, payload, callback) {
var
transferables,
attributeTypes = {
colors: Float32Array,
index: Uint16Array,
lineIndex: Float32Array,
next: Float32Array,
position: Float32Array,
previous: Float32Array,
side: Float32Array,
width: Float32Array,
},
textures = {
u: datagrams.ugrdprs.data[doe],
v: datagrams.vgrdprs.data[doe],
}
;
// debugger;
// over sectors (n=6)
sectors = multilines.map( lines => {
var
length,
attributes = {},
uniforms = {
seeds: lines.seeds
}
;
delete lines.seeds;
// prepare attributes
H.each(attributeTypes, (name, type) => {
length = lines[0].attributes[name].length * lines.length;
attributes[name] = new type(length);
});
// over attributes (n=8)
H.each(attributeTypes, (name) => {
// debugger;
// if (name === 'seeds') { return; }
var
i, source, length,
pointer = 0,
indexOffset = 0,
positLength = lines[0].attributes['position'].length / 3,
target = attributes[name]
;
// over lines (n=512)
H.each(lines, (_, line) => {
source = line.attributes[name];
length = source.length;
if (name === 'index'){
for (i=0; i<length; i++) {
target[pointer + i] = source[i] + indexOffset;
}
} else if (name !== 'seeds') {
for (i=0; i<length; i++) {
target[pointer + i] = source[i];
}
}
pointer += length;
indexOffset += positLength;
});
});
return { attributes, uniforms };
});
// finish transferables
transferables = [textures.u.buffer, textures.v.buffer];
H.each(sectors, (_, sector) => {
transferables.push(sector.uniforms.seeds.buffer);
H.each(attributeTypes, (name) => {
transferables.push(sector.attributes[name].buffer);
});
});
// TODO: check edge + transferable
callback(id, {sectors, textures}, transferables);
}
};
}
function Multiline ( idx, vertices, colors, widths ) {
this.idx = idx;
this.index = [];
this.lineIndex = [];
this.next = [];
this.positions = [];
this.previous = [];
this.side = [];
this.widths = [];
this.colors = [];
this.length = vertices.length;
this.init(vertices, colors, widths);
this.process();
// TODO: Needed? ~15% faster
this.attributes = {
index: new Uint16Array( this.index ),
lineIndex: new Float32Array( this.lineIndex ),
next: new Float32Array( this.next ),
position: new Float32Array( this.positions ),
previous: new Float32Array( this.previous ),
side: new Float32Array( this.side ),
width: new Float32Array( this.widths ),
colors: new Float32Array( this.colors ),
}
};
Multiline.prototype = {
constructor: Multiline,
compareV3: function( a, b ) {
var aa = a * 6, ab = b * 6;
return (
( this.positions[ aa ] === this.positions[ ab ] ) &&
( this.positions[ aa + 1 ] === this.positions[ ab + 1 ] ) &&
( this.positions[ aa + 2 ] === this.positions[ ab + 2 ] )
);
},
copyV3: function( a ) {
var aa = a * 6;
return [ this.positions[ aa ], this.positions[ aa + 1 ], this.positions[ aa + 2 ] ];
},
init: function( vertices, colors, widths ) {
var j, ver, cnt, col, wid, n, len = this.length;
for( j = 0; j < len; j++ ) {
ver = vertices[ j ];
col = colors[ j ];
wid = widths[ j ];
cnt = j / vertices.length;
this.positions.push( ver.x, ver.y, ver.z );
this.positions.push( ver.x, ver.y, ver.z );
this.lineIndex.push(this.idx + cnt);
this.lineIndex.push(this.idx + cnt);
this.colors.push(col.r, col.g, col.b);
this.colors.push(col.r, col.g, col.b);
this.widths.push(wid);
this.widths.push(wid);
this.side.push( 1 );
this.side.push( -1 );
}
for( j = 0; j < len - 1; j++ ) {
n = j + j;
this.index.push( n, n + 1, n + 2 );
this.index.push( n + 2, n + 1, n + 3 );
}
},
process: function() {
var j, v, l = this.positions.length / 6;
v = this.compareV3( 0, l - 1 ) ? this.copyV3( l - 2 ) : this.copyV3( 0 ) ;
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
for( j = 0; j < l - 1; j++ ) {
v = this.copyV3( j );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
for( j = 1; j < l; j++ ) {
v = this.copyV3( j );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
v = this.compareV3( l - 1, 0 ) ? this.copyV3( 1 ) : this.copyV3( l - 1 ) ;
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
};
| filterPool | identifier_name |
sim.worker.jetstream.js | /*
6 sectors * 512 lines * 60 vertices, t=threads, m=stamps
jetstream t: 1 m: 10 ms: 15651
jetstream t: 2 m: 10 ms: 9348
jetstream t: 3 m: 10 ms: 8583
jetstream t: 4 m: 10 ms: 7969
jetstream t: 5 m: 10 ms: 7891
jetstream t: 6 m: 10 ms: 8037
jetstream t: 7 m: 10 ms: 7981
jetstream t: 8 m: 10 ms: 8594
jetstream t: 9 m: 10 ms: 9060
jetstream t: 10 m: 10 ms: 8797
*/
if( typeof importScripts === 'function') {
const
PI = Math.PI,
TAU = 2 * PI,
PI2 = PI / 2,
RADIUS = 1.0,
DEGRAD = PI / 180.0
;
var
name = 'jet.worker',
cfg, topics, doe, pool,
datagrams = {
ugrdprs: null,
vgrdprs: null
},
prelines = null, // pos, wid, col per sector
multilines = null, // lines per sector
sectors = null // with attributes per sector
;
function vec3toLat (v, radius) {return 90 - (Math.acos(v.y / radius)) * 180 / PI;}
function vec3toLon (v, radius) {return ((270 + (Math.atan2(v.x , v.z)) * 180 / PI) % 360);}
function vector3ToLatLong (v, radius) |
function latLonRadToVector3 (lat, lon, radius) {
var phi = lat * Math.PI / 180;
var theta = (lon - 180) * Math.PI / 180;
var x = -radius * Math.cos(phi) * Math.cos(theta);
var y = radius * Math.sin(phi);
var z = radius * Math.cos(phi) * Math.sin(theta);
return new Vector3(x, y, z);
}
function filterPool (sector, amount) {
var i, j = 0, coord, out = [], len = pool.length;
for (i=0; j<amount && i<len; i++) {
coord = pool[i];
if (
coord.lat < sector[0] &&
coord.lon > sector[1] &&
coord.lat > sector[2] &&
coord.lon < sector[3]
) {
out.push(coord);
j += 1;
}
}
return out;
}
function onmessage (event) {
var
id = event.data.id,
topic = event.data.topic,
payload = event.data.payload,
callback = function (id, result, transferables) {
postMessage({id, result}, transferables);
}
;
if (topics[topic]) {
topics[topic](id, payload, callback);
} else {
console.warn(name + ': unknown topic', topic);
}
}
topics = {
importScripts: function (id, payload, callback) {
importScripts.apply(null, payload.scripts);
callback(id, null);
},
retrieve: function (id, payload, callback) {
var datagramm;
cfg = payload.cfg;
doe = payload.doe;
pool = payload.pool;
RES.load({ urls: payload.urls, onFinish: function (err, responses) {
if (err) { throw err } else {
responses.forEach(function (response) {
datagramm = new SIM.Datagram(response.data);
datagrams[datagramm.vari] = datagramm;
});
topics.prepare(id, payload, function () {
topics.process(id, payload, function () {
topics.combine(id, payload, function (id, result, transferables) {
callback(id, result, transferables)
});
});
});
}
}});
},
prepare: function (id, payload, callback) {
var
t0 = Date.now(),
i, j, u, v, speed, width, pool, lat, lon, color, vec3, seeds, positions, widths, colors, seeds,
sat = 0.4,
spcl = new Spherical(),
length = cfg.length,
amount = NaN,
filler = () => [],
counter = (a, b) => a + b.positions.length
;
// over sectors
prelines = cfg.sim.sectors.map( sector => {
seeds = [];
pool = filterPool(sector, cfg.amount);
amount = pool.length;
positions = new Array(amount).fill(0).map(filler);
colors = new Array(amount).fill(0).map(filler);
widths = new Array(amount).fill(0).map(filler);
// over lines
for (i=0; i<amount; i++) {
lat = pool[i].lat;
lon = pool[i].lon;
vec3 = latLonRadToVector3(lat, lon, cfg.radius);
// keep start point
seeds.push(vec3.x, vec3.y, vec3.z);
// over vertices
for (j=0; j<length; j++) {
u = datagrams.ugrdprs.linearXY(doe, lat, lon);
v = datagrams.vgrdprs.linearXY(doe, lat, lon);
speed = Math.hypot(u, v);
u /= Math.cos(lat * DEGRAD);
color = new Color().setHSL(cfg.hue, sat, speed / 100);
width = H.clampScale(speed, 0, 50, 0.5, 2.0);
positions[i].push(vec3);
colors[i].push(color);
widths[i].push(width);
spcl.setFromVector3(vec3);
spcl.theta += u * cfg.factor; // east-direction
spcl.phi -= v * cfg.factor; // north-direction
vec3 = vec3.setFromSpherical(spcl).clone();
lat = vec3toLat(vec3, cfg.radius);
lon = vec3toLon(vec3, cfg.radius);
}
}
return { seeds: new Float32Array(seeds), positions, colors, widths };
});
// debugger;
// console.log(name + ': prepare', id, Date.now() - t0, prelines.reduce(counter, 0));
callback(id, {}, [])
},
process: function (id, payload, callback) {
var
t0 = Date.now(),
counter = (a, b) => a + b.length
;
multilines = prelines.map(preline => {
var
idx = 0,
multiline = H.zip(
preline.positions,
preline.colors,
preline.widths,
(vectors, colors, widths) => new Multiline(idx++, vectors, colors, widths)
)
;
multiline.seeds = preline.seeds;
return multiline;
});
// debugger;
// console.log(name + ': process', id, Date.now() - t0, multilines.reduce(counter, 0));
callback(id, {}, []);
},
combine: function (id, payload, callback) {
var
transferables,
attributeTypes = {
colors: Float32Array,
index: Uint16Array,
lineIndex: Float32Array,
next: Float32Array,
position: Float32Array,
previous: Float32Array,
side: Float32Array,
width: Float32Array,
},
textures = {
u: datagrams.ugrdprs.data[doe],
v: datagrams.vgrdprs.data[doe],
}
;
// debugger;
// over sectors (n=6)
sectors = multilines.map( lines => {
var
length,
attributes = {},
uniforms = {
seeds: lines.seeds
}
;
delete lines.seeds;
// prepare attributes
H.each(attributeTypes, (name, type) => {
length = lines[0].attributes[name].length * lines.length;
attributes[name] = new type(length);
});
// over attributes (n=8)
H.each(attributeTypes, (name) => {
// debugger;
// if (name === 'seeds') { return; }
var
i, source, length,
pointer = 0,
indexOffset = 0,
positLength = lines[0].attributes['position'].length / 3,
target = attributes[name]
;
// over lines (n=512)
H.each(lines, (_, line) => {
source = line.attributes[name];
length = source.length;
if (name === 'index'){
for (i=0; i<length; i++) {
target[pointer + i] = source[i] + indexOffset;
}
} else if (name !== 'seeds') {
for (i=0; i<length; i++) {
target[pointer + i] = source[i];
}
}
pointer += length;
indexOffset += positLength;
});
});
return { attributes, uniforms };
});
// finish transferables
transferables = [textures.u.buffer, textures.v.buffer];
H.each(sectors, (_, sector) => {
transferables.push(sector.uniforms.seeds.buffer);
H.each(attributeTypes, (name) => {
transferables.push(sector.attributes[name].buffer);
});
});
// TODO: check edge + transferable
callback(id, {sectors, textures}, transferables);
}
};
}
function Multiline ( idx, vertices, colors, widths ) {
this.idx = idx;
this.index = [];
this.lineIndex = [];
this.next = [];
this.positions = [];
this.previous = [];
this.side = [];
this.widths = [];
this.colors = [];
this.length = vertices.length;
this.init(vertices, colors, widths);
this.process();
// TODO: Needed? ~15% faster
this.attributes = {
index: new Uint16Array( this.index ),
lineIndex: new Float32Array( this.lineIndex ),
next: new Float32Array( this.next ),
position: new Float32Array( this.positions ),
previous: new Float32Array( this.previous ),
side: new Float32Array( this.side ),
width: new Float32Array( this.widths ),
colors: new Float32Array( this.colors ),
}
};
Multiline.prototype = {
constructor: Multiline,
compareV3: function( a, b ) {
var aa = a * 6, ab = b * 6;
return (
( this.positions[ aa ] === this.positions[ ab ] ) &&
( this.positions[ aa + 1 ] === this.positions[ ab + 1 ] ) &&
( this.positions[ aa + 2 ] === this.positions[ ab + 2 ] )
);
},
copyV3: function( a ) {
var aa = a * 6;
return [ this.positions[ aa ], this.positions[ aa + 1 ], this.positions[ aa + 2 ] ];
},
init: function( vertices, colors, widths ) {
var j, ver, cnt, col, wid, n, len = this.length;
for( j = 0; j < len; j++ ) {
ver = vertices[ j ];
col = colors[ j ];
wid = widths[ j ];
cnt = j / vertices.length;
this.positions.push( ver.x, ver.y, ver.z );
this.positions.push( ver.x, ver.y, ver.z );
this.lineIndex.push(this.idx + cnt);
this.lineIndex.push(this.idx + cnt);
this.colors.push(col.r, col.g, col.b);
this.colors.push(col.r, col.g, col.b);
this.widths.push(wid);
this.widths.push(wid);
this.side.push( 1 );
this.side.push( -1 );
}
for( j = 0; j < len - 1; j++ ) {
n = j + j;
this.index.push( n, n + 1, n + 2 );
this.index.push( n + 2, n + 1, n + 3 );
}
},
process: function() {
var j, v, l = this.positions.length / 6;
v = this.compareV3( 0, l - 1 ) ? this.copyV3( l - 2 ) : this.copyV3( 0 ) ;
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
for( j = 0; j < l - 1; j++ ) {
v = this.copyV3( j );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
for( j = 1; j < l; j++ ) {
v = this.copyV3( j );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
v = this.compareV3( l - 1, 0 ) ? this.copyV3( 1 ) : this.copyV3( l - 1 ) ;
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
}
};
| {
return {
lat: 90 - (Math.acos(v.y / radius)) * 180 / Math.PI,
lon: ((270 + (Math.atan2(v.x , v.z)) * 180 / Math.PI) % 360)
};
} | identifier_body |
Material.d.ts | import { Base } from './core/Base';
import { IDictionary } from './core/container';
import { Shader, IShaderUniform } from './Shader';
import { Renderer } from './Renderer';
interface IMaterialOption {
name?: string;
shader?: Shader;
depthTest?: boolean;
depthMask?: boolean;
transparent?: boolean;
blend?: (gl: WebGLRenderingContext) => void;
}
export class Material extends Base {
constructor(option?: IMaterialOption);
name: string; |
uniforms: IDictionary<IShaderUniform>;
shader: Shader;
depthTest: boolean;
depthMask: boolean;
transparent: boolean;
precision: string;
blend: (renderer: Renderer) => void;
bind(renderer: Renderer): void;
setUniform(symbol: string, value: any): void;
setUniforms(object: Object): void;
enableUniform(symbol: string): void;
disableUniform(symbol: string): void;
isUniformEnabled(symbol: string): void;
set(symbol: string, value: any): void;
set(object: Object): void;
get(symbol: string): any;
attachShader(shader: Shader, keepUniform?: boolean): void;
detachShader(): void;
define(shaderType: string, symbol: string, val?: number): void;
unDefine(shaderType: string, symbol: string): void;
isDefined(shaderType: string, symbol: string): void;
getDefine(shaderType: string, symbol: string): number;
enableTexture(symbol: string): void;
enableTexturesAll(): void;
disableTexture(symbol: string): void;
disableTexturesAll(): void;
isTextureEnabled(symbol: string): boolean;
hasUniform(symbol: string): boolean;
setUniform(gl: WebGLRenderingContext, type: string, symbol: string, value: any): boolean;
setUniformBySemantic(gl: WebGLRenderingContext, semantic: string, val: any): boolean;
} | random_line_split | |
Material.d.ts | import { Base } from './core/Base';
import { IDictionary } from './core/container';
import { Shader, IShaderUniform } from './Shader';
import { Renderer } from './Renderer';
interface IMaterialOption {
name?: string;
shader?: Shader;
depthTest?: boolean;
depthMask?: boolean;
transparent?: boolean;
blend?: (gl: WebGLRenderingContext) => void;
}
export class | extends Base {
constructor(option?: IMaterialOption);
name: string;
uniforms: IDictionary<IShaderUniform>;
shader: Shader;
depthTest: boolean;
depthMask: boolean;
transparent: boolean;
precision: string;
blend: (renderer: Renderer) => void;
bind(renderer: Renderer): void;
setUniform(symbol: string, value: any): void;
setUniforms(object: Object): void;
enableUniform(symbol: string): void;
disableUniform(symbol: string): void;
isUniformEnabled(symbol: string): void;
set(symbol: string, value: any): void;
set(object: Object): void;
get(symbol: string): any;
attachShader(shader: Shader, keepUniform?: boolean): void;
detachShader(): void;
define(shaderType: string, symbol: string, val?: number): void;
unDefine(shaderType: string, symbol: string): void;
isDefined(shaderType: string, symbol: string): void;
getDefine(shaderType: string, symbol: string): number;
enableTexture(symbol: string): void;
enableTexturesAll(): void;
disableTexture(symbol: string): void;
disableTexturesAll(): void;
isTextureEnabled(symbol: string): boolean;
hasUniform(symbol: string): boolean;
setUniform(gl: WebGLRenderingContext, type: string, symbol: string, value: any): boolean;
setUniformBySemantic(gl: WebGLRenderingContext, semantic: string, val: any): boolean;
}
| Material | identifier_name |
hearthisat.py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
HEADRequest,
KNOWN_EXTENSIONS,
sanitized_Request,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
)
class HearThisAtIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
_TESTS = [{
'url': 'https://hearthis.at/moofi/dr-kreep',
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
'info_dict': {
'id': '150939',
'ext': 'wav',
'title': 'Moofi - Dr. Kreep',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1421564134,
'description': 'Listen to Dr. Kreep by Moofi on hearthis.at - Modular, Eurorack, Mutable Intruments Braids, Valhalla-DSP',
'upload_date': '20150118',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 71,
'categories': ['Experimental'],
}
}, {
# 'download' link redirects to the original webpage
'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/',
'md5': '5980ceb7c461605d30f1f039df160c6e',
'info_dict': {
'id': '811296',
'ext': 'mp3',
'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!',
'description': 'Listen to DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix! by TwitchSF on hearthis.at - Dance',
'upload_date': '20160328',
'timestamp': 1459186146,
'thumbnail': r're:^https?://.*\.jpg$',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 4360,
'categories': ['Dance'],
},
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
webpage = self._download_webpage(url, display_id)
track_id = self._search_regex(
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
req = sanitized_Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
title = '{artist:s} - {title:s}'.format(**track)
categories = None
if track.get('category'):
categories = [track['category']]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
view_count = str_to_int(self._search_regex(
meta_span % 'plays_count', webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
meta_span % 'likes_count', webpage, 'like count', fatal=False))
comment_count = str_to_int(self._search_regex(
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
duration = str_to_int(self._search_regex(
r'data-length="(\d+)', webpage, 'duration', fatal=False))
timestamp = str_to_int(self._search_regex(
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
formats = []
mp3_url = self._search_regex( | 'vcodec': 'none',
'acodec': 'mp3',
'url': mp3_url,
})
download_path = self._search_regex(
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
webpage, 'download URL', default=None)
if download_path:
download_url = compat_urlparse.urljoin(url, download_path)
ext_req = HEADRequest(download_url)
ext_handle = self._request_webpage(
ext_req, display_id, note='Determining extension')
ext = urlhandle_detect_ext(ext_handle)
if ext in KNOWN_EXTENSIONS:
formats.append({
'format_id': 'download',
'vcodec': 'none',
'ext': ext,
'url': download_url,
'preference': 2, # Usually better quality
})
self._sort_formats(formats)
return {
'id': track_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'categories': categories,
} | r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
webpage, 'mp3 URL', fatal=False)
if mp3_url:
formats.append({
'format_id': 'mp3', | random_line_split |
hearthisat.py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
HEADRequest,
KNOWN_EXTENSIONS,
sanitized_Request,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
)
class HearThisAtIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
_TESTS = [{
'url': 'https://hearthis.at/moofi/dr-kreep',
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
'info_dict': {
'id': '150939',
'ext': 'wav',
'title': 'Moofi - Dr. Kreep',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1421564134,
'description': 'Listen to Dr. Kreep by Moofi on hearthis.at - Modular, Eurorack, Mutable Intruments Braids, Valhalla-DSP',
'upload_date': '20150118',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 71,
'categories': ['Experimental'],
}
}, {
# 'download' link redirects to the original webpage
'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/',
'md5': '5980ceb7c461605d30f1f039df160c6e',
'info_dict': {
'id': '811296',
'ext': 'mp3',
'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!',
'description': 'Listen to DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix! by TwitchSF on hearthis.at - Dance',
'upload_date': '20160328',
'timestamp': 1459186146,
'thumbnail': r're:^https?://.*\.jpg$',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 4360,
'categories': ['Dance'],
},
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
webpage = self._download_webpage(url, display_id)
track_id = self._search_regex(
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
req = sanitized_Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
title = '{artist:s} - {title:s}'.format(**track)
categories = None
if track.get('category'):
categories = [track['category']]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
view_count = str_to_int(self._search_regex(
meta_span % 'plays_count', webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
meta_span % 'likes_count', webpage, 'like count', fatal=False))
comment_count = str_to_int(self._search_regex(
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
duration = str_to_int(self._search_regex(
r'data-length="(\d+)', webpage, 'duration', fatal=False))
timestamp = str_to_int(self._search_regex(
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
formats = []
mp3_url = self._search_regex(
r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
webpage, 'mp3 URL', fatal=False)
if mp3_url:
formats.append({
'format_id': 'mp3',
'vcodec': 'none',
'acodec': 'mp3',
'url': mp3_url,
})
download_path = self._search_regex(
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
webpage, 'download URL', default=None)
if download_path:
|
self._sort_formats(formats)
return {
'id': track_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'categories': categories,
}
| download_url = compat_urlparse.urljoin(url, download_path)
ext_req = HEADRequest(download_url)
ext_handle = self._request_webpage(
ext_req, display_id, note='Determining extension')
ext = urlhandle_detect_ext(ext_handle)
if ext in KNOWN_EXTENSIONS:
formats.append({
'format_id': 'download',
'vcodec': 'none',
'ext': ext,
'url': download_url,
'preference': 2, # Usually better quality
}) | conditional_block |
hearthisat.py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
HEADRequest,
KNOWN_EXTENSIONS,
sanitized_Request,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
)
class HearThisAtIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
_TESTS = [{
'url': 'https://hearthis.at/moofi/dr-kreep',
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
'info_dict': {
'id': '150939',
'ext': 'wav',
'title': 'Moofi - Dr. Kreep',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1421564134,
'description': 'Listen to Dr. Kreep by Moofi on hearthis.at - Modular, Eurorack, Mutable Intruments Braids, Valhalla-DSP',
'upload_date': '20150118',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 71,
'categories': ['Experimental'],
}
}, {
# 'download' link redirects to the original webpage
'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/',
'md5': '5980ceb7c461605d30f1f039df160c6e',
'info_dict': {
'id': '811296',
'ext': 'mp3',
'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!',
'description': 'Listen to DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix! by TwitchSF on hearthis.at - Dance',
'upload_date': '20160328',
'timestamp': 1459186146,
'thumbnail': r're:^https?://.*\.jpg$',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 4360,
'categories': ['Dance'],
},
}]
def | (self, url):
m = re.match(self._VALID_URL, url)
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
webpage = self._download_webpage(url, display_id)
track_id = self._search_regex(
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
req = sanitized_Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
title = '{artist:s} - {title:s}'.format(**track)
categories = None
if track.get('category'):
categories = [track['category']]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
view_count = str_to_int(self._search_regex(
meta_span % 'plays_count', webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
meta_span % 'likes_count', webpage, 'like count', fatal=False))
comment_count = str_to_int(self._search_regex(
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
duration = str_to_int(self._search_regex(
r'data-length="(\d+)', webpage, 'duration', fatal=False))
timestamp = str_to_int(self._search_regex(
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
formats = []
mp3_url = self._search_regex(
r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
webpage, 'mp3 URL', fatal=False)
if mp3_url:
formats.append({
'format_id': 'mp3',
'vcodec': 'none',
'acodec': 'mp3',
'url': mp3_url,
})
download_path = self._search_regex(
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
webpage, 'download URL', default=None)
if download_path:
download_url = compat_urlparse.urljoin(url, download_path)
ext_req = HEADRequest(download_url)
ext_handle = self._request_webpage(
ext_req, display_id, note='Determining extension')
ext = urlhandle_detect_ext(ext_handle)
if ext in KNOWN_EXTENSIONS:
formats.append({
'format_id': 'download',
'vcodec': 'none',
'ext': ext,
'url': download_url,
'preference': 2, # Usually better quality
})
self._sort_formats(formats)
return {
'id': track_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'categories': categories,
}
| _real_extract | identifier_name |
hearthisat.py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
HEADRequest,
KNOWN_EXTENSIONS,
sanitized_Request,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
)
class HearThisAtIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
_TESTS = [{
'url': 'https://hearthis.at/moofi/dr-kreep',
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
'info_dict': {
'id': '150939',
'ext': 'wav',
'title': 'Moofi - Dr. Kreep',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1421564134,
'description': 'Listen to Dr. Kreep by Moofi on hearthis.at - Modular, Eurorack, Mutable Intruments Braids, Valhalla-DSP',
'upload_date': '20150118',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 71,
'categories': ['Experimental'],
}
}, {
# 'download' link redirects to the original webpage
'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/',
'md5': '5980ceb7c461605d30f1f039df160c6e',
'info_dict': {
'id': '811296',
'ext': 'mp3',
'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!',
'description': 'Listen to DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix! by TwitchSF on hearthis.at - Dance',
'upload_date': '20160328',
'timestamp': 1459186146,
'thumbnail': r're:^https?://.*\.jpg$',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 4360,
'categories': ['Dance'],
},
}]
def _real_extract(self, url):
| m = re.match(self._VALID_URL, url)
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
webpage = self._download_webpage(url, display_id)
track_id = self._search_regex(
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
req = sanitized_Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
title = '{artist:s} - {title:s}'.format(**track)
categories = None
if track.get('category'):
categories = [track['category']]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
view_count = str_to_int(self._search_regex(
meta_span % 'plays_count', webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
meta_span % 'likes_count', webpage, 'like count', fatal=False))
comment_count = str_to_int(self._search_regex(
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
duration = str_to_int(self._search_regex(
r'data-length="(\d+)', webpage, 'duration', fatal=False))
timestamp = str_to_int(self._search_regex(
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
formats = []
mp3_url = self._search_regex(
r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
webpage, 'mp3 URL', fatal=False)
if mp3_url:
formats.append({
'format_id': 'mp3',
'vcodec': 'none',
'acodec': 'mp3',
'url': mp3_url,
})
download_path = self._search_regex(
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
webpage, 'download URL', default=None)
if download_path:
download_url = compat_urlparse.urljoin(url, download_path)
ext_req = HEADRequest(download_url)
ext_handle = self._request_webpage(
ext_req, display_id, note='Determining extension')
ext = urlhandle_detect_ext(ext_handle)
if ext in KNOWN_EXTENSIONS:
formats.append({
'format_id': 'download',
'vcodec': 'none',
'ext': ext,
'url': download_url,
'preference': 2, # Usually better quality
})
self._sort_formats(formats)
return {
'id': track_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'categories': categories,
} | identifier_body | |
webpack.config.js | const path = require('path')
module.exports = {
context: __dirname,
entry: './js/ClientApp.js',
devtool: 'eval',
output: {
path: path.join(__dirname, '/public'),
publicPath: '/public/',
filename: 'bundle.js'
},
devServer: {
publicPath: '/public/',
historyApiFallback: true
},
resolve: {
extensions: ['.js', '.json']
},
stats: {
colors: true,
reasons: true,
chunks: true
},
module: {
rules: [
{
enforce: 'pre',
test: /\.js$/,
loader: 'eslint-loader',
exclude: /node_modlues/
},
{
test: /\.json$/,
loader: 'json-loader'
},
{
include: path.resolve(__dirname, 'js'),
test: /\.js$/,
loader: 'babel-loader'
},
{
test: /\.css$/,
use: [
'style-loader', | options: {
url: false
}
}
]
},
{
test: /\.scss$/,
loader: 'style-loader!css-loader!autoprefixer-loader!sass-loader'
},
{
test: /\.otf$/,
loader: 'file-loader?name=fonts/[name].[ext]'
}
]
}
} | {
loader: 'css-loader', | random_line_split |
FieldWithComponentProp.js | import React from 'react';
import {
Box,
Button,
CheckBox,
FileInput, | Form,
FormField,
Grommet,
RadioButtonGroup,
RangeInput,
Select,
TextArea,
} from 'grommet';
import { grommet } from 'grommet/themes';
export const FieldWithComponentProp = () => (
<Grommet full theme={grommet}>
<Box fill overflow="auto" align="center" justify="center" pad="large">
<Box flex={false} width="medium">
<Form
onReset={event => console.log(event)}
onSubmit={({ value, touched }) =>
console.log('Submit', value, touched)
}
>
<FormField
label="Name"
name="name"
required
validate={[
{ regexp: /^[a-z]/i },
name => {
if (name && name.length === 1) return 'must be >1 character';
return undefined;
},
name => {
if (name && name.length <= 2)
return { message: "that's short", status: 'info' };
return undefined;
},
]}
/>
<FormField label="Email" name="email" type="email" required />
<FormField
label="Employee ID"
name="employeeId"
required
validate={{ regexp: /^[0-9]{4,6}$/, message: '4-6 digits' }}
/>
<FormField name="subscribe" component={CheckBox} label="Subscribe?" />
<FormField
name="ampm"
component={RadioButtonGroup}
options={['morning', 'evening']}
/>
<FormField
label="Size"
name="size"
component={Select}
onChange={event => console.log(event)}
options={['small', 'medium', 'large', 'xlarge']}
/>
<FormField label="Comments" name="comments" component={TextArea} />
<FormField
label="Age"
name="age"
component={RangeInput}
pad
min={15}
max={75}
/>
<FormField label="File" name="file" component={FileInput} />
<FormField
label="Custom"
name="custom"
component={props => <input {...props} />}
/>
<Box direction="row" justify="between" margin={{ top: 'medium' }}>
<Button label="Cancel" />
<Button type="reset" label="Reset" />
<Button type="submit" label="Update" primary />
</Box>
</Form>
</Box>
</Box>
</Grommet>
);
FieldWithComponentProp.storyName = 'Field with component prop';
export default {
title: 'Input/Form/Field with component prop',
}; | random_line_split | |
queues.js | import React from "react";
import {map, sortBy, filter} from "lodash";
import BaseComponent from "../base-component";
import Config from "../../tools/config";
const cx = require("classnames");
var navigate = require('react-mini-router').navigate;
const EMPTY_QUEUE_CONFIG_KEY = "home.queues.empty";
export default class QueueList extends BaseComponent {
constructor(props) {
super(props);
this.state = {
query: "",
empty: Config.get(EMPTY_QUEUE_CONFIG_KEY, false)
};
this.bindThiz("getQueueRows", "doesQueueMatchQuery", "onQueryChange", "onRowClick", "changeEmpty")
}
onQueryChange(query) {
this.assignState({query: query});
}
onRowClick(queue) {
navigate(`/queues/${queue.name}`)
}
getQueueRows() {
if (!this.props.queues) {
return <tr/>
}
let queues = this.props.queues;
if (!this.state.empty) {
queues = filter(queues, (q)=> {
return q.size > 0
})
}
queues = sortBy(queues, "size").reverse();
queues = filter(queues, (q) => {
return this.doesQueueMatchQuery(q)
});
return map(queues, (q)=> {
return (
<tr className="clickable" key={q.name} onClick={()=> {
this.onRowClick(q)
}}>
<td>{q.name}</td>
<td>{q.delayed ? q.size + " (" + q.pending + ")" : q.size}</td>
</tr>
)
})
}
doesQueueMatchQuery(queue) |
changeEmpty() {
this.assignState({empty: !this.state.empty}, ()=> {
Config.set(EMPTY_QUEUE_CONFIG_KEY, this.state.empty)
})
}
render() {
let failedClasses = cx('clickable', {danger: this.props.failed > 0, info: this.props.failed === 0});
return (
<div className="queue-list">
<div className="page-header">
<h3>Queues</h3>
Jobs processed: {this.props.processed}
</div>
<div className="filter-form">
<div className="filter">
<input className="form-control" placeholder="Search for Queue name" type="text" value={this.state.query} onChange={(e)=> {
this.onQueryChange(e.target.value)
}}/>
</div>
<div className="filter">
<label htmlFor="empty">
<input id="empty" type="checkbox" checked={this.state.empty} onChange={(e)=> {
this.changeEmpty(e.target.checked)
}}/>
Show empty queues
</label>
</div>
</div>
<table className="table table-condensed table-striped table-hover">
<thead>
<tr>
<th>Name</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>All</td>
<td>{this.props.pending}</td>
</tr>
{this.getQueueRows()}
<tr className={failedClasses} onClick={()=> {
navigate('/jobs/failed/')
}}>
<td>Failed</td>
<td>{this.props.failed}</td>
</tr>
</tbody>
</table>
</div>
)
}
}
| {
if (this.state.query === "") {
return true
}
let q = this.state.query.toLowerCase();
let name = queue.name.toLowerCase();
return name.indexOf(q) > -1
} | identifier_body |
queues.js | import React from "react";
import {map, sortBy, filter} from "lodash";
import BaseComponent from "../base-component";
import Config from "../../tools/config";
const cx = require("classnames");
var navigate = require('react-mini-router').navigate;
const EMPTY_QUEUE_CONFIG_KEY = "home.queues.empty";
export default class QueueList extends BaseComponent {
constructor(props) {
super(props);
this.state = {
query: "",
empty: Config.get(EMPTY_QUEUE_CONFIG_KEY, false)
};
this.bindThiz("getQueueRows", "doesQueueMatchQuery", "onQueryChange", "onRowClick", "changeEmpty")
}
onQueryChange(query) {
this.assignState({query: query});
}
onRowClick(queue) {
navigate(`/queues/${queue.name}`)
}
getQueueRows() {
if (!this.props.queues) {
return <tr/>
}
let queues = this.props.queues;
if (!this.state.empty) {
queues = filter(queues, (q)=> {
return q.size > 0
})
}
queues = sortBy(queues, "size").reverse();
queues = filter(queues, (q) => {
return this.doesQueueMatchQuery(q)
});
return map(queues, (q)=> {
return (
<tr className="clickable" key={q.name} onClick={()=> {
this.onRowClick(q)
}}>
<td>{q.name}</td>
<td>{q.delayed ? q.size + " (" + q.pending + ")" : q.size}</td>
</tr>
)
})
}
doesQueueMatchQuery(queue) {
if (this.state.query === "") |
let q = this.state.query.toLowerCase();
let name = queue.name.toLowerCase();
return name.indexOf(q) > -1
}
changeEmpty() {
this.assignState({empty: !this.state.empty}, ()=> {
Config.set(EMPTY_QUEUE_CONFIG_KEY, this.state.empty)
})
}
render() {
let failedClasses = cx('clickable', {danger: this.props.failed > 0, info: this.props.failed === 0});
return (
<div className="queue-list">
<div className="page-header">
<h3>Queues</h3>
Jobs processed: {this.props.processed}
</div>
<div className="filter-form">
<div className="filter">
<input className="form-control" placeholder="Search for Queue name" type="text" value={this.state.query} onChange={(e)=> {
this.onQueryChange(e.target.value)
}}/>
</div>
<div className="filter">
<label htmlFor="empty">
<input id="empty" type="checkbox" checked={this.state.empty} onChange={(e)=> {
this.changeEmpty(e.target.checked)
}}/>
Show empty queues
</label>
</div>
</div>
<table className="table table-condensed table-striped table-hover">
<thead>
<tr>
<th>Name</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>All</td>
<td>{this.props.pending}</td>
</tr>
{this.getQueueRows()}
<tr className={failedClasses} onClick={()=> {
navigate('/jobs/failed/')
}}>
<td>Failed</td>
<td>{this.props.failed}</td>
</tr>
</tbody>
</table>
</div>
)
}
}
| {
return true
} | conditional_block |
queues.js | import React from "react";
import {map, sortBy, filter} from "lodash";
import BaseComponent from "../base-component";
import Config from "../../tools/config";
const cx = require("classnames");
var navigate = require('react-mini-router').navigate;
const EMPTY_QUEUE_CONFIG_KEY = "home.queues.empty";
export default class QueueList extends BaseComponent {
constructor(props) {
super(props);
this.state = {
query: "",
empty: Config.get(EMPTY_QUEUE_CONFIG_KEY, false)
};
this.bindThiz("getQueueRows", "doesQueueMatchQuery", "onQueryChange", "onRowClick", "changeEmpty")
}
|
onRowClick(queue) {
navigate(`/queues/${queue.name}`)
}
getQueueRows() {
if (!this.props.queues) {
return <tr/>
}
let queues = this.props.queues;
if (!this.state.empty) {
queues = filter(queues, (q)=> {
return q.size > 0
})
}
queues = sortBy(queues, "size").reverse();
queues = filter(queues, (q) => {
return this.doesQueueMatchQuery(q)
});
return map(queues, (q)=> {
return (
<tr className="clickable" key={q.name} onClick={()=> {
this.onRowClick(q)
}}>
<td>{q.name}</td>
<td>{q.delayed ? q.size + " (" + q.pending + ")" : q.size}</td>
</tr>
)
})
}
doesQueueMatchQuery(queue) {
if (this.state.query === "") {
return true
}
let q = this.state.query.toLowerCase();
let name = queue.name.toLowerCase();
return name.indexOf(q) > -1
}
changeEmpty() {
this.assignState({empty: !this.state.empty}, ()=> {
Config.set(EMPTY_QUEUE_CONFIG_KEY, this.state.empty)
})
}
render() {
let failedClasses = cx('clickable', {danger: this.props.failed > 0, info: this.props.failed === 0});
return (
<div className="queue-list">
<div className="page-header">
<h3>Queues</h3>
Jobs processed: {this.props.processed}
</div>
<div className="filter-form">
<div className="filter">
<input className="form-control" placeholder="Search for Queue name" type="text" value={this.state.query} onChange={(e)=> {
this.onQueryChange(e.target.value)
}}/>
</div>
<div className="filter">
<label htmlFor="empty">
<input id="empty" type="checkbox" checked={this.state.empty} onChange={(e)=> {
this.changeEmpty(e.target.checked)
}}/>
Show empty queues
</label>
</div>
</div>
<table className="table table-condensed table-striped table-hover">
<thead>
<tr>
<th>Name</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>All</td>
<td>{this.props.pending}</td>
</tr>
{this.getQueueRows()}
<tr className={failedClasses} onClick={()=> {
navigate('/jobs/failed/')
}}>
<td>Failed</td>
<td>{this.props.failed}</td>
</tr>
</tbody>
</table>
</div>
)
}
} | onQueryChange(query) {
this.assignState({query: query});
} | random_line_split |
queues.js | import React from "react";
import {map, sortBy, filter} from "lodash";
import BaseComponent from "../base-component";
import Config from "../../tools/config";
const cx = require("classnames");
var navigate = require('react-mini-router').navigate;
const EMPTY_QUEUE_CONFIG_KEY = "home.queues.empty";
export default class QueueList extends BaseComponent {
| (props) {
super(props);
this.state = {
query: "",
empty: Config.get(EMPTY_QUEUE_CONFIG_KEY, false)
};
this.bindThiz("getQueueRows", "doesQueueMatchQuery", "onQueryChange", "onRowClick", "changeEmpty")
}
onQueryChange(query) {
this.assignState({query: query});
}
onRowClick(queue) {
navigate(`/queues/${queue.name}`)
}
getQueueRows() {
if (!this.props.queues) {
return <tr/>
}
let queues = this.props.queues;
if (!this.state.empty) {
queues = filter(queues, (q)=> {
return q.size > 0
})
}
queues = sortBy(queues, "size").reverse();
queues = filter(queues, (q) => {
return this.doesQueueMatchQuery(q)
});
return map(queues, (q)=> {
return (
<tr className="clickable" key={q.name} onClick={()=> {
this.onRowClick(q)
}}>
<td>{q.name}</td>
<td>{q.delayed ? q.size + " (" + q.pending + ")" : q.size}</td>
</tr>
)
})
}
doesQueueMatchQuery(queue) {
if (this.state.query === "") {
return true
}
let q = this.state.query.toLowerCase();
let name = queue.name.toLowerCase();
return name.indexOf(q) > -1
}
changeEmpty() {
this.assignState({empty: !this.state.empty}, ()=> {
Config.set(EMPTY_QUEUE_CONFIG_KEY, this.state.empty)
})
}
render() {
let failedClasses = cx('clickable', {danger: this.props.failed > 0, info: this.props.failed === 0});
return (
<div className="queue-list">
<div className="page-header">
<h3>Queues</h3>
Jobs processed: {this.props.processed}
</div>
<div className="filter-form">
<div className="filter">
<input className="form-control" placeholder="Search for Queue name" type="text" value={this.state.query} onChange={(e)=> {
this.onQueryChange(e.target.value)
}}/>
</div>
<div className="filter">
<label htmlFor="empty">
<input id="empty" type="checkbox" checked={this.state.empty} onChange={(e)=> {
this.changeEmpty(e.target.checked)
}}/>
Show empty queues
</label>
</div>
</div>
<table className="table table-condensed table-striped table-hover">
<thead>
<tr>
<th>Name</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>All</td>
<td>{this.props.pending}</td>
</tr>
{this.getQueueRows()}
<tr className={failedClasses} onClick={()=> {
navigate('/jobs/failed/')
}}>
<td>Failed</td>
<td>{this.props.failed}</td>
</tr>
</tbody>
</table>
</div>
)
}
}
| constructor | identifier_name |
acfun.py | #!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import *
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
url = 'http://danmu.aixifan.com/V2/%s' % id
return get_html(url)
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
info = json.loads(get_html('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
|
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
a = 'http://api.aixifan.com/plays/%s/realSource' % vid
s = json.loads(get_content(a, headers={'deviceType': '1'}))
urls = s['data']['files'][-1]['url']
size = urls_size(urls)
print_info(site_info, title, 'mp4', size)
if not info_only:
download_urls(urls, title, 'mp4', size,
output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except:
pass
def acfun_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
assert re.match(r'http://[^\.]+.acfun.[^\.]+/\D/\D\D(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\".*href=\"[^\"]+\".*title=\"([^\"]+)\"", html)
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1] if video[1] != '删除标签' else title
acfun_download_by_vid(p_vid, p_title,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
site_info = "AcFun.tv"
download = acfun_download
download_playlist = playlist_not_supported('acfun')
| youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) | conditional_block |
acfun.py | #!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import * |
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
url = 'http://danmu.aixifan.com/V2/%s' % id
return get_html(url)
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
info = json.loads(get_html('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
a = 'http://api.aixifan.com/plays/%s/realSource' % vid
s = json.loads(get_content(a, headers={'deviceType': '1'}))
urls = s['data']['files'][-1]['url']
size = urls_size(urls)
print_info(site_info, title, 'mp4', size)
if not info_only:
download_urls(urls, title, 'mp4', size,
output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except:
pass
def acfun_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
assert re.match(r'http://[^\.]+.acfun.[^\.]+/\D/\D\D(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\".*href=\"[^\"]+\".*title=\"([^\"]+)\"", html)
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1] if video[1] != '删除标签' else title
acfun_download_by_vid(p_vid, p_title,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
site_info = "AcFun.tv"
download = acfun_download
download_playlist = playlist_not_supported('acfun') | random_line_split | |
acfun.py | #!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import *
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
url = 'http://danmu.aixifan.com/V2/%s' % id
return get_html(url)
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
info = json.loads(get_html('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
a = 'http://api.aixifan.com/plays/%s/realSource' % vid
s = json.loads(get_content(a, headers={'deviceType': '1'}))
urls = s['data']['files'][-1]['url']
size = urls_size(urls)
print_info(site_info, title, 'mp4', size)
if not info_only:
download_urls(urls, title, 'mp4', size,
output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except:
pass
def acfun_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
| nfo = "AcFun.tv"
download = acfun_download
download_playlist = playlist_not_supported('acfun')
| assert re.match(r'http://[^\.]+.acfun.[^\.]+/\D/\D\D(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\".*href=\"[^\"]+\".*title=\"([^\"]+)\"", html)
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1] if video[1] != '删除标签' else title
acfun_download_by_vid(p_vid, p_title,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
site_i | identifier_body |
acfun.py | #!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import *
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
url = 'http://danmu.aixifan.com/V2/%s' % id
return get_html(url)
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
info = json.loads(get_html('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
a = 'http://api.aixifan.com/plays/%s/realSource' % vid
s = json.loads(get_content(a, headers={'deviceType': '1'}))
urls = s['data']['files'][-1]['url']
size = urls_size(urls)
print_info(site_info, title, 'mp4', size)
if not info_only:
download_urls(urls, title, 'mp4', size,
output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except:
pass
def | (url, output_dir='.', merge=True, info_only=False, **kwargs):
assert re.match(r'http://[^\.]+.acfun.[^\.]+/\D/\D\D(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\".*href=\"[^\"]+\".*title=\"([^\"]+)\"", html)
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1] if video[1] != '删除标签' else title
acfun_download_by_vid(p_vid, p_title,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
site_info = "AcFun.tv"
download = acfun_download
download_playlist = playlist_not_supported('acfun')
| acfun_download | identifier_name |
one-use-in-struct.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we do not warn for named lifetimes in structs,
// even when they are only used once (since to not use a named
// lifetime is illegal!)
//
// compile-pass
#![deny(single_use_lifetimes)] | data: &'f u32
}
enum Bar<'f> {
Data(&'f u32)
}
trait Baz<'f> { }
fn main() { } | #![allow(dead_code)]
#![allow(unused_variables)]
struct Foo<'f> { | random_line_split |
one-use-in-struct.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we do not warn for named lifetimes in structs,
// even when they are only used once (since to not use a named
// lifetime is illegal!)
//
// compile-pass
#![deny(single_use_lifetimes)]
#![allow(dead_code)]
#![allow(unused_variables)]
struct Foo<'f> {
data: &'f u32
}
enum Bar<'f> {
Data(&'f u32)
}
trait Baz<'f> { }
fn | () { }
| main | identifier_name |
widgetfactory.ts | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
'use strict';
import {
IKernelId
} from 'jupyter-js-services';
import {
IWidgetFactory, IDocumentContext, findKernel
} from 'jupyter-js-ui/lib/docmanager';
import {
RenderMime
} from 'jupyter-js-ui/lib/rendermime';
import {
MimeData as IClipboard
} from 'phosphor-dragdrop';
import {
Widget
} from 'phosphor-widget';
import {
ToolbarItems
} from './default-toolbar'; | import {
INotebookModel
} from './model';
import {
NotebookPanel
} from './panel';
/**
* A widget factory for notebook panels.
*/
export
class NotebookWidgetFactory implements IWidgetFactory<NotebookPanel> {
/**
* Construct a new notebook widget factory.
*/
constructor(rendermime: RenderMime<Widget>, clipboard: IClipboard) {
this._rendermime = rendermime.clone();
this._clipboard = clipboard;
}
/**
* Get whether the factory has been disposed.
*/
get isDisposed(): boolean {
return this._rendermime === null;
}
/**
* Dispose of the resources used by the factory.
*/
dispose(): void {
this._rendermime = null;
this._clipboard = null;
}
/**
* Create a new widget.
*/
createNew(model: INotebookModel, context: IDocumentContext, kernel?: IKernelId): NotebookPanel {
let rendermime = this._rendermime.clone();
if (kernel) {
context.changeKernel(kernel);
} else {
let name = findKernel(model.defaultKernelName, model.defaultKernelLanguage, context.kernelspecs);
context.changeKernel({ name });
}
let panel = new NotebookPanel(model, rendermime, context, this._clipboard);
ToolbarItems.populateDefaults(panel);
return panel;
}
/**
* Take an action on a widget before closing it.
*
* @returns A promise that resolves to true if the document should close
* and false otherwise.
*/
beforeClose(model: INotebookModel, context: IDocumentContext, widget: NotebookPanel): Promise<boolean> {
// No special action required.
return Promise.resolve(true);
}
private _rendermime: RenderMime<Widget> = null;
private _clipboard: IClipboard = null;
} | random_line_split | |
widgetfactory.ts | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
'use strict';
import {
IKernelId
} from 'jupyter-js-services';
import {
IWidgetFactory, IDocumentContext, findKernel
} from 'jupyter-js-ui/lib/docmanager';
import {
RenderMime
} from 'jupyter-js-ui/lib/rendermime';
import {
MimeData as IClipboard
} from 'phosphor-dragdrop';
import {
Widget
} from 'phosphor-widget';
import {
ToolbarItems
} from './default-toolbar';
import {
INotebookModel
} from './model';
import {
NotebookPanel
} from './panel';
/**
* A widget factory for notebook panels.
*/
export
class NotebookWidgetFactory implements IWidgetFactory<NotebookPanel> {
/**
* Construct a new notebook widget factory.
*/
constructor(rendermime: RenderMime<Widget>, clipboard: IClipboard) {
this._rendermime = rendermime.clone();
this._clipboard = clipboard;
}
/**
* Get whether the factory has been disposed.
*/
get isDisposed(): boolean {
return this._rendermime === null;
}
/**
* Dispose of the resources used by the factory.
*/
dispose(): void |
/**
* Create a new widget.
*/
createNew(model: INotebookModel, context: IDocumentContext, kernel?: IKernelId): NotebookPanel {
let rendermime = this._rendermime.clone();
if (kernel) {
context.changeKernel(kernel);
} else {
let name = findKernel(model.defaultKernelName, model.defaultKernelLanguage, context.kernelspecs);
context.changeKernel({ name });
}
let panel = new NotebookPanel(model, rendermime, context, this._clipboard);
ToolbarItems.populateDefaults(panel);
return panel;
}
/**
* Take an action on a widget before closing it.
*
* @returns A promise that resolves to true if the document should close
* and false otherwise.
*/
beforeClose(model: INotebookModel, context: IDocumentContext, widget: NotebookPanel): Promise<boolean> {
// No special action required.
return Promise.resolve(true);
}
private _rendermime: RenderMime<Widget> = null;
private _clipboard: IClipboard = null;
}
| {
this._rendermime = null;
this._clipboard = null;
} | identifier_body |
widgetfactory.ts | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
'use strict';
import {
IKernelId
} from 'jupyter-js-services';
import {
IWidgetFactory, IDocumentContext, findKernel
} from 'jupyter-js-ui/lib/docmanager';
import {
RenderMime
} from 'jupyter-js-ui/lib/rendermime';
import {
MimeData as IClipboard
} from 'phosphor-dragdrop';
import {
Widget
} from 'phosphor-widget';
import {
ToolbarItems
} from './default-toolbar';
import {
INotebookModel
} from './model';
import {
NotebookPanel
} from './panel';
/**
* A widget factory for notebook panels.
*/
export
class NotebookWidgetFactory implements IWidgetFactory<NotebookPanel> {
/**
* Construct a new notebook widget factory.
*/
| (rendermime: RenderMime<Widget>, clipboard: IClipboard) {
this._rendermime = rendermime.clone();
this._clipboard = clipboard;
}
/**
* Get whether the factory has been disposed.
*/
get isDisposed(): boolean {
return this._rendermime === null;
}
/**
* Dispose of the resources used by the factory.
*/
dispose(): void {
this._rendermime = null;
this._clipboard = null;
}
/**
* Create a new widget.
*/
createNew(model: INotebookModel, context: IDocumentContext, kernel?: IKernelId): NotebookPanel {
let rendermime = this._rendermime.clone();
if (kernel) {
context.changeKernel(kernel);
} else {
let name = findKernel(model.defaultKernelName, model.defaultKernelLanguage, context.kernelspecs);
context.changeKernel({ name });
}
let panel = new NotebookPanel(model, rendermime, context, this._clipboard);
ToolbarItems.populateDefaults(panel);
return panel;
}
/**
* Take an action on a widget before closing it.
*
* @returns A promise that resolves to true if the document should close
* and false otherwise.
*/
beforeClose(model: INotebookModel, context: IDocumentContext, widget: NotebookPanel): Promise<boolean> {
// No special action required.
return Promise.resolve(true);
}
private _rendermime: RenderMime<Widget> = null;
private _clipboard: IClipboard = null;
}
| constructor | identifier_name |
widgetfactory.ts | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
'use strict';
import {
IKernelId
} from 'jupyter-js-services';
import {
IWidgetFactory, IDocumentContext, findKernel
} from 'jupyter-js-ui/lib/docmanager';
import {
RenderMime
} from 'jupyter-js-ui/lib/rendermime';
import {
MimeData as IClipboard
} from 'phosphor-dragdrop';
import {
Widget
} from 'phosphor-widget';
import {
ToolbarItems
} from './default-toolbar';
import {
INotebookModel
} from './model';
import {
NotebookPanel
} from './panel';
/**
* A widget factory for notebook panels.
*/
export
class NotebookWidgetFactory implements IWidgetFactory<NotebookPanel> {
/**
* Construct a new notebook widget factory.
*/
constructor(rendermime: RenderMime<Widget>, clipboard: IClipboard) {
this._rendermime = rendermime.clone();
this._clipboard = clipboard;
}
/**
* Get whether the factory has been disposed.
*/
get isDisposed(): boolean {
return this._rendermime === null;
}
/**
* Dispose of the resources used by the factory.
*/
dispose(): void {
this._rendermime = null;
this._clipboard = null;
}
/**
* Create a new widget.
*/
createNew(model: INotebookModel, context: IDocumentContext, kernel?: IKernelId): NotebookPanel {
let rendermime = this._rendermime.clone();
if (kernel) {
context.changeKernel(kernel);
} else |
let panel = new NotebookPanel(model, rendermime, context, this._clipboard);
ToolbarItems.populateDefaults(panel);
return panel;
}
/**
* Take an action on a widget before closing it.
*
* @returns A promise that resolves to true if the document should close
* and false otherwise.
*/
beforeClose(model: INotebookModel, context: IDocumentContext, widget: NotebookPanel): Promise<boolean> {
// No special action required.
return Promise.resolve(true);
}
private _rendermime: RenderMime<Widget> = null;
private _clipboard: IClipboard = null;
}
| {
let name = findKernel(model.defaultKernelName, model.defaultKernelLanguage, context.kernelspecs);
context.changeKernel({ name });
} | conditional_block |
test_pdfbase_encodings.py | from reportlab.test import unittest
from reportlab.test.utils import makeSuiteForClasses, outputfile, printLocation, NearTestCase
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
textPat = re.compile(r'\([^(]*\)')
#test sentences
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
"""Utility to rip out the PDF text within a block of PDF operators.
PDF will show a string draw as something like "(Hello World) Tj"
i.e. text is in curved brackets. Crude and dirty, probably fails
on escaped brackets.
"""
found = textPat.findall(pdfOps)
#chop off '(' and ')'
return map(lambda x:x[1:-1], found)
def subsetToUnicode(ttf, subsetCodeStr):
"""Return unicode string represented by given subsetCode string
as found when TrueType font rendered to PDF, ttf must be the font
object that was used."""
# This relies on TTFont internals and uses the first document
# and subset it finds
subset = ttf.state.values()[0].subsets[0]
chrs = []
for codeStr in subsetCodeStr.split('\\'):
|
return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
"""Tests of expected Unicode and encoding behaviour
"""
def setUp(self):
self.luxi = TTFont("Luxi", "luxiserif.ttf")
pdfmetrics.registerFont(self.luxi)
self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
self.styTrueType = ParagraphStyle(name='TrueType', fontName='luxi')
def testStringWidth(self):
msg = 'Hello World'
self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(msg, 'Luxi', 10),50.263671875)
uniMsg1 = u"Hello World"
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Luxi', 10),50.263671875)
# Courier are all 600 ems wide. So if one 'measures as utf8' one will
# get a wrong width as extra characters are seen
self.assertEquals(len(testCp1252),52)
self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
# the test string has 5 more bytes and so "measures too long" if passed to
# a single-byte font which treats it as a single-byte string.
self.assertEquals(len(testUTF8),57)
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
self.assertEquals(len(testUni),52)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
# now try a TrueType font. Should be able to accept Unicode or UTF8
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Luxi', 10),224.638671875)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Luxi', 10),224.638671875)
def testUtf8Canvas(self):
"""Verify canvas declared as utf8 autoconverts.
This assumes utf8 input. It converts to the encoding of the
underlying font, so both text lines APPEAR the same."""
c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
c.drawString(100,700, testUTF8)
# Set a font with UTF8 encoding
c.setFont('Luxi', 12)
# This should pass the UTF8 through unchanged
c.drawString(100,600, testUTF8)
# and this should convert from Unicode to UTF8
c.drawString(100,500, testUni)
# now add a paragraph in Latin-1 in the latin-1 style
p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
w, h = p.wrap(150, 100)
p.drawOn(c, 100, 400) #3
c.rect(100,300,w,h)
# now add a paragraph in UTF-8 in the UTF-8 style
p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
w, h = p2.wrap(150, 100)
p2.drawOn(c, 300, 400) #4
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the latin-1 style
p3 = Paragraph(testUni, style=self.styNormal)
w, h = p3.wrap(150, 100)
p3.drawOn(c, 100, 300)
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the UTF-8 style
p4 = Paragraph(testUni, style=self.styTrueType)
p4.wrap(150, 100)
p4.drawOn(c, 300, 300)
c.rect(300,300,w,h)
# now a graphic
d1 = Drawing(400,50)
d1.add(Ellipse(200,25,200,12.5, fillColor=None))
d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
d1.drawOn(c, 100, 150)
# now a graphic in utf8
d2 = Drawing(400,50)
d2.add(Ellipse(200,25,200,12.5, fillColor=None))
d2.add(String(200,25,testUTF8, fontName='Luxi', textAnchor='middle', encoding='utf-8'))
d2.drawOn(c, 100, 100)
# now a graphic in Unicode with T1 font
d3 = Drawing(400,50)
d3.add(Ellipse(200,25,200,12.5, fillColor=None))
d3.add(String(200,25,testUni, textAnchor='middle'))
d3.drawOn(c, 100, 50)
# now a graphic in Unicode with TT font
d4 = Drawing(400,50)
d4.add(Ellipse(200,25,200,12.5, fillColor=None))
d4.add(String(200,25,testUni, fontName='Luxi', textAnchor='middle'))
d4.drawOn(c, 100, 0)
extracted = extractText(c.getCurrentPageContent())
self.assertEquals(extracted[0], expectedCp1252)
self.assertEquals(extracted[1], extracted[2])
#self.assertEquals(subsetToUnicode(self.luxi, extracted[1]), testUni)
c.save()
class FontEncodingTestCase(unittest.TestCase):
"""Make documents with custom encodings of Type 1 built-in fonts.
Nothing really to do with character encodings; this is about hacking the font itself"""
def test0(self):
"Make custom encodings of standard fonts"
# make a custom encoded font.
c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
c.setPageCompression(0)
c.setFont('Helvetica', 12)
c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
# invent a new language where vowels are replaced with letter 'z'
zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
for ch in 'aeiou':
zenc[ord(ch)] = 'z'
for ch in 'AEIOU':
zenc[ord(ch)] = 'Z'
pdfmetrics.registerEncoding(zenc)
# now we can make a font based on this encoding
# AR hack/workaround: the name of the encoding must be a Python codec!
f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
pdfmetrics.registerFont(f)
c.setFont('FontWithoutVowels', 12)
c.drawString(125, 675, "The magic word is squamish ossifrage")
# now demonstrate adding a Euro to MacRoman, which lacks one
c.setFont('Helvetica', 12)
c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
# WinAnsi Helvetica
pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
c.setFont('Helvetica-WinAnsi', 12)
c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
c.setFont('MacHelvNoEuro', 12)
c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
# now make our hacked encoding
euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
euroMac[219] = 'Euro'
pdfmetrics.registerEncoding(euroMac)
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
c.setFont('MacHelvWithEuro', 12)
c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
# now test width setting with and without _rl_accel - harder
# make an encoding where 'm' becomes 'i'
c.setFont('Helvetica', 12)
c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
sample = 'Mmmmm. ' * 6 + 'Mmmm'
c.setFont('Helvetica-Oblique',12)
c.drawString(125, 475, sample)
w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
c.rect(125, 475, w, 12)
narrowEnc = pdfmetrics.Encoding('m-to-i')
narrowEnc[ord('m')] = 'i'
narrowEnc[ord('M')] = 'I'
pdfmetrics.registerEncoding(narrowEnc)
pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
c.setFont('narrow', 12)
c.drawString(125, 450, sample)
w = c.stringWidth(sample, 'narrow', 12)
c.rect(125, 450, w, 12)
c.setFont('Helvetica', 12)
c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
c.setFont('Symbol', 12)
c.drawString(100, 375, 'abcdefghijklmn')
c.setFont('ZapfDingbats', 12)
c.drawString(300, 375, 'abcdefghijklmn')
c.save()
def makeSuite():
return makeSuiteForClasses(
TextEncodingTestCase,
#FontEncodingTestCase - nobbled for now due to old stuff which needs removing.
)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| if codeStr:
chrs.append(unichr(subset[int(codeStr[1:], 8)])) | conditional_block |
test_pdfbase_encodings.py | from reportlab.test import unittest
from reportlab.test.utils import makeSuiteForClasses, outputfile, printLocation, NearTestCase
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
textPat = re.compile(r'\([^(]*\)')
#test sentences
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
"""Utility to rip out the PDF text within a block of PDF operators.
PDF will show a string draw as something like "(Hello World) Tj"
i.e. text is in curved brackets. Crude and dirty, probably fails
on escaped brackets.
"""
found = textPat.findall(pdfOps)
#chop off '(' and ')'
return map(lambda x:x[1:-1], found)
def subsetToUnicode(ttf, subsetCodeStr):
"""Return unicode string represented by given subsetCode string
as found when TrueType font rendered to PDF, ttf must be the font
object that was used."""
# This relies on TTFont internals and uses the first document
# and subset it finds
subset = ttf.state.values()[0].subsets[0]
chrs = []
for codeStr in subsetCodeStr.split('\\'):
if codeStr:
chrs.append(unichr(subset[int(codeStr[1:], 8)]))
return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
"""Tests of expected Unicode and encoding behaviour
"""
def setUp(self):
self.luxi = TTFont("Luxi", "luxiserif.ttf")
pdfmetrics.registerFont(self.luxi)
self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
self.styTrueType = ParagraphStyle(name='TrueType', fontName='luxi')
def testStringWidth(self):
msg = 'Hello World'
self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(msg, 'Luxi', 10),50.263671875)
uniMsg1 = u"Hello World"
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Luxi', 10),50.263671875)
# Courier are all 600 ems wide. So if one 'measures as utf8' one will
# get a wrong width as extra characters are seen
self.assertEquals(len(testCp1252),52)
self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
# the test string has 5 more bytes and so "measures too long" if passed to
# a single-byte font which treats it as a single-byte string.
self.assertEquals(len(testUTF8),57)
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
self.assertEquals(len(testUni),52)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
# now try a TrueType font. Should be able to accept Unicode or UTF8
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Luxi', 10),224.638671875)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Luxi', 10),224.638671875)
def testUtf8Canvas(self):
"""Verify canvas declared as utf8 autoconverts.
This assumes utf8 input. It converts to the encoding of the
underlying font, so both text lines APPEAR the same."""
c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
c.drawString(100,700, testUTF8)
# Set a font with UTF8 encoding
c.setFont('Luxi', 12)
# This should pass the UTF8 through unchanged
c.drawString(100,600, testUTF8)
# and this should convert from Unicode to UTF8
c.drawString(100,500, testUni)
# now add a paragraph in Latin-1 in the latin-1 style
p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
w, h = p.wrap(150, 100)
p.drawOn(c, 100, 400) #3
c.rect(100,300,w,h)
# now add a paragraph in UTF-8 in the UTF-8 style
p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
w, h = p2.wrap(150, 100)
p2.drawOn(c, 300, 400) #4
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the latin-1 style
p3 = Paragraph(testUni, style=self.styNormal)
w, h = p3.wrap(150, 100)
p3.drawOn(c, 100, 300)
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the UTF-8 style
p4 = Paragraph(testUni, style=self.styTrueType)
p4.wrap(150, 100)
p4.drawOn(c, 300, 300)
c.rect(300,300,w,h)
# now a graphic
d1 = Drawing(400,50)
d1.add(Ellipse(200,25,200,12.5, fillColor=None))
d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
d1.drawOn(c, 100, 150)
# now a graphic in utf8
d2 = Drawing(400,50)
d2.add(Ellipse(200,25,200,12.5, fillColor=None))
d2.add(String(200,25,testUTF8, fontName='Luxi', textAnchor='middle', encoding='utf-8'))
d2.drawOn(c, 100, 100)
# now a graphic in Unicode with T1 font
d3 = Drawing(400,50)
d3.add(Ellipse(200,25,200,12.5, fillColor=None))
d3.add(String(200,25,testUni, textAnchor='middle'))
d3.drawOn(c, 100, 50)
# now a graphic in Unicode with TT font
d4 = Drawing(400,50)
d4.add(Ellipse(200,25,200,12.5, fillColor=None))
d4.add(String(200,25,testUni, fontName='Luxi', textAnchor='middle'))
d4.drawOn(c, 100, 0)
extracted = extractText(c.getCurrentPageContent())
self.assertEquals(extracted[0], expectedCp1252)
self.assertEquals(extracted[1], extracted[2])
#self.assertEquals(subsetToUnicode(self.luxi, extracted[1]), testUni)
c.save()
class FontEncodingTestCase(unittest.TestCase):
"""Make documents with custom encodings of Type 1 built-in fonts.
Nothing really to do with character encodings; this is about hacking the font itself"""
def test0(self):
"Make custom encodings of standard fonts"
# make a custom encoded font.
c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
c.setPageCompression(0)
c.setFont('Helvetica', 12)
c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
# invent a new language where vowels are replaced with letter 'z'
zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
for ch in 'aeiou':
zenc[ord(ch)] = 'z'
for ch in 'AEIOU':
zenc[ord(ch)] = 'Z'
pdfmetrics.registerEncoding(zenc)
# now we can make a font based on this encoding
# AR hack/workaround: the name of the encoding must be a Python codec!
f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
pdfmetrics.registerFont(f)
c.setFont('FontWithoutVowels', 12)
c.drawString(125, 675, "The magic word is squamish ossifrage")
# now demonstrate adding a Euro to MacRoman, which lacks one
c.setFont('Helvetica', 12)
c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
# WinAnsi Helvetica
pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
c.setFont('Helvetica-WinAnsi', 12)
c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
c.setFont('MacHelvNoEuro', 12)
c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
# now make our hacked encoding
euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
euroMac[219] = 'Euro'
pdfmetrics.registerEncoding(euroMac)
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
c.setFont('MacHelvWithEuro', 12)
c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
# now test width setting with and without _rl_accel - harder
# make an encoding where 'm' becomes 'i'
c.setFont('Helvetica', 12)
c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
sample = 'Mmmmm. ' * 6 + 'Mmmm'
c.setFont('Helvetica-Oblique',12)
c.drawString(125, 475, sample)
w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
c.rect(125, 475, w, 12)
narrowEnc = pdfmetrics.Encoding('m-to-i')
narrowEnc[ord('m')] = 'i'
narrowEnc[ord('M')] = 'I'
pdfmetrics.registerEncoding(narrowEnc)
pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
c.setFont('narrow', 12)
c.drawString(125, 450, sample)
w = c.stringWidth(sample, 'narrow', 12)
c.rect(125, 450, w, 12)
c.setFont('Helvetica', 12)
c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
c.setFont('Symbol', 12)
c.drawString(100, 375, 'abcdefghijklmn')
c.setFont('ZapfDingbats', 12)
c.drawString(300, 375, 'abcdefghijklmn')
c.save()
def makeSuite():
|
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| return makeSuiteForClasses(
TextEncodingTestCase,
#FontEncodingTestCase - nobbled for now due to old stuff which needs removing.
) | identifier_body |
test_pdfbase_encodings.py | from reportlab.test import unittest
from reportlab.test.utils import makeSuiteForClasses, outputfile, printLocation, NearTestCase
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
textPat = re.compile(r'\([^(]*\)')
#test sentences
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
"""Utility to rip out the PDF text within a block of PDF operators. | found = textPat.findall(pdfOps)
#chop off '(' and ')'
return map(lambda x:x[1:-1], found)
def subsetToUnicode(ttf, subsetCodeStr):
"""Return unicode string represented by given subsetCode string
as found when TrueType font rendered to PDF, ttf must be the font
object that was used."""
# This relies on TTFont internals and uses the first document
# and subset it finds
subset = ttf.state.values()[0].subsets[0]
chrs = []
for codeStr in subsetCodeStr.split('\\'):
if codeStr:
chrs.append(unichr(subset[int(codeStr[1:], 8)]))
return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
"""Tests of expected Unicode and encoding behaviour
"""
def setUp(self):
self.luxi = TTFont("Luxi", "luxiserif.ttf")
pdfmetrics.registerFont(self.luxi)
self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
self.styTrueType = ParagraphStyle(name='TrueType', fontName='luxi')
def testStringWidth(self):
msg = 'Hello World'
self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(msg, 'Luxi', 10),50.263671875)
uniMsg1 = u"Hello World"
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Luxi', 10),50.263671875)
# Courier are all 600 ems wide. So if one 'measures as utf8' one will
# get a wrong width as extra characters are seen
self.assertEquals(len(testCp1252),52)
self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
# the test string has 5 more bytes and so "measures too long" if passed to
# a single-byte font which treats it as a single-byte string.
self.assertEquals(len(testUTF8),57)
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
self.assertEquals(len(testUni),52)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
# now try a TrueType font. Should be able to accept Unicode or UTF8
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Luxi', 10),224.638671875)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Luxi', 10),224.638671875)
def testUtf8Canvas(self):
"""Verify canvas declared as utf8 autoconverts.
This assumes utf8 input. It converts to the encoding of the
underlying font, so both text lines APPEAR the same."""
c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
c.drawString(100,700, testUTF8)
# Set a font with UTF8 encoding
c.setFont('Luxi', 12)
# This should pass the UTF8 through unchanged
c.drawString(100,600, testUTF8)
# and this should convert from Unicode to UTF8
c.drawString(100,500, testUni)
# now add a paragraph in Latin-1 in the latin-1 style
p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
w, h = p.wrap(150, 100)
p.drawOn(c, 100, 400) #3
c.rect(100,300,w,h)
# now add a paragraph in UTF-8 in the UTF-8 style
p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
w, h = p2.wrap(150, 100)
p2.drawOn(c, 300, 400) #4
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the latin-1 style
p3 = Paragraph(testUni, style=self.styNormal)
w, h = p3.wrap(150, 100)
p3.drawOn(c, 100, 300)
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the UTF-8 style
p4 = Paragraph(testUni, style=self.styTrueType)
p4.wrap(150, 100)
p4.drawOn(c, 300, 300)
c.rect(300,300,w,h)
# now a graphic
d1 = Drawing(400,50)
d1.add(Ellipse(200,25,200,12.5, fillColor=None))
d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
d1.drawOn(c, 100, 150)
# now a graphic in utf8
d2 = Drawing(400,50)
d2.add(Ellipse(200,25,200,12.5, fillColor=None))
d2.add(String(200,25,testUTF8, fontName='Luxi', textAnchor='middle', encoding='utf-8'))
d2.drawOn(c, 100, 100)
# now a graphic in Unicode with T1 font
d3 = Drawing(400,50)
d3.add(Ellipse(200,25,200,12.5, fillColor=None))
d3.add(String(200,25,testUni, textAnchor='middle'))
d3.drawOn(c, 100, 50)
# now a graphic in Unicode with TT font
d4 = Drawing(400,50)
d4.add(Ellipse(200,25,200,12.5, fillColor=None))
d4.add(String(200,25,testUni, fontName='Luxi', textAnchor='middle'))
d4.drawOn(c, 100, 0)
extracted = extractText(c.getCurrentPageContent())
self.assertEquals(extracted[0], expectedCp1252)
self.assertEquals(extracted[1], extracted[2])
#self.assertEquals(subsetToUnicode(self.luxi, extracted[1]), testUni)
c.save()
class FontEncodingTestCase(unittest.TestCase):
"""Make documents with custom encodings of Type 1 built-in fonts.
Nothing really to do with character encodings; this is about hacking the font itself"""
def test0(self):
"Make custom encodings of standard fonts"
# make a custom encoded font.
c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
c.setPageCompression(0)
c.setFont('Helvetica', 12)
c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
# invent a new language where vowels are replaced with letter 'z'
zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
for ch in 'aeiou':
zenc[ord(ch)] = 'z'
for ch in 'AEIOU':
zenc[ord(ch)] = 'Z'
pdfmetrics.registerEncoding(zenc)
# now we can make a font based on this encoding
# AR hack/workaround: the name of the encoding must be a Python codec!
f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
pdfmetrics.registerFont(f)
c.setFont('FontWithoutVowels', 12)
c.drawString(125, 675, "The magic word is squamish ossifrage")
# now demonstrate adding a Euro to MacRoman, which lacks one
c.setFont('Helvetica', 12)
c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
# WinAnsi Helvetica
pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
c.setFont('Helvetica-WinAnsi', 12)
c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
c.setFont('MacHelvNoEuro', 12)
c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
# now make our hacked encoding
euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
euroMac[219] = 'Euro'
pdfmetrics.registerEncoding(euroMac)
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
c.setFont('MacHelvWithEuro', 12)
c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
# now test width setting with and without _rl_accel - harder
# make an encoding where 'm' becomes 'i'
c.setFont('Helvetica', 12)
c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
sample = 'Mmmmm. ' * 6 + 'Mmmm'
c.setFont('Helvetica-Oblique',12)
c.drawString(125, 475, sample)
w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
c.rect(125, 475, w, 12)
narrowEnc = pdfmetrics.Encoding('m-to-i')
narrowEnc[ord('m')] = 'i'
narrowEnc[ord('M')] = 'I'
pdfmetrics.registerEncoding(narrowEnc)
pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
c.setFont('narrow', 12)
c.drawString(125, 450, sample)
w = c.stringWidth(sample, 'narrow', 12)
c.rect(125, 450, w, 12)
c.setFont('Helvetica', 12)
c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
c.setFont('Symbol', 12)
c.drawString(100, 375, 'abcdefghijklmn')
c.setFont('ZapfDingbats', 12)
c.drawString(300, 375, 'abcdefghijklmn')
c.save()
def makeSuite():
return makeSuiteForClasses(
TextEncodingTestCase,
#FontEncodingTestCase - nobbled for now due to old stuff which needs removing.
)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation() |
PDF will show a string draw as something like "(Hello World) Tj"
i.e. text is in curved brackets. Crude and dirty, probably fails
on escaped brackets.
""" | random_line_split |
test_pdfbase_encodings.py | from reportlab.test import unittest
from reportlab.test.utils import makeSuiteForClasses, outputfile, printLocation, NearTestCase
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
textPat = re.compile(r'\([^(]*\)')
#test sentences
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
"""Utility to rip out the PDF text within a block of PDF operators.
PDF will show a string draw as something like "(Hello World) Tj"
i.e. text is in curved brackets. Crude and dirty, probably fails
on escaped brackets.
"""
found = textPat.findall(pdfOps)
#chop off '(' and ')'
return map(lambda x:x[1:-1], found)
def | (ttf, subsetCodeStr):
"""Return unicode string represented by given subsetCode string
as found when TrueType font rendered to PDF, ttf must be the font
object that was used."""
# This relies on TTFont internals and uses the first document
# and subset it finds
subset = ttf.state.values()[0].subsets[0]
chrs = []
for codeStr in subsetCodeStr.split('\\'):
if codeStr:
chrs.append(unichr(subset[int(codeStr[1:], 8)]))
return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
"""Tests of expected Unicode and encoding behaviour
"""
def setUp(self):
self.luxi = TTFont("Luxi", "luxiserif.ttf")
pdfmetrics.registerFont(self.luxi)
self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
self.styTrueType = ParagraphStyle(name='TrueType', fontName='luxi')
def testStringWidth(self):
msg = 'Hello World'
self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(msg, 'Luxi', 10),50.263671875)
uniMsg1 = u"Hello World"
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Luxi', 10),50.263671875)
# Courier are all 600 ems wide. So if one 'measures as utf8' one will
# get a wrong width as extra characters are seen
self.assertEquals(len(testCp1252),52)
self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
# the test string has 5 more bytes and so "measures too long" if passed to
# a single-byte font which treats it as a single-byte string.
self.assertEquals(len(testUTF8),57)
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
self.assertEquals(len(testUni),52)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
# now try a TrueType font. Should be able to accept Unicode or UTF8
self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Luxi', 10),224.638671875)
self.assertNear(pdfmetrics.stringWidth(testUni, 'Luxi', 10),224.638671875)
def testUtf8Canvas(self):
"""Verify canvas declared as utf8 autoconverts.
This assumes utf8 input. It converts to the encoding of the
underlying font, so both text lines APPEAR the same."""
c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
c.drawString(100,700, testUTF8)
# Set a font with UTF8 encoding
c.setFont('Luxi', 12)
# This should pass the UTF8 through unchanged
c.drawString(100,600, testUTF8)
# and this should convert from Unicode to UTF8
c.drawString(100,500, testUni)
# now add a paragraph in Latin-1 in the latin-1 style
p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
w, h = p.wrap(150, 100)
p.drawOn(c, 100, 400) #3
c.rect(100,300,w,h)
# now add a paragraph in UTF-8 in the UTF-8 style
p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
w, h = p2.wrap(150, 100)
p2.drawOn(c, 300, 400) #4
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the latin-1 style
p3 = Paragraph(testUni, style=self.styNormal)
w, h = p3.wrap(150, 100)
p3.drawOn(c, 100, 300)
c.rect(100,300,w,h)
# now add a paragraph in Unicode in the UTF-8 style
p4 = Paragraph(testUni, style=self.styTrueType)
p4.wrap(150, 100)
p4.drawOn(c, 300, 300)
c.rect(300,300,w,h)
# now a graphic
d1 = Drawing(400,50)
d1.add(Ellipse(200,25,200,12.5, fillColor=None))
d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
d1.drawOn(c, 100, 150)
# now a graphic in utf8
d2 = Drawing(400,50)
d2.add(Ellipse(200,25,200,12.5, fillColor=None))
d2.add(String(200,25,testUTF8, fontName='Luxi', textAnchor='middle', encoding='utf-8'))
d2.drawOn(c, 100, 100)
# now a graphic in Unicode with T1 font
d3 = Drawing(400,50)
d3.add(Ellipse(200,25,200,12.5, fillColor=None))
d3.add(String(200,25,testUni, textAnchor='middle'))
d3.drawOn(c, 100, 50)
# now a graphic in Unicode with TT font
d4 = Drawing(400,50)
d4.add(Ellipse(200,25,200,12.5, fillColor=None))
d4.add(String(200,25,testUni, fontName='Luxi', textAnchor='middle'))
d4.drawOn(c, 100, 0)
extracted = extractText(c.getCurrentPageContent())
self.assertEquals(extracted[0], expectedCp1252)
self.assertEquals(extracted[1], extracted[2])
#self.assertEquals(subsetToUnicode(self.luxi, extracted[1]), testUni)
c.save()
class FontEncodingTestCase(unittest.TestCase):
"""Make documents with custom encodings of Type 1 built-in fonts.
Nothing really to do with character encodings; this is about hacking the font itself"""
def test0(self):
"Make custom encodings of standard fonts"
# make a custom encoded font.
c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
c.setPageCompression(0)
c.setFont('Helvetica', 12)
c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
# invent a new language where vowels are replaced with letter 'z'
zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
for ch in 'aeiou':
zenc[ord(ch)] = 'z'
for ch in 'AEIOU':
zenc[ord(ch)] = 'Z'
pdfmetrics.registerEncoding(zenc)
# now we can make a font based on this encoding
# AR hack/workaround: the name of the encoding must be a Python codec!
f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
pdfmetrics.registerFont(f)
c.setFont('FontWithoutVowels', 12)
c.drawString(125, 675, "The magic word is squamish ossifrage")
# now demonstrate adding a Euro to MacRoman, which lacks one
c.setFont('Helvetica', 12)
c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
# WinAnsi Helvetica
pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
c.setFont('Helvetica-WinAnsi', 12)
c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
c.setFont('MacHelvNoEuro', 12)
c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
# now make our hacked encoding
euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
euroMac[219] = 'Euro'
pdfmetrics.registerEncoding(euroMac)
pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
c.setFont('MacHelvWithEuro', 12)
c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
# now test width setting with and without _rl_accel - harder
# make an encoding where 'm' becomes 'i'
c.setFont('Helvetica', 12)
c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
sample = 'Mmmmm. ' * 6 + 'Mmmm'
c.setFont('Helvetica-Oblique',12)
c.drawString(125, 475, sample)
w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
c.rect(125, 475, w, 12)
narrowEnc = pdfmetrics.Encoding('m-to-i')
narrowEnc[ord('m')] = 'i'
narrowEnc[ord('M')] = 'I'
pdfmetrics.registerEncoding(narrowEnc)
pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
c.setFont('narrow', 12)
c.drawString(125, 450, sample)
w = c.stringWidth(sample, 'narrow', 12)
c.rect(125, 450, w, 12)
c.setFont('Helvetica', 12)
c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
c.setFont('Symbol', 12)
c.drawString(100, 375, 'abcdefghijklmn')
c.setFont('ZapfDingbats', 12)
c.drawString(300, 375, 'abcdefghijklmn')
c.save()
def makeSuite():
return makeSuiteForClasses(
TextEncodingTestCase,
#FontEncodingTestCase - nobbled for now due to old stuff which needs removing.
)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| subsetToUnicode | identifier_name |
user_defined.py | # Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` but allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self):
|
######################################################################
# Non-public interface.
######################################################################
def _choose_filter(self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new)
| """Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None:
cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname)
obj = self._choose_filter()
self.filter = obj | identifier_body |
user_defined.py | # Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` but allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self): | cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname)
obj = self._choose_filter()
self.filter = obj
######################################################################
# Non-public interface.
######################################################################
def _choose_filter(self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new) | """Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None: | random_line_split |
user_defined.py | # Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` but allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self):
"""Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None:
cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname)
obj = self._choose_filter()
self.filter = obj
######################################################################
# Non-public interface.
######################################################################
def | (self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new)
| _choose_filter | identifier_name |
user_defined.py | # Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` but allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self):
"""Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None:
|
obj = self._choose_filter()
self.filter = obj
######################################################################
# Non-public interface.
######################################################################
def _choose_filter(self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new)
| cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname) | conditional_block |
_hasnotematchingsubstringof.py | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale | #-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "Media having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteMatchingSubstringOf(HasNoteSubstrBase):
"""Media having notes containing <substring>"""
name = _('Media objects having notes containing <substring>')
description = _("Matches media objects whose notes contain text "
"matching a substring") | _ = glocale.translation.gettext
| random_line_split |
_hasnotematchingsubstringof.py | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "Media having notes that contain a substring"
#-------------------------------------------------------------------------
class | (HasNoteSubstrBase):
"""Media having notes containing <substring>"""
name = _('Media objects having notes containing <substring>')
description = _("Matches media objects whose notes contain text "
"matching a substring")
| HasNoteMatchingSubstringOf | identifier_name |
_hasnotematchingsubstringof.py | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "Media having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteMatchingSubstringOf(HasNoteSubstrBase):
| """Media having notes containing <substring>"""
name = _('Media objects having notes containing <substring>')
description = _("Matches media objects whose notes contain text "
"matching a substring") | identifier_body | |
membership.js | import React, { Component } from 'react';
import { Menu, Dropdown, Button, Icon, message } from 'antd';
import { Link } from 'react-router';
import EmployeeTable from './table';
import { memberShip, employee, columns } from '../../accessConfig/employeeList';
import DropdownList from './../../commons/dropdown';
import { inject, observer } from 'mobx-react';
function handleButtonClick(e) {
message.info('Click on left button.');
console.log('click left button', e);
}
function handleMenuClick(e) {
message.info('Click on menu item.');
console.log('click', e);
}
const menu = (
<Menu onClick={handleMenuClick}>
<Menu.Item key="1">1st menu item</Menu.Item>
<Menu.Item key="2">2nd menu item</Menu.Item>
<Menu.Item key="3">3d menu item</Menu.Item>
</Menu>
);
// <DevTools />
@inject('store') @observer
export default class MemberShip extends Component {
constructor(props, context) {
super(props, context);
this.state = {
current: '1',
};
}
handleClick(e) {
console.log('click ', e);
this.setState({
current: e.key,
});
}
render() {
const { memberShipStore } = this.props.store; | </Button>
</Dropdown>
<Link to="/setemployee">link</Link>
<DropdownList dropdownList={memberShipStore.toJS()} mode={1} />
<Link to=""><Button type="primary">新增员工</Button></Link>
<EmployeeTable columns={columns} dataSource={employee}/>
</div>
)
}
} | return (
<div>
<Dropdown overlay={menu}>
<Button type="ghost" style={{ marginLeft: 8 }}>
Button <Icon type="down" /> | random_line_split |
membership.js | import React, { Component } from 'react';
import { Menu, Dropdown, Button, Icon, message } from 'antd';
import { Link } from 'react-router';
import EmployeeTable from './table';
import { memberShip, employee, columns } from '../../accessConfig/employeeList';
import DropdownList from './../../commons/dropdown';
import { inject, observer } from 'mobx-react';
function handleButtonClick(e) {
message.info('Click on left button.');
console.log('click left button', e);
}
function handleMenuClick(e) {
message.info('Click on menu item.');
console.log('click', e);
}
const menu = (
<Menu onClick={handleMenuClick}>
<Menu.Item key="1">1st menu item</Menu.Item>
<Menu.Item key="2">2nd menu item</Menu.Item>
<Menu.Item key="3">3d menu item</Menu.Item>
</Menu>
);
// <DevTools />
@inject('store') @observer
export default class MemberShip extends Component {
constructor(props, context) {
super(props, context);
this.state = {
current: '1',
};
}
| (e) {
console.log('click ', e);
this.setState({
current: e.key,
});
}
render() {
const { memberShipStore } = this.props.store;
return (
<div>
<Dropdown overlay={menu}>
<Button type="ghost" style={{ marginLeft: 8 }}>
Button <Icon type="down" />
</Button>
</Dropdown>
<Link to="/setemployee">link</Link>
<DropdownList dropdownList={memberShipStore.toJS()} mode={1} />
<Link to=""><Button type="primary">新增员工</Button></Link>
<EmployeeTable columns={columns} dataSource={employee}/>
</div>
)
}
} | handleClick | identifier_name |
membership.js | import React, { Component } from 'react';
import { Menu, Dropdown, Button, Icon, message } from 'antd';
import { Link } from 'react-router';
import EmployeeTable from './table';
import { memberShip, employee, columns } from '../../accessConfig/employeeList';
import DropdownList from './../../commons/dropdown';
import { inject, observer } from 'mobx-react';
function handleButtonClick(e) {
message.info('Click on left button.');
console.log('click left button', e);
}
function handleMenuClick(e) {
message.info('Click on menu item.');
console.log('click', e);
}
const menu = (
<Menu onClick={handleMenuClick}>
<Menu.Item key="1">1st menu item</Menu.Item>
<Menu.Item key="2">2nd menu item</Menu.Item>
<Menu.Item key="3">3d menu item</Menu.Item>
</Menu>
);
// <DevTools />
@inject('store') @observer
export default class MemberShip extends Component {
constructor(props, context) {
super(props, context);
this.state = {
current: '1',
};
}
handleClick(e) {
console.log('click ', e);
this.setState({
current: e.key,
});
}
render() | {
const { memberShipStore } = this.props.store;
return (
<div>
<Dropdown overlay={menu}>
<Button type="ghost" style={{ marginLeft: 8 }}>
Button <Icon type="down" />
</Button>
</Dropdown>
<Link to="/setemployee">link</Link>
<DropdownList dropdownList={memberShipStore.toJS()} mode={1} />
<Link to=""><Button type="primary">新增员工</Button></Link>
<EmployeeTable columns={columns} dataSource={employee}/>
</div>
)
}
} | identifier_body | |
ed_handler.py | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from contextlib import contextmanager
import glob
import os
from navitiacommon import utils, launch_exec
from navitiacommon.launch_exec import launch_exec
import psycopg2
import zipfile
import logging
ALEMBIC_PATH_ED = os.environ.get('ALEMBIC_PATH', '../sql')
ALEMBIC_PATH_CITIES = os.environ.get('ALEMBIC_PATH_CITIES', '../cities')
@contextmanager
def cd(new_dir):
"""
small helper to change the current dir
"""
prev_dir = os.getcwd()
os.chdir(os.path.expanduser(new_dir))
try:
yield
finally:
os.chdir(prev_dir)
def binarize(ed_db_params, output, ed_component_path, cities_db_params):
logger = logging.getLogger(__name__)
logger.info('creating data.nav')
ed2nav = 'ed2nav'
if ed_component_path:
ed2nav = os.path.join(ed_component_path, ed2nav)
launch_exec(
ed2nav,
[
"-o",
output,
"--connection-string",
ed_db_params.old_school_cnx_string(),
"--cities-connection-string",
cities_db_params.old_school_cnx_string(),
],
logger,
)
logger.info("data.nav is created successfully: {}".format(output))
def import_data(data_dir, db_params, ed_component_path):
"""
call the right component to import the data in the directory
we loop through all files until we recognize one on them
"""
log = logging.getLogger(__name__)
files = glob.glob(data_dir + "/*")
data_type, file_to_load = utils.type_of_data(files)
if not data_type:
log.info('unknown data type for dir {}, skipping'.format(data_dir))
return
# Note, we consider that we only have to load one kind of data per directory
import_component = data_type + '2ed'
if ed_component_path:
import_component = os.path.join(ed_component_path, import_component)
if file_to_load.endswith('.zip') or file_to_load.endswith('.geopal'):
# TODO: handle geopal as non zip
# if it's a zip, we unzip it
zip_file = zipfile.ZipFile(file_to_load)
zip_file.extractall(path=data_dir)
file_to_load = data_dir
if launch_exec(
import_component, ["-i", file_to_load, "--connection-string", db_params.old_school_cnx_string()], log
):
raise Exception('Error: problem with running {}, stoping'.format(import_component))
def load_cities(cities_file, cities_db_params, cities_exec_path):
logger = logging.getLogger(__name__)
cities_exec = os.path.join(cities_exec_path, 'cities')
if launch_exec(
cities_exec, ["-i", cities_file, "--connection-string", cities_db_params.old_school_cnx_string()], logger
):
raise Exception('Error: problem with running {}, stoping'.format(cities_exec))
def load_data(data_dirs, ed_db_params, ed_component_path): | logging.getLogger(__name__).info('loading {}'.format(data_dirs))
for d in data_dirs:
import_data(d, ed_db_params, ed_component_path)
def update_db(db_params, alembic_path):
"""
enable postgis on the db and update it's scheme
"""
cnx_string = db_params.cnx_string()
# we need to enable postgis on the db
cnx = psycopg2.connect(
database=db_params.dbname, user=db_params.user, password=db_params.password, host=db_params.host
)
c = cnx.cursor()
c.execute("create extension postgis;")
c.close()
cnx.commit()
logging.getLogger(__name__).info('message = {}'.format(c.statusmessage))
with cd(alembic_path):
res = os.system('PYTHONPATH=. alembic -x dbname="{cnx}" upgrade head'.format(cnx=cnx_string))
if res:
raise Exception('problem with db update')
def generate_nav(
data_dir, docker_ed, docker_cities, output_file, ed_component_path, cities_exec_path, import_cities
):
"""
load all data either directly in data_dir if there is no sub dir, or all data in the subdir
"""
cities_db_params = docker_cities.get_db_params()
update_db(cities_db_params, ALEMBIC_PATH_CITIES)
ed_db_params = docker_ed.get_db_params()
update_db(ed_db_params, ALEMBIC_PATH_ED)
if import_cities:
if not os.path.exists(import_cities):
raise Exception('Error: impossible to find {}, exiting'.format(import_cities))
load_cities(import_cities, cities_db_params, cities_exec_path)
if not os.path.exists(data_dir):
raise Exception('Error: impossible to find {}, exiting'.format(data_dir))
data_dirs = [
os.path.join(data_dir, sub_dir_name)
for sub_dir_name in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, sub_dir_name))
] or [
data_dir
] # if there is no sub dir, we import only the files in the dir
load_data(data_dirs, ed_db_params, ed_component_path)
binarize(ed_db_params, output_file, ed_component_path, cities_db_params) | random_line_split | |
ed_handler.py | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from contextlib import contextmanager
import glob
import os
from navitiacommon import utils, launch_exec
from navitiacommon.launch_exec import launch_exec
import psycopg2
import zipfile
import logging
ALEMBIC_PATH_ED = os.environ.get('ALEMBIC_PATH', '../sql')
ALEMBIC_PATH_CITIES = os.environ.get('ALEMBIC_PATH_CITIES', '../cities')
@contextmanager
def cd(new_dir):
"""
small helper to change the current dir
"""
prev_dir = os.getcwd()
os.chdir(os.path.expanduser(new_dir))
try:
yield
finally:
os.chdir(prev_dir)
def binarize(ed_db_params, output, ed_component_path, cities_db_params):
logger = logging.getLogger(__name__)
logger.info('creating data.nav')
ed2nav = 'ed2nav'
if ed_component_path:
ed2nav = os.path.join(ed_component_path, ed2nav)
launch_exec(
ed2nav,
[
"-o",
output,
"--connection-string",
ed_db_params.old_school_cnx_string(),
"--cities-connection-string",
cities_db_params.old_school_cnx_string(),
],
logger,
)
logger.info("data.nav is created successfully: {}".format(output))
def import_data(data_dir, db_params, ed_component_path):
"""
call the right component to import the data in the directory
we loop through all files until we recognize one on them
"""
log = logging.getLogger(__name__)
files = glob.glob(data_dir + "/*")
data_type, file_to_load = utils.type_of_data(files)
if not data_type:
log.info('unknown data type for dir {}, skipping'.format(data_dir))
return
# Note, we consider that we only have to load one kind of data per directory
import_component = data_type + '2ed'
if ed_component_path:
import_component = os.path.join(ed_component_path, import_component)
if file_to_load.endswith('.zip') or file_to_load.endswith('.geopal'):
# TODO: handle geopal as non zip
# if it's a zip, we unzip it
zip_file = zipfile.ZipFile(file_to_load)
zip_file.extractall(path=data_dir)
file_to_load = data_dir
if launch_exec(
import_component, ["-i", file_to_load, "--connection-string", db_params.old_school_cnx_string()], log
):
raise Exception('Error: problem with running {}, stoping'.format(import_component))
def load_cities(cities_file, cities_db_params, cities_exec_path):
logger = logging.getLogger(__name__)
cities_exec = os.path.join(cities_exec_path, 'cities')
if launch_exec(
cities_exec, ["-i", cities_file, "--connection-string", cities_db_params.old_school_cnx_string()], logger
):
raise Exception('Error: problem with running {}, stoping'.format(cities_exec))
def load_data(data_dirs, ed_db_params, ed_component_path):
logging.getLogger(__name__).info('loading {}'.format(data_dirs))
for d in data_dirs:
import_data(d, ed_db_params, ed_component_path)
def | (db_params, alembic_path):
"""
enable postgis on the db and update it's scheme
"""
cnx_string = db_params.cnx_string()
# we need to enable postgis on the db
cnx = psycopg2.connect(
database=db_params.dbname, user=db_params.user, password=db_params.password, host=db_params.host
)
c = cnx.cursor()
c.execute("create extension postgis;")
c.close()
cnx.commit()
logging.getLogger(__name__).info('message = {}'.format(c.statusmessage))
with cd(alembic_path):
res = os.system('PYTHONPATH=. alembic -x dbname="{cnx}" upgrade head'.format(cnx=cnx_string))
if res:
raise Exception('problem with db update')
def generate_nav(
data_dir, docker_ed, docker_cities, output_file, ed_component_path, cities_exec_path, import_cities
):
"""
load all data either directly in data_dir if there is no sub dir, or all data in the subdir
"""
cities_db_params = docker_cities.get_db_params()
update_db(cities_db_params, ALEMBIC_PATH_CITIES)
ed_db_params = docker_ed.get_db_params()
update_db(ed_db_params, ALEMBIC_PATH_ED)
if import_cities:
if not os.path.exists(import_cities):
raise Exception('Error: impossible to find {}, exiting'.format(import_cities))
load_cities(import_cities, cities_db_params, cities_exec_path)
if not os.path.exists(data_dir):
raise Exception('Error: impossible to find {}, exiting'.format(data_dir))
data_dirs = [
os.path.join(data_dir, sub_dir_name)
for sub_dir_name in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, sub_dir_name))
] or [
data_dir
] # if there is no sub dir, we import only the files in the dir
load_data(data_dirs, ed_db_params, ed_component_path)
binarize(ed_db_params, output_file, ed_component_path, cities_db_params)
| update_db | identifier_name |
ed_handler.py | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from contextlib import contextmanager
import glob
import os
from navitiacommon import utils, launch_exec
from navitiacommon.launch_exec import launch_exec
import psycopg2
import zipfile
import logging
ALEMBIC_PATH_ED = os.environ.get('ALEMBIC_PATH', '../sql')
ALEMBIC_PATH_CITIES = os.environ.get('ALEMBIC_PATH_CITIES', '../cities')
@contextmanager
def cd(new_dir):
"""
small helper to change the current dir
"""
prev_dir = os.getcwd()
os.chdir(os.path.expanduser(new_dir))
try:
yield
finally:
os.chdir(prev_dir)
def binarize(ed_db_params, output, ed_component_path, cities_db_params):
logger = logging.getLogger(__name__)
logger.info('creating data.nav')
ed2nav = 'ed2nav'
if ed_component_path:
ed2nav = os.path.join(ed_component_path, ed2nav)
launch_exec(
ed2nav,
[
"-o",
output,
"--connection-string",
ed_db_params.old_school_cnx_string(),
"--cities-connection-string",
cities_db_params.old_school_cnx_string(),
],
logger,
)
logger.info("data.nav is created successfully: {}".format(output))
def import_data(data_dir, db_params, ed_component_path):
"""
call the right component to import the data in the directory
we loop through all files until we recognize one on them
"""
log = logging.getLogger(__name__)
files = glob.glob(data_dir + "/*")
data_type, file_to_load = utils.type_of_data(files)
if not data_type:
log.info('unknown data type for dir {}, skipping'.format(data_dir))
return
# Note, we consider that we only have to load one kind of data per directory
import_component = data_type + '2ed'
if ed_component_path:
import_component = os.path.join(ed_component_path, import_component)
if file_to_load.endswith('.zip') or file_to_load.endswith('.geopal'):
# TODO: handle geopal as non zip
# if it's a zip, we unzip it
|
if launch_exec(
import_component, ["-i", file_to_load, "--connection-string", db_params.old_school_cnx_string()], log
):
raise Exception('Error: problem with running {}, stoping'.format(import_component))
def load_cities(cities_file, cities_db_params, cities_exec_path):
logger = logging.getLogger(__name__)
cities_exec = os.path.join(cities_exec_path, 'cities')
if launch_exec(
cities_exec, ["-i", cities_file, "--connection-string", cities_db_params.old_school_cnx_string()], logger
):
raise Exception('Error: problem with running {}, stoping'.format(cities_exec))
def load_data(data_dirs, ed_db_params, ed_component_path):
logging.getLogger(__name__).info('loading {}'.format(data_dirs))
for d in data_dirs:
import_data(d, ed_db_params, ed_component_path)
def update_db(db_params, alembic_path):
"""
enable postgis on the db and update it's scheme
"""
cnx_string = db_params.cnx_string()
# we need to enable postgis on the db
cnx = psycopg2.connect(
database=db_params.dbname, user=db_params.user, password=db_params.password, host=db_params.host
)
c = cnx.cursor()
c.execute("create extension postgis;")
c.close()
cnx.commit()
logging.getLogger(__name__).info('message = {}'.format(c.statusmessage))
with cd(alembic_path):
res = os.system('PYTHONPATH=. alembic -x dbname="{cnx}" upgrade head'.format(cnx=cnx_string))
if res:
raise Exception('problem with db update')
def generate_nav(
data_dir, docker_ed, docker_cities, output_file, ed_component_path, cities_exec_path, import_cities
):
"""
load all data either directly in data_dir if there is no sub dir, or all data in the subdir
"""
cities_db_params = docker_cities.get_db_params()
update_db(cities_db_params, ALEMBIC_PATH_CITIES)
ed_db_params = docker_ed.get_db_params()
update_db(ed_db_params, ALEMBIC_PATH_ED)
if import_cities:
if not os.path.exists(import_cities):
raise Exception('Error: impossible to find {}, exiting'.format(import_cities))
load_cities(import_cities, cities_db_params, cities_exec_path)
if not os.path.exists(data_dir):
raise Exception('Error: impossible to find {}, exiting'.format(data_dir))
data_dirs = [
os.path.join(data_dir, sub_dir_name)
for sub_dir_name in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, sub_dir_name))
] or [
data_dir
] # if there is no sub dir, we import only the files in the dir
load_data(data_dirs, ed_db_params, ed_component_path)
binarize(ed_db_params, output_file, ed_component_path, cities_db_params)
| zip_file = zipfile.ZipFile(file_to_load)
zip_file.extractall(path=data_dir)
file_to_load = data_dir | conditional_block |
ed_handler.py | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from contextlib import contextmanager
import glob
import os
from navitiacommon import utils, launch_exec
from navitiacommon.launch_exec import launch_exec
import psycopg2
import zipfile
import logging
ALEMBIC_PATH_ED = os.environ.get('ALEMBIC_PATH', '../sql')
ALEMBIC_PATH_CITIES = os.environ.get('ALEMBIC_PATH_CITIES', '../cities')
@contextmanager
def cd(new_dir):
"""
small helper to change the current dir
"""
prev_dir = os.getcwd()
os.chdir(os.path.expanduser(new_dir))
try:
yield
finally:
os.chdir(prev_dir)
def binarize(ed_db_params, output, ed_component_path, cities_db_params):
logger = logging.getLogger(__name__)
logger.info('creating data.nav')
ed2nav = 'ed2nav'
if ed_component_path:
ed2nav = os.path.join(ed_component_path, ed2nav)
launch_exec(
ed2nav,
[
"-o",
output,
"--connection-string",
ed_db_params.old_school_cnx_string(),
"--cities-connection-string",
cities_db_params.old_school_cnx_string(),
],
logger,
)
logger.info("data.nav is created successfully: {}".format(output))
def import_data(data_dir, db_params, ed_component_path):
"""
call the right component to import the data in the directory
we loop through all files until we recognize one on them
"""
log = logging.getLogger(__name__)
files = glob.glob(data_dir + "/*")
data_type, file_to_load = utils.type_of_data(files)
if not data_type:
log.info('unknown data type for dir {}, skipping'.format(data_dir))
return
# Note, we consider that we only have to load one kind of data per directory
import_component = data_type + '2ed'
if ed_component_path:
import_component = os.path.join(ed_component_path, import_component)
if file_to_load.endswith('.zip') or file_to_load.endswith('.geopal'):
# TODO: handle geopal as non zip
# if it's a zip, we unzip it
zip_file = zipfile.ZipFile(file_to_load)
zip_file.extractall(path=data_dir)
file_to_load = data_dir
if launch_exec(
import_component, ["-i", file_to_load, "--connection-string", db_params.old_school_cnx_string()], log
):
raise Exception('Error: problem with running {}, stoping'.format(import_component))
def load_cities(cities_file, cities_db_params, cities_exec_path):
|
def load_data(data_dirs, ed_db_params, ed_component_path):
logging.getLogger(__name__).info('loading {}'.format(data_dirs))
for d in data_dirs:
import_data(d, ed_db_params, ed_component_path)
def update_db(db_params, alembic_path):
"""
enable postgis on the db and update it's scheme
"""
cnx_string = db_params.cnx_string()
# we need to enable postgis on the db
cnx = psycopg2.connect(
database=db_params.dbname, user=db_params.user, password=db_params.password, host=db_params.host
)
c = cnx.cursor()
c.execute("create extension postgis;")
c.close()
cnx.commit()
logging.getLogger(__name__).info('message = {}'.format(c.statusmessage))
with cd(alembic_path):
res = os.system('PYTHONPATH=. alembic -x dbname="{cnx}" upgrade head'.format(cnx=cnx_string))
if res:
raise Exception('problem with db update')
def generate_nav(
data_dir, docker_ed, docker_cities, output_file, ed_component_path, cities_exec_path, import_cities
):
"""
load all data either directly in data_dir if there is no sub dir, or all data in the subdir
"""
cities_db_params = docker_cities.get_db_params()
update_db(cities_db_params, ALEMBIC_PATH_CITIES)
ed_db_params = docker_ed.get_db_params()
update_db(ed_db_params, ALEMBIC_PATH_ED)
if import_cities:
if not os.path.exists(import_cities):
raise Exception('Error: impossible to find {}, exiting'.format(import_cities))
load_cities(import_cities, cities_db_params, cities_exec_path)
if not os.path.exists(data_dir):
raise Exception('Error: impossible to find {}, exiting'.format(data_dir))
data_dirs = [
os.path.join(data_dir, sub_dir_name)
for sub_dir_name in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, sub_dir_name))
] or [
data_dir
] # if there is no sub dir, we import only the files in the dir
load_data(data_dirs, ed_db_params, ed_component_path)
binarize(ed_db_params, output_file, ed_component_path, cities_db_params)
| logger = logging.getLogger(__name__)
cities_exec = os.path.join(cities_exec_path, 'cities')
if launch_exec(
cities_exec, ["-i", cities_file, "--connection-string", cities_db_params.old_school_cnx_string()], logger
):
raise Exception('Error: problem with running {}, stoping'.format(cities_exec)) | identifier_body |
MasterRegistry.ts | import { Reducer, ReducersMapObject, ActionCreatorsMapObject } from "redux";
import { store } from "../store/store";
import combineReducersWithRoot from "./combineReducersWithRoot";
import SeqPartRegistry from "./SeqPartRegistry";
import SeqPart from "../seqpart/SeqPart";
import PluginRegistry from "./PluginRegistry";
import { AudioEndpoint } from "../interfaces/interfaces";
import { IPlugin, IPluginConstructor } from "../interfaces/IRegistryItems"; |
// this file has a method also called "addPlugin"
// so we'll rename it to avoid confusion.
const addPluginAction = addPlugin;
const removePluginAction = removePlugin;
/**
* Calls the appropriate sub-registries
* and adds new items to the state and reducers map
*
* This is not derived from AbstractRegistry,
* it's more like an orchestrator class.
*/
export default class MasterRegistry {
private plugins: PluginRegistry;
private seqParts: SeqPartRegistry;
public constructor(private ac: AudioContext) {
this.plugins = new PluginRegistry(ac);
this.seqParts = new SeqPartRegistry();
}
public addPlugin(pluginClass: IPluginConstructor): string {
const newPlugin: IPlugin = this.plugins.add(pluginClass);
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPluginAction(newPlugin.uid));
// make the new item observe the store
newPlugin.unsubscribe = this.plugins.observeStore(store, newPlugin);
return newPlugin.uid;
}
public removePlugin(itemId: string): void {
this.plugins.remove(itemId);
this.replaceReducer();
store.dispatch(removePluginAction(itemId));
}
public addSeqPart(lengthInStepsPerBar?: number): string {
let newPart: SeqPart;
// add new item to the seqPart Registry
if (lengthInStepsPerBar) {
newPart = this.seqParts.add(lengthInStepsPerBar);
} else {
newPart = this.seqParts.add();
}
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPart(newPart.uid));
// make the new item observe the store
newPart.unsubscribe = this.seqParts.observeStore(store, newPart);
return newPart.uid;
}
public removeSeqPart(itemId: string): void {
this.seqParts.remove(itemId);
this.replaceReducer();
store.dispatch(removePart(itemId));
}
public getActionCreators(itemId: string, bound?: string): ActionCreatorsMapObject {
const pluginKeys = this.plugins.getUidList();
const seqPartKeys = this.seqParts.getUidList();
const actionCreatorsType = bound === "unbound" ? "unboundActionCreators" : "actionCreators";
let actionCreators: ActionCreatorsMapObject = {};
if (pluginKeys.includes(itemId)) {
const item = this.plugins.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
} else if (seqPartKeys.includes(itemId)) {
const item = this.seqParts.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
}
return actionCreators;
}
/**
* Connects two audio endpoints and dispatches the new state.
* If the id of the input plugin is not valid, it connects to the soundcard input.
* If the id of the output plugin is not valid, it cancels the operation.
* @param connection Audio endpoints to be connected
*/
public connectAudioNodes(connection: [AudioEndpoint, AudioEndpoint]): void {
const output = connection[0];
const input = connection[1];
const pluginOut = this.plugins.itemList.get(output[0]);
const pluginIn = this.plugins.itemList.get(input[0]);
if (typeof pluginOut === "undefined") {
return;
}
const audioNodeOut = pluginOut.outputs[output[1]];
const audioNodeIn = pluginIn ? pluginIn.inputs[input[1]] : this.ac.destination;
if (audioNodeIn.numberOfOutputs === 0) {
input[0] = "destination"; // enshure that no wrong id will be dispatched
}
audioNodeOut.disconnect();
audioNodeOut.connect(audioNodeIn);
store.dispatch(connectAudioNodes(connection));
}
/**
* Combines all sub reducers with the root reducer
* and replaces the current reducer
*/
private replaceReducer(): void {
const pluginReducers: ReducersMapObject = this.plugins.getAllSubReducers();
const seqPartReducers: ReducersMapObject = this.seqParts.getAllSubReducers();
const subReducers: ReducersMapObject = this.getSubReducer(pluginReducers, seqPartReducers);
const reducerTree: Reducer = this.getCompleteReducer(rootReducer, subReducers);
store.replaceReducer(reducerTree);
}
private getSubReducer(...subReducers: ReducersMapObject[]): ReducersMapObject {
return Object.assign({}, ...subReducers);
}
private getCompleteReducer(rootReducer: Reducer, subReducers: ReducersMapObject): Reducer {
return combineReducersWithRoot(subReducers, rootReducer);
}
} | import rootReducer from "../store/rootReducer";
import { addPlugin, addPart, removePlugin, removePart, connectAudioNodes } from "../store/rootActions"; | random_line_split |
MasterRegistry.ts | import { Reducer, ReducersMapObject, ActionCreatorsMapObject } from "redux";
import { store } from "../store/store";
import combineReducersWithRoot from "./combineReducersWithRoot";
import SeqPartRegistry from "./SeqPartRegistry";
import SeqPart from "../seqpart/SeqPart";
import PluginRegistry from "./PluginRegistry";
import { AudioEndpoint } from "../interfaces/interfaces";
import { IPlugin, IPluginConstructor } from "../interfaces/IRegistryItems";
import rootReducer from "../store/rootReducer";
import { addPlugin, addPart, removePlugin, removePart, connectAudioNodes } from "../store/rootActions";
// this file has a method also called "addPlugin"
// so we'll rename it to avoid confusion.
const addPluginAction = addPlugin;
const removePluginAction = removePlugin;
/**
* Calls the appropriate sub-registries
* and adds new items to the state and reducers map
*
* This is not derived from AbstractRegistry,
* it's more like an orchestrator class.
*/
export default class MasterRegistry {
private plugins: PluginRegistry;
private seqParts: SeqPartRegistry;
public constructor(private ac: AudioContext) {
this.plugins = new PluginRegistry(ac);
this.seqParts = new SeqPartRegistry();
}
public addPlugin(pluginClass: IPluginConstructor): string {
const newPlugin: IPlugin = this.plugins.add(pluginClass);
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPluginAction(newPlugin.uid));
// make the new item observe the store
newPlugin.unsubscribe = this.plugins.observeStore(store, newPlugin);
return newPlugin.uid;
}
public removePlugin(itemId: string): void {
this.plugins.remove(itemId);
this.replaceReducer();
store.dispatch(removePluginAction(itemId));
}
public addSeqPart(lengthInStepsPerBar?: number): string {
let newPart: SeqPart;
// add new item to the seqPart Registry
if (lengthInStepsPerBar) {
newPart = this.seqParts.add(lengthInStepsPerBar);
} else {
newPart = this.seqParts.add();
}
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPart(newPart.uid));
// make the new item observe the store
newPart.unsubscribe = this.seqParts.observeStore(store, newPart);
return newPart.uid;
}
public removeSeqPart(itemId: string): void {
this.seqParts.remove(itemId);
this.replaceReducer();
store.dispatch(removePart(itemId));
}
public getActionCreators(itemId: string, bound?: string): ActionCreatorsMapObject {
const pluginKeys = this.plugins.getUidList();
const seqPartKeys = this.seqParts.getUidList();
const actionCreatorsType = bound === "unbound" ? "unboundActionCreators" : "actionCreators";
let actionCreators: ActionCreatorsMapObject = {};
if (pluginKeys.includes(itemId)) {
const item = this.plugins.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
} else if (seqPartKeys.includes(itemId)) {
const item = this.seqParts.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
}
return actionCreators;
}
/**
* Connects two audio endpoints and dispatches the new state.
* If the id of the input plugin is not valid, it connects to the soundcard input.
* If the id of the output plugin is not valid, it cancels the operation.
* @param connection Audio endpoints to be connected
*/
public connectAudioNodes(connection: [AudioEndpoint, AudioEndpoint]): void {
const output = connection[0];
const input = connection[1];
const pluginOut = this.plugins.itemList.get(output[0]);
const pluginIn = this.plugins.itemList.get(input[0]);
if (typeof pluginOut === "undefined") {
return;
}
const audioNodeOut = pluginOut.outputs[output[1]];
const audioNodeIn = pluginIn ? pluginIn.inputs[input[1]] : this.ac.destination;
if (audioNodeIn.numberOfOutputs === 0) {
input[0] = "destination"; // enshure that no wrong id will be dispatched
}
audioNodeOut.disconnect();
audioNodeOut.connect(audioNodeIn);
store.dispatch(connectAudioNodes(connection));
}
/**
* Combines all sub reducers with the root reducer
* and replaces the current reducer
*/
private | (): void {
const pluginReducers: ReducersMapObject = this.plugins.getAllSubReducers();
const seqPartReducers: ReducersMapObject = this.seqParts.getAllSubReducers();
const subReducers: ReducersMapObject = this.getSubReducer(pluginReducers, seqPartReducers);
const reducerTree: Reducer = this.getCompleteReducer(rootReducer, subReducers);
store.replaceReducer(reducerTree);
}
private getSubReducer(...subReducers: ReducersMapObject[]): ReducersMapObject {
return Object.assign({}, ...subReducers);
}
private getCompleteReducer(rootReducer: Reducer, subReducers: ReducersMapObject): Reducer {
return combineReducersWithRoot(subReducers, rootReducer);
}
}
| replaceReducer | identifier_name |
MasterRegistry.ts | import { Reducer, ReducersMapObject, ActionCreatorsMapObject } from "redux";
import { store } from "../store/store";
import combineReducersWithRoot from "./combineReducersWithRoot";
import SeqPartRegistry from "./SeqPartRegistry";
import SeqPart from "../seqpart/SeqPart";
import PluginRegistry from "./PluginRegistry";
import { AudioEndpoint } from "../interfaces/interfaces";
import { IPlugin, IPluginConstructor } from "../interfaces/IRegistryItems";
import rootReducer from "../store/rootReducer";
import { addPlugin, addPart, removePlugin, removePart, connectAudioNodes } from "../store/rootActions";
// this file has a method also called "addPlugin"
// so we'll rename it to avoid confusion.
const addPluginAction = addPlugin;
const removePluginAction = removePlugin;
/**
* Calls the appropriate sub-registries
* and adds new items to the state and reducers map
*
* This is not derived from AbstractRegistry,
* it's more like an orchestrator class.
*/
export default class MasterRegistry {
private plugins: PluginRegistry;
private seqParts: SeqPartRegistry;
public constructor(private ac: AudioContext) {
this.plugins = new PluginRegistry(ac);
this.seqParts = new SeqPartRegistry();
}
public addPlugin(pluginClass: IPluginConstructor): string {
const newPlugin: IPlugin = this.plugins.add(pluginClass);
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPluginAction(newPlugin.uid));
// make the new item observe the store
newPlugin.unsubscribe = this.plugins.observeStore(store, newPlugin);
return newPlugin.uid;
}
public removePlugin(itemId: string): void {
this.plugins.remove(itemId);
this.replaceReducer();
store.dispatch(removePluginAction(itemId));
}
public addSeqPart(lengthInStepsPerBar?: number): string {
let newPart: SeqPart;
// add new item to the seqPart Registry
if (lengthInStepsPerBar) {
newPart = this.seqParts.add(lengthInStepsPerBar);
} else {
newPart = this.seqParts.add();
}
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPart(newPart.uid));
// make the new item observe the store
newPart.unsubscribe = this.seqParts.observeStore(store, newPart);
return newPart.uid;
}
public removeSeqPart(itemId: string): void {
this.seqParts.remove(itemId);
this.replaceReducer();
store.dispatch(removePart(itemId));
}
public getActionCreators(itemId: string, bound?: string): ActionCreatorsMapObject {
const pluginKeys = this.plugins.getUidList();
const seqPartKeys = this.seqParts.getUidList();
const actionCreatorsType = bound === "unbound" ? "unboundActionCreators" : "actionCreators";
let actionCreators: ActionCreatorsMapObject = {};
if (pluginKeys.includes(itemId)) {
const item = this.plugins.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
} else if (seqPartKeys.includes(itemId)) {
const item = this.seqParts.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
}
return actionCreators;
}
/**
* Connects two audio endpoints and dispatches the new state.
* If the id of the input plugin is not valid, it connects to the soundcard input.
* If the id of the output plugin is not valid, it cancels the operation.
* @param connection Audio endpoints to be connected
*/
public connectAudioNodes(connection: [AudioEndpoint, AudioEndpoint]): void {
const output = connection[0];
const input = connection[1];
const pluginOut = this.plugins.itemList.get(output[0]);
const pluginIn = this.plugins.itemList.get(input[0]);
if (typeof pluginOut === "undefined") {
return;
}
const audioNodeOut = pluginOut.outputs[output[1]];
const audioNodeIn = pluginIn ? pluginIn.inputs[input[1]] : this.ac.destination;
if (audioNodeIn.numberOfOutputs === 0) {
input[0] = "destination"; // enshure that no wrong id will be dispatched
}
audioNodeOut.disconnect();
audioNodeOut.connect(audioNodeIn);
store.dispatch(connectAudioNodes(connection));
}
/**
* Combines all sub reducers with the root reducer
* and replaces the current reducer
*/
private replaceReducer(): void {
const pluginReducers: ReducersMapObject = this.plugins.getAllSubReducers();
const seqPartReducers: ReducersMapObject = this.seqParts.getAllSubReducers();
const subReducers: ReducersMapObject = this.getSubReducer(pluginReducers, seqPartReducers);
const reducerTree: Reducer = this.getCompleteReducer(rootReducer, subReducers);
store.replaceReducer(reducerTree);
}
private getSubReducer(...subReducers: ReducersMapObject[]): ReducersMapObject {
return Object.assign({}, ...subReducers);
}
private getCompleteReducer(rootReducer: Reducer, subReducers: ReducersMapObject): Reducer |
}
| {
return combineReducersWithRoot(subReducers, rootReducer);
} | identifier_body |
MasterRegistry.ts | import { Reducer, ReducersMapObject, ActionCreatorsMapObject } from "redux";
import { store } from "../store/store";
import combineReducersWithRoot from "./combineReducersWithRoot";
import SeqPartRegistry from "./SeqPartRegistry";
import SeqPart from "../seqpart/SeqPart";
import PluginRegistry from "./PluginRegistry";
import { AudioEndpoint } from "../interfaces/interfaces";
import { IPlugin, IPluginConstructor } from "../interfaces/IRegistryItems";
import rootReducer from "../store/rootReducer";
import { addPlugin, addPart, removePlugin, removePart, connectAudioNodes } from "../store/rootActions";
// this file has a method also called "addPlugin"
// so we'll rename it to avoid confusion.
const addPluginAction = addPlugin;
const removePluginAction = removePlugin;
/**
* Calls the appropriate sub-registries
* and adds new items to the state and reducers map
*
* This is not derived from AbstractRegistry,
* it's more like an orchestrator class.
*/
export default class MasterRegistry {
private plugins: PluginRegistry;
private seqParts: SeqPartRegistry;
public constructor(private ac: AudioContext) {
this.plugins = new PluginRegistry(ac);
this.seqParts = new SeqPartRegistry();
}
public addPlugin(pluginClass: IPluginConstructor): string {
const newPlugin: IPlugin = this.plugins.add(pluginClass);
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPluginAction(newPlugin.uid));
// make the new item observe the store
newPlugin.unsubscribe = this.plugins.observeStore(store, newPlugin);
return newPlugin.uid;
}
public removePlugin(itemId: string): void {
this.plugins.remove(itemId);
this.replaceReducer();
store.dispatch(removePluginAction(itemId));
}
public addSeqPart(lengthInStepsPerBar?: number): string {
let newPart: SeqPart;
// add new item to the seqPart Registry
if (lengthInStepsPerBar) | else {
newPart = this.seqParts.add();
}
// build a new root reducer and replace the current one
this.replaceReducer();
store.dispatch(addPart(newPart.uid));
// make the new item observe the store
newPart.unsubscribe = this.seqParts.observeStore(store, newPart);
return newPart.uid;
}
public removeSeqPart(itemId: string): void {
this.seqParts.remove(itemId);
this.replaceReducer();
store.dispatch(removePart(itemId));
}
public getActionCreators(itemId: string, bound?: string): ActionCreatorsMapObject {
const pluginKeys = this.plugins.getUidList();
const seqPartKeys = this.seqParts.getUidList();
const actionCreatorsType = bound === "unbound" ? "unboundActionCreators" : "actionCreators";
let actionCreators: ActionCreatorsMapObject = {};
if (pluginKeys.includes(itemId)) {
const item = this.plugins.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
} else if (seqPartKeys.includes(itemId)) {
const item = this.seqParts.itemList.get(itemId);
if (item) {
actionCreators = Object.assign({}, item[actionCreatorsType]);
}
}
return actionCreators;
}
/**
* Connects two audio endpoints and dispatches the new state.
* If the id of the input plugin is not valid, it connects to the soundcard input.
* If the id of the output plugin is not valid, it cancels the operation.
* @param connection Audio endpoints to be connected
*/
public connectAudioNodes(connection: [AudioEndpoint, AudioEndpoint]): void {
const output = connection[0];
const input = connection[1];
const pluginOut = this.plugins.itemList.get(output[0]);
const pluginIn = this.plugins.itemList.get(input[0]);
if (typeof pluginOut === "undefined") {
return;
}
const audioNodeOut = pluginOut.outputs[output[1]];
const audioNodeIn = pluginIn ? pluginIn.inputs[input[1]] : this.ac.destination;
if (audioNodeIn.numberOfOutputs === 0) {
input[0] = "destination"; // enshure that no wrong id will be dispatched
}
audioNodeOut.disconnect();
audioNodeOut.connect(audioNodeIn);
store.dispatch(connectAudioNodes(connection));
}
/**
* Combines all sub reducers with the root reducer
* and replaces the current reducer
*/
private replaceReducer(): void {
const pluginReducers: ReducersMapObject = this.plugins.getAllSubReducers();
const seqPartReducers: ReducersMapObject = this.seqParts.getAllSubReducers();
const subReducers: ReducersMapObject = this.getSubReducer(pluginReducers, seqPartReducers);
const reducerTree: Reducer = this.getCompleteReducer(rootReducer, subReducers);
store.replaceReducer(reducerTree);
}
private getSubReducer(...subReducers: ReducersMapObject[]): ReducersMapObject {
return Object.assign({}, ...subReducers);
}
private getCompleteReducer(rootReducer: Reducer, subReducers: ReducersMapObject): Reducer {
return combineReducersWithRoot(subReducers, rootReducer);
}
}
| {
newPart = this.seqParts.add(lengthInStepsPerBar);
} | conditional_block |
code_input.ts | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/cr_elements/cr_input/cr_input.m.js';
import {CrInputElement} from 'chrome://resources/cr_elements/cr_input/cr_input.m.js';
import {html, PolymerElement} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js';
export interface CodeInputElement {
$: {
accessCodeInput: CrInputElement;
}
}
export class CodeInputElement extends PolymerElement {
static get is() {
return 'c2c-code-input';
}
static get template() {
return html`{__html_template__}`;
}
static get properties() {
return {
length: Number,
value: {
type: String,
value: '',
}
};
}
get crInput() {
return this.$.accessCodeInput;
}
value: string;
ready() |
clearInput() {
this.$.accessCodeInput.value = '';
}
focusInput() {
this.$.accessCodeInput.focusInput();
}
private handleInput() {
this.$.accessCodeInput.value = this.$.accessCodeInput.value.toUpperCase();
this.dispatchEvent(new CustomEvent('access-code-input', {
detail: {value: this.$.accessCodeInput.value}
}));
}
}
customElements.define(CodeInputElement.is, CodeInputElement); | {
super.ready();
this.$.accessCodeInput.addEventListener('input', () => {
this.handleInput();
});
} | identifier_body |
code_input.ts | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/cr_elements/cr_input/cr_input.m.js';
import {CrInputElement} from 'chrome://resources/cr_elements/cr_input/cr_input.m.js';
import {html, PolymerElement} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js';
export interface CodeInputElement {
$: {
accessCodeInput: CrInputElement;
}
}
export class CodeInputElement extends PolymerElement {
static get | () {
return 'c2c-code-input';
}
static get template() {
return html`{__html_template__}`;
}
static get properties() {
return {
length: Number,
value: {
type: String,
value: '',
}
};
}
get crInput() {
return this.$.accessCodeInput;
}
value: string;
ready() {
super.ready();
this.$.accessCodeInput.addEventListener('input', () => {
this.handleInput();
});
}
clearInput() {
this.$.accessCodeInput.value = '';
}
focusInput() {
this.$.accessCodeInput.focusInput();
}
private handleInput() {
this.$.accessCodeInput.value = this.$.accessCodeInput.value.toUpperCase();
this.dispatchEvent(new CustomEvent('access-code-input', {
detail: {value: this.$.accessCodeInput.value}
}));
}
}
customElements.define(CodeInputElement.is, CodeInputElement); | is | identifier_name |
code_input.ts | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/cr_elements/cr_input/cr_input.m.js';
import {CrInputElement} from 'chrome://resources/cr_elements/cr_input/cr_input.m.js';
import {html, PolymerElement} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js';
export interface CodeInputElement {
$: {
accessCodeInput: CrInputElement;
}
}
export class CodeInputElement extends PolymerElement {
static get is() {
return 'c2c-code-input';
}
static get template() {
return html`{__html_template__}`;
}
static get properties() {
return {
length: Number,
value: {
type: String,
value: '',
}
};
}
get crInput() {
return this.$.accessCodeInput;
}
value: string;
ready() {
super.ready();
this.$.accessCodeInput.addEventListener('input', () => {
this.handleInput();
});
}
clearInput() {
this.$.accessCodeInput.value = '';
} | private handleInput() {
this.$.accessCodeInput.value = this.$.accessCodeInput.value.toUpperCase();
this.dispatchEvent(new CustomEvent('access-code-input', {
detail: {value: this.$.accessCodeInput.value}
}));
}
}
customElements.define(CodeInputElement.is, CodeInputElement); |
focusInput() {
this.$.accessCodeInput.focusInput();
}
| random_line_split |
gc_engine.py | # -*- encoding: UTF-8 -*-
import re
import sys
import os
import traceback
from ..ibdawg import IBDAWG
from ..echo import echo
from . import gc_options
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getDictionary", \
"setOptions", "getOptions", "getOptionsLabels", "resetOptions", \
"ignoreRule", "resetIgnoreRules" ]
__version__ = u"${version}"
lang = u"${lang}"
locales = ${loc}
pkg = u"${implname}"
name = u"${name}"
version = u"${version}"
author = u"${author}"
# commons regexes
_zEndOfSentence = re.compile(u'([.?!:;…][ .?!… »”")]*|.$)')
_zBeginOfParagraph = re.compile(u"^\W*")
_zEndOfParagraph = re.compile(u"\W*$")
_zNextWord = re.compile(u" +(\w[\w-]*)")
_zPrevWord = re.compile(u"(\w[\w-]*) +$")
# grammar rules and dictionary
_rules = None
_dOptions = dict(gc_options.dOpt) # duplication necessary, to be able to reset to default
_aIgnoredRules = set()
_oDict = None
_dAnalyses = {} # cache for data from dictionary
_GLOBALS = globals()
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None):
"analyses the paragraph sText and returns list of errors"
aErrors = None
sAlt = sText
dDA = {}
dOpt = _dOptions if not dOptions else dOptions
# parse paragraph
try:
sNew, aErrors = _proofread(sText, sAlt, 0, True, dDA, sCountry, dOpt, bDebug)
if sNew:
sText = sNew
except:
raise
# parse sentences
for iStart, iEnd in _getSentenceBoundaries(sText):
if 4 < (iEnd - iStart) < 2000:
dDA.clear()
try:
_, errs = _proofread(sText[iStart:iEnd], sAlt[iStart:iEnd], iStart, False, dDA, sCountry, dOpt, bDebug)
aErrors.extend(errs)
except:
raise
return aErrors
def _getSentenceBoundaries (sText):
iStart = _zBeginOfParagraph.match(sText).end()
for m in _zEndOfSentence.finditer(sText):
yield (iStart, m.end())
iStart = m.end()
def _proofread (s, sx, nOffset, bParagraph, dDA, sCountry, dOptions, bDebug):
aErrs = []
bChange = False
if not bParagraph:
# after the first pass, we modify automatically some characters
if u" " in s:
s = s.replace(u" ", u' ') # nbsp
bChange = True
if u" " in s:
s = s.replace(u" ", u' ') # nnbsp
bChange = True
if u"@" in s:
s = s.replace(u"@", u' ')
bChange = True
if u"'" in s:
s = s.replace(u"'", u"’")
bChange = True
if u"‑" in s:
s = s.replace(u"‑", u"-") # nobreakdash
bChange = True
bIdRule = option('idrule')
for sOption, lRuleGroup in _getRules(bParagraph):
if not sOption or dOptions.get(sOption, False):
for zRegex, bUppercase, sRuleId, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(s):
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
if not sFuncCond or _GLOBALS[sFuncCond](s, sx, m, dDA, sCountry):
if cActionType == "-":
# grammar error
# (text, replacement, nOffset, m, iGroup, sId, bUppercase, sURL, bIdRule)
aErrs.append(_createError(s, sWhat, nOffset, m, eAct[0], sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption))
elif cActionType == "~":
# text processor
s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
bChange = True
if bDebug:
echo(u"~ " + s + " -- " + m.group(eAct[0]) + " # " + sRuleId)
elif cActionType == "=":
# disambiguation
_GLOBALS[sWhat](s, m, dDA)
if bDebug:
echo(u"= " + m.group(0) + " # " + sRuleId + "\nDA: " + str(dDA))
else:
echo("# error: unknown action at " + sRuleId)
except Exception as e:
raise Exception(str(e), sRuleId)
if bChange:
return (s, aErrs)
return (False, aErrs)
def _createWriterError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error for Writer (LO/OO)"
xErr = SingleProofreadingError()
#xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
xErr.nErrorStart = nOffset + m.start(iGroup)
xErr.nErrorLength = m.end(iGroup) - m.start(iGroup)
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sId
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, sugg.split("|")))
else:
xErr.aSuggestions = tuple(sugg.split("|"))
else:
xErr.aSuggestions = ()
elif sRepl == "_":
xErr.aSuggestions = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, m.expand(sRepl).split("|")))
else:
xErr.aSuggestions = tuple(m.expand(sRepl).split("|"))
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
if bIdRule:
xErr.aShortComment += " # " + sId
# URL
if sURL:
p = PropertyValue()
p.Name = "FullCommentURL"
p.Value = sURL
xErr.aProperties = (p,)
else:
xErr.aProperties = ()
return xErr
def _createDictError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error as a dictionary"
dErr = {}
dErr["nStart"] = nOffset + m.start(iGroup)
dErr["nEnd"] = nOffset + m.end(iGroup)
dErr["sRuleId"] = sId
dErr["sType"] = sOption if sOption else "notype"
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
else:
dErr["aSuggestions"] = sugg.split("|")
else:
dErr["aSuggestions"] = ()
elif sRepl == "_":
dErr["aSuggestions"] = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, m.expand(sRepl).split("|")))
else:
dErr["aSuggestions"] = m.expand(sRepl).split("|")
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
dErr["sMessage"] = sMessage
if bIdRule:
dErr["sMessage"] += " # " + sId
# URL
dErr["URL"] = sURL if sURL else ""
return dErr
def _rewrite (s, sRepl, iGroup, m, bUppercase):
"text processor: write sRepl in s at iGroup position"
ln = m.end(iGroup) - m.start(iGroup)
if sRepl == "*":
sNew = " " * ln
elif sRepl == ">" or sRepl == "_" or sRepl == u"~":
sNew = sRepl + " " * (ln-1)
elif sRepl == "@":
sNew = "@" * ln
elif sRepl[0:1] == "=":
if sRepl[1:2] != "@":
sNew = _GLOBALS[sRepl[1:]](s, m)
sNew = sNew + " " * (ln-len(sNew))
else:
sNew = _GLOBALS[sRepl[2:]](s, m)
sNew = sNew + "@" * (ln-len(sNew))
if bUppercase and m.group(iGroup)[0:1].isupper():
sNew = sNew.capitalize()
else:
sNew = m.expand(sRepl)
sNew = sNew + " " * (ln-len(sNew))
return s[0:m.start(iGroup)] + sNew + s[m.end(iGroup):]
def ignoreRule (sId):
_aIgnoredRules.add(sId)
def resetIgnoreRules ():
_aIgnoredRules.clear()
#### init
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_createError = _createWriterError
except ImportError:
_createError = _createDictError
def load ():
global _oDict
try:
_oDict = IBDAWG("${binary_dic}")
except:
traceback.print_exc()
def setOptions (dOpt):
_dOptions.update(dOpt)
def getOptions ():
return _dOptions
def getOptionsLabels (sLang):
return gc_options.getUI(sLang)
def resetOptions ():
global _dOptions
_dOptions = dict(gc_options.dOpt)
def getDictionary ():
return _oDict
def _getRules (bParagraph):
try:
if not bParagraph:
return _rules.lSentenceRules
return _rules.lParagraphRules
except:
_loadRules()
if not bParagraph:
return _rules.lSentenceRules
return _rules.lParagraphRules
def _loadRules2 ():
from itertools import chain
from . import gc_rules
global _rules
_rules = gc_rules
# compile rules regex
for rule in chain(_rules.lParagraphRules, _rules.lSentenceRules):
try:
rule[1] = re.compile(rule[1])
except:
echo("Bad regular expression in # " + str(rule[3]))
rule[1] = "(?i)<Grammalecte>"
def _loadRules ():
from itertools import chain
from . import gc_rules
global _rules
_rules = gc_rules
# compile rules regex
for rulegroup in chain(_rules.lParagraphRules, _rules.lSentenceRules):
for rule in rulegroup[1]:
try:
rule[0] = re.compile(rule[0])
except:
echo("Bad regular expression in # " + str(rule[2]))
rule[0] = "(?i)<Grammalecte>"
def _getPath ():
return os.path.join(os.path.dirname(sys.modules[__name__].__file__), __name__ + ".py")
#### common functions
def option (sOpt):
"return True if option sOpt is active"
return _dOptions.get(sOpt, False)
def displayInfo (dDA, tWord):
"for debugging: retrieve info of word"
if not tWord:
echo("> nothing to find")
return True
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
echo("> not in FSA")
return True
if tWord[0] in dDA:
echo("DA: " + str(dDA[tWord[0]]))
echo("FSA: " + str(_dAnalyses[tWord[1]]))
return True
def _storeMorphFromFSA (sWord):
"retrieves morphologies list from _oDict -> _dAnalyses"
global _dAnalyses
_dAnalyses[sWord] = _oDict.getMorph(sWord)
return True if _dAnalyses[sWord] else False
def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
"analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
if not tWord:
return bNoWord
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
return False
lMorph = dDA[tWord[0]] if tWord[0] in dDA else _dAnalyses[tWord[1]]
if not lMorph:
return False
p = re.compile(sPattern)
if bStrict:
return all(p.search(s) for s in lMorph)
return any(p.search(s) for s in lMorph)
def morphex (dDA, tWord, sPattern, sNegPattern, bNoWord=False):
"analyse a tuple (p | rd, sPattern, bStrict=True):
"analyse a word, return True if sPattern in morphologies (disambiguation off)"
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return False
if not _dAnalyses[sWord]:
return False
p = re.compile(sPattern)
if bStrict:
return all(p.search(s) for s in _dAnalyses[sWord])
return any(p.search(s) for s in _dAnalyses[sWord])
def analysex (sWord, sPattern, sNegPattern):
"analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return False
# check negative condition
np = re.compile(sNegPattern)
if any(np.search(s) for s in _dAnalyses[sWord]):
return False
# search sPattern
p = re.compile(sPattern)
return any(p.search(s) for s in _dAnalyses[sWord])
def stem (sWord):
"returns a list of sWord's stems"
if not sWord:
return []
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return []
return [ s[1:s.find(" ")] for s in _dAnalyses[sWord] ]
## functions to get text outside pattern scope
# warning: check compile_rules.py to understand how it works
def nextword (s, iStart, n):
"get the nth word of the input string or empty string"
m = re.match(u"( +[\\w%-]+){" + str(n-1) + u"} +([\\w%-]+)", s[iStart:])
if not m:
return None
return (iStart+m.start(2), m.group(2))
def prevword (s, iEnd, n):
"get the (-)nth word of the input string or empty string"
m = re.search(u"([\\w%-]+) +([\\w%-]+ +){" + str(n-1) + u"}$", s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def nextword1 (s, iStart):
"get next word (optimization)"
m = _zNextWord.match(s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword1 (s, iEnd):
"get previous word (optimization)"
m = _zPrevWord.search(s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def look (s, sPattern, sNegPattern=None):
"seek sPattern in s (before/after/fulltext), if sNegPattern not in s"
if sNegPattern and re.search(sNegPattern, s):
return False
if re.search(sPattern, s):
return True
return False
def look_chk1 (dDA, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=None):
"returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
m = re.search(sPattern, s)
if not m:
return False
try:
sWord = m.group(1)
nPos = m.start(1) + nOffset
except:
#print("Missing group 1")
return False
if sNegPatternGroup1:
return morphex(dDA, (nPos, sWord), sPatternGroup1, sNegPatternGroup1)
return morph(dDA, (nPos, sWord), sPatternGroup1, False)
#### Disambiguator
def select (dDA, nPos, sWord, sPattern, lDefault=None):
if not sWord:
return True
if nPos in dDA:
return True
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return True
if len(_dAnalyses[sWord]) == 1:
return True
lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(_dAnalyses[sWord]):
dDA[nPos] = lSelect
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
elif lDefault:
dDA[nPos] = lDefault
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
return True
def exclude (dDA, nPos, sWord, sPattern, lDefault=None):
if not sWord:
return True
if nPos in dDA:
return True
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return True
if len(_dAnalyses[sWord]) == 1:
return True
lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if not re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(_dAnalyses[sWord]):
dDA[nPos] = lSelect
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
elif lDefault:
dDA[nPos] = lDefault
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
return True
def define (dDA, nPos, lMorph):
dDA[nPos] = lMorph
#echo("= "+str(nPos)+" "+str(dDA[nPos]))
return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
${generated}
| osition, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
if not tWord:
return bNoWord
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
return False
lMorph = dDA[tWord[0]] if tWord[0] in dDA else _dAnalyses[tWord[1]]
# check negative condition
np = re.compile(sNegPattern)
if any(np.search(s) for s in lMorph):
return False
# search sPattern
p = re.compile(sPattern)
return any(p.search(s) for s in lMorph)
def analyse (sWo | identifier_body |
gc_engine.py | # -*- encoding: UTF-8 -*-
import re
import sys
import os
import traceback
from ..ibdawg import IBDAWG
from ..echo import echo
from . import gc_options
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getDictionary", \
"setOptions", "getOptions", "getOptionsLabels", "resetOptions", \
"ignoreRule", "resetIgnoreRules" ]
__version__ = u"${version}"
lang = u"${lang}"
locales = ${loc}
pkg = u"${implname}"
name = u"${name}"
version = u"${version}"
author = u"${author}"
# commons regexes
_zEndOfSentence = re.compile(u'([.?!:;…][ .?!… »”")]*|.$)')
_zBeginOfParagraph = re.compile(u"^\W*")
_zEndOfParagraph = re.compile(u"\W*$")
_zNextWord = re.compile(u" +(\w[\w-]*)")
_zPrevWord = re.compile(u"(\w[\w-]*) +$")
# grammar rules and dictionary
_rules = None
_dOptions = dict(gc_options.dOpt) # duplication necessary, to be able to reset to default
_aIgnoredRules = set()
_oDict = None
_dAnalyses = {} # cache for data from dictionary
_GLOBALS = globals()
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None):
"analyses the paragraph sText and returns list of errors"
aErrors = None
sAlt = sText
dDA = {}
dOpt = _dOptions if not dOptions else dOptions
# parse paragraph
try:
sNew, aErrors = _proofread(sText, sAlt, 0, True, dDA, sCountry, dOpt, bDebug)
if sNew:
sText = sNew
except:
raise
# parse sentences
for iStart, iEnd in _getSentenceBoundaries(sText):
if 4 < (iEnd - iStart) < 2000:
dDA.clear()
try:
_, errs = _proofread(sText[iStart:iEnd], sAlt[iStart:iEnd], iStart, False, dDA, sCountry, dOpt, bDebug)
aErrors.extend(errs)
except:
raise
return aErrors
def _getSentenceBoundaries (sText):
iStart = _zBeginOfParagraph.match(sText).end()
for m in _zEndOfSentence.finditer(sText):
yield (iStart, m.end())
iStart = m.end()
def _proofread (s, sx, nOffset, bParagraph, dDA, sCountry, dOptions, bDebug):
aErrs = []
bChange = False
if not bParagraph:
# after the first pass, we modify automatically some characters
if u" " in s:
s = s.replace(u" ", u' ') # nbsp
bChange = True
if u" " in s:
s = s.replace(u" ", u' ') # nnbsp
bChange = True
if u"@" in s:
s = s.replace(u"@", u' ')
bChange = True
if u"'" in s:
s = s.replace(u"'", u"’")
bChange = True
if u"‑" in s:
s = s.replace(u"‑", u"-") # nobreakdash
bChange = True
bIdRule = option('idrule')
for sOption, lRuleGroup in _getRules(bParagraph):
if not sOption or dOptions.get(sOption, False):
for zRegex, bUppercase, sRuleId, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(s):
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
if not sFuncCond or _GLOBALS[sFuncCond](s, sx, m, dDA, sCountry):
if cActionType == "-":
# grammar error
# (text, replacement, nOffset, m, iGroup, sId, bUppercase, sURL, bIdRule)
aErrs.append(_createError(s, sWhat, nOffset, m, eAct[0], sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption))
elif cActionType == "~":
# text processor
s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
bChange = True
if bDebug:
echo(u"~ " + s + " -- " + m.group(eAct[0]) + " # " + sRuleId)
elif cActionType == "=":
# disambiguation
_GLOBALS[sWhat](s, m, dDA)
if bDebug:
echo(u"= " + m.group(0) + " # " + sRuleId + "\nDA: " + str(dDA))
else:
echo("# error: unknown action at " + sRuleId)
except Exception as e:
raise Exception(str(e), sRuleId)
if bChange:
return (s, aErrs)
return (False, aErrs)
def _createWriterError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error for Writer (LO/OO)"
xErr = SingleProofreadingError()
#xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
xErr.nErrorStart = nOffset + m.start(iGroup)
xErr.nErrorLength = m.end(iGroup) - m.start(iGroup)
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sId
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, sugg.split("|")))
else:
xErr.aSuggestions = tuple(sugg.split("|"))
else:
xErr.aSuggestions = ()
elif sRepl == "_":
xErr.aSuggestions = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, m.expand(sRepl).split("|")))
else:
xErr.aSuggestions = tuple(m.expand(sRepl).split("|"))
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
if bIdRule:
xErr.aShortComment += " # " + sId
# URL
if sURL:
p = PropertyValue()
p.Name = "FullCommentURL"
p.Value = sURL
xErr.aProperties = (p,)
else:
xErr.aProperties = ()
return xErr
def _createDictError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error as a dictionary"
dErr = {}
dErr["nStart"] = nOffset + m.start(iGroup)
dErr["nEnd"] = nOffset + m.end(iGroup)
dErr["sRuleId"] = sId
dErr["sType"] = sOption if sOption else "notype"
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
else:
dErr["aSuggestions"] = sugg.split("|")
else:
dErr["aSuggestions"] = ()
elif sRepl == "_":
dErr["aSuggestions"] = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, m.expand(sRepl).split("|")))
else:
dErr["aSuggestions"] = m.expand(sRepl).split("|")
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
dErr["sMessage"] = sMessage
if bIdRule:
dErr["sMessage"] += " # " + sId
# URL
dErr["URL"] = sURL if sURL else ""
return dErr
def _rewrite (s, sRepl, iGroup, m, bUppercase):
"text processor: write sRepl in s at iGroup position"
ln = m.end(iGroup) - m.start(iGroup)
if sRepl == "*":
sNew = " " * ln
elif sRepl == ">" or sRepl == "_" or sRepl == u"~":
sNew = sRepl + " " * (ln-1)
elif sRepl == "@":
sNew = "@" * ln
elif sRepl[0:1] == "=":
if sRepl[1:2] != "@":
sNew = _GLOBALS[sRepl[1:]](s, m)
sNew = sNew + " " * (ln-len(sNew))
else:
sNew = _GLOBALS[sRepl[2:]](s, m)
sNew = sNew + "@" * (ln-len(sNew))
if bUppercase and m.group(iGroup)[0:1].isupper():
sNew = sNew.capitalize()
else:
sNew = m.expand(sRepl)
sNew = sNew + " " * (ln-len(sNew))
return s[0:m.start(iGroup)] + sNew + s[m.end(iGroup):]
def ignoreRule (sId):
_aIgnoredRules.add(sId)
def resetIgnoreRules ():
_aIgnoredRules.clear()
#### init
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_createError = _createWriterError
except ImportError:
_createError = _createDictError
def load ():
global _oDict
try:
_oDict = IBDAWG("${binary_dic}")
except:
traceback.print_exc()
def setOptions (dOpt):
_dOptions.update(dOpt)
def getOptions ():
return _dOptions
def getOptionsLabels (sLang):
return gc_options.getUI(sLang)
def resetOptions ():
global _dOptions
_dOptions = dict(gc_options.dOpt)
def getDictionary ():
return _oDict
def _getRules (bParagraph):
try:
if not bParagraph:
return _rules.lSentenceRules
return _rules.lParagraphRules
except:
_loadRules()
if not bParagraph:
return _rules.lSent | lParagraphRules
def _loadRules2 ():
from itertools import chain
from . import gc_rules
global _rules
_rules = gc_rules
# compile rules regex
for rule in chain(_rules.lParagraphRules, _rules.lSentenceRules):
try:
rule[1] = re.compile(rule[1])
except:
echo("Bad regular expression in # " + str(rule[3]))
rule[1] = "(?i)<Grammalecte>"
def _loadRules ():
from itertools import chain
from . import gc_rules
global _rules
_rules = gc_rules
# compile rules regex
for rulegroup in chain(_rules.lParagraphRules, _rules.lSentenceRules):
for rule in rulegroup[1]:
try:
rule[0] = re.compile(rule[0])
except:
echo("Bad regular expression in # " + str(rule[2]))
rule[0] = "(?i)<Grammalecte>"
def _getPath ():
return os.path.join(os.path.dirname(sys.modules[__name__].__file__), __name__ + ".py")
#### common functions
def option (sOpt):
"return True if option sOpt is active"
return _dOptions.get(sOpt, False)
def displayInfo (dDA, tWord):
"for debugging: retrieve info of word"
if not tWord:
echo("> nothing to find")
return True
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
echo("> not in FSA")
return True
if tWord[0] in dDA:
echo("DA: " + str(dDA[tWord[0]]))
echo("FSA: " + str(_dAnalyses[tWord[1]]))
return True
def _storeMorphFromFSA (sWord):
"retrieves morphologies list from _oDict -> _dAnalyses"
global _dAnalyses
_dAnalyses[sWord] = _oDict.getMorph(sWord)
return True if _dAnalyses[sWord] else False
def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
"analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
if not tWord:
return bNoWord
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
return False
lMorph = dDA[tWord[0]] if tWord[0] in dDA else _dAnalyses[tWord[1]]
if not lMorph:
return False
p = re.compile(sPattern)
if bStrict:
return all(p.search(s) for s in lMorph)
return any(p.search(s) for s in lMorph)
def morphex (dDA, tWord, sPattern, sNegPattern, bNoWord=False):
"analyse a tuple (position, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
if not tWord:
return bNoWord
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
return False
lMorph = dDA[tWord[0]] if tWord[0] in dDA else _dAnalyses[tWord[1]]
# check negative condition
np = re.compile(sNegPattern)
if any(np.search(s) for s in lMorph):
return False
# search sPattern
p = re.compile(sPattern)
return any(p.search(s) for s in lMorph)
def analyse (sWord, sPattern, bStrict=True):
"analyse a word, return True if sPattern in morphologies (disambiguation off)"
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return False
if not _dAnalyses[sWord]:
return False
p = re.compile(sPattern)
if bStrict:
return all(p.search(s) for s in _dAnalyses[sWord])
return any(p.search(s) for s in _dAnalyses[sWord])
def analysex (sWord, sPattern, sNegPattern):
"analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return False
# check negative condition
np = re.compile(sNegPattern)
if any(np.search(s) for s in _dAnalyses[sWord]):
return False
# search sPattern
p = re.compile(sPattern)
return any(p.search(s) for s in _dAnalyses[sWord])
def stem (sWord):
"returns a list of sWord's stems"
if not sWord:
return []
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return []
return [ s[1:s.find(" ")] for s in _dAnalyses[sWord] ]
## functions to get text outside pattern scope
# warning: check compile_rules.py to understand how it works
def nextword (s, iStart, n):
"get the nth word of the input string or empty string"
m = re.match(u"( +[\\w%-]+){" + str(n-1) + u"} +([\\w%-]+)", s[iStart:])
if not m:
return None
return (iStart+m.start(2), m.group(2))
def prevword (s, iEnd, n):
"get the (-)nth word of the input string or empty string"
m = re.search(u"([\\w%-]+) +([\\w%-]+ +){" + str(n-1) + u"}$", s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def nextword1 (s, iStart):
"get next word (optimization)"
m = _zNextWord.match(s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword1 (s, iEnd):
"get previous word (optimization)"
m = _zPrevWord.search(s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def look (s, sPattern, sNegPattern=None):
"seek sPattern in s (before/after/fulltext), if sNegPattern not in s"
if sNegPattern and re.search(sNegPattern, s):
return False
if re.search(sPattern, s):
return True
return False
def look_chk1 (dDA, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=None):
"returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
m = re.search(sPattern, s)
if not m:
return False
try:
sWord = m.group(1)
nPos = m.start(1) + nOffset
except:
#print("Missing group 1")
return False
if sNegPatternGroup1:
return morphex(dDA, (nPos, sWord), sPatternGroup1, sNegPatternGroup1)
return morph(dDA, (nPos, sWord), sPatternGroup1, False)
#### Disambiguator
def select (dDA, nPos, sWord, sPattern, lDefault=None):
if not sWord:
return True
if nPos in dDA:
return True
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return True
if len(_dAnalyses[sWord]) == 1:
return True
lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(_dAnalyses[sWord]):
dDA[nPos] = lSelect
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
elif lDefault:
dDA[nPos] = lDefault
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
return True
def exclude (dDA, nPos, sWord, sPattern, lDefault=None):
if not sWord:
return True
if nPos in dDA:
return True
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return True
if len(_dAnalyses[sWord]) == 1:
return True
lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if not re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(_dAnalyses[sWord]):
dDA[nPos] = lSelect
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
elif lDefault:
dDA[nPos] = lDefault
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
return True
def define (dDA, nPos, lMorph):
dDA[nPos] = lMorph
#echo("= "+str(nPos)+" "+str(dDA[nPos]))
return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
${generated}
| enceRules
return _rules. | conditional_block |
gc_engine.py | # -*- encoding: UTF-8 -*-
import re
import sys
import os
import traceback
from ..ibdawg import IBDAWG
from ..echo import echo
from . import gc_options
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getDictionary", \
"setOptions", "getOptions", "getOptionsLabels", "resetOptions", \
"ignoreRule", "resetIgnoreRules" ]
__version__ = u"${version}"
lang = u"${lang}"
locales = ${loc}
pkg = u"${implname}"
name = u"${name}"
version = u"${version}"
author = u"${author}"
# commons regexes
_zEndOfSentence = re.compile(u'([.?!:;…][ .?!… »”")]*|.$)')
_zBeginOfParagraph = re.compile(u"^\W*")
_zEndOfParagraph = re.compile(u"\W*$")
_zNextWord = re.compile(u" +(\w[\w-]*)")
_zPrevWord = re.compile(u"(\w[\w-]*) +$")
# grammar rules and dictionary
_rules = None
_dOptions = dict(gc_options.dOpt) # duplication necessary, to be able to reset to default
_aIgnoredRules = set()
_oDict = None
_dAnalyses = {} # cache for data from dictionary
_GLOBALS = globals()
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None):
"analyses the paragraph sText and returns list of errors"
aErrors = None
sAlt = sText
dDA = {}
dOpt = _dOptions if not dOptions else dOptions
# parse paragraph
try:
sNew, aErrors = _proofread(sText, sAlt, 0, True, dDA, sCountry, dOpt, bDebug)
if sNew:
sText = sNew
except:
raise
# parse sentences
for iStart, iEnd in _getSentenceBoundaries(sText):
if 4 < (iEnd - iStart) < 2000:
dDA.clear()
try:
_, errs = _proofread(sText[iStart:iEnd], sAlt[iStart:iEnd], iStart, False, dDA, sCountry, dOpt, bDebug)
aErrors.extend(errs)
except:
raise
return aErrors
def _getSentenceBoundaries (sText):
iStart = _zBeginOfParagraph.match(sText).end()
for m in _zEndOfSentence.finditer(sText):
yield (iStart, m.end())
iStart = m.end()
def _proofread (s, sx, nOffset, bParagraph, dDA, sCountry, dOptions, bDebug):
aErrs = []
bChange = False
if not bParagraph:
# after the first pass, we modify automatically some characters
if u" " in s:
s = s.replace(u" ", u' ') # nbsp
bChange = True
if u" " in s:
s = s.replace(u" ", u' ') # nnbsp
bChange = True
if u"@" in s:
s = s.replace(u"@", u' ')
bChange = True
if u"'" in s:
s = s.replace(u"'", u"’")
bChange = True
if u"‑" in s:
s = s.replace(u"‑", u"-") # nobreakdash
bChange = True
bIdRule = option('idrule')
for sOption, lRuleGroup in _getRules(bParagraph):
if not sOption or dOptions.get(sOption, False):
for zRegex, bUppercase, sRuleId, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(s):
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
if not sFuncCond or _GLOBALS[sFuncCond](s, sx, m, dDA, sCountry):
if cActionType == "-":
# grammar error
# (text, replacement, nOffset, m, iGroup, sId, bUppercase, sURL, bIdRule)
aErrs.append(_createError(s, sWhat, nOffset, m, eAct[0], sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption))
elif cActionType == "~":
# text processor
s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
bChange = True
if bDebug:
echo(u"~ " + s + " -- " + m.group(eAct[0]) + " # " + sRuleId)
elif cActionType == "=":
# disambiguation | _GLOBALS[sWhat](s, m, dDA)
if bDebug:
echo(u"= " + m.group(0) + " # " + sRuleId + "\nDA: " + str(dDA))
else:
echo("# error: unknown action at " + sRuleId)
except Exception as e:
raise Exception(str(e), sRuleId)
if bChange:
return (s, aErrs)
return (False, aErrs)
def _createWriterError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error for Writer (LO/OO)"
xErr = SingleProofreadingError()
#xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
xErr.nErrorStart = nOffset + m.start(iGroup)
xErr.nErrorLength = m.end(iGroup) - m.start(iGroup)
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sId
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, sugg.split("|")))
else:
xErr.aSuggestions = tuple(sugg.split("|"))
else:
xErr.aSuggestions = ()
elif sRepl == "_":
xErr.aSuggestions = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, m.expand(sRepl).split("|")))
else:
xErr.aSuggestions = tuple(m.expand(sRepl).split("|"))
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
if bIdRule:
xErr.aShortComment += " # " + sId
# URL
if sURL:
p = PropertyValue()
p.Name = "FullCommentURL"
p.Value = sURL
xErr.aProperties = (p,)
else:
xErr.aProperties = ()
return xErr
def _createDictError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error as a dictionary"
dErr = {}
dErr["nStart"] = nOffset + m.start(iGroup)
dErr["nEnd"] = nOffset + m.end(iGroup)
dErr["sRuleId"] = sId
dErr["sType"] = sOption if sOption else "notype"
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
else:
dErr["aSuggestions"] = sugg.split("|")
else:
dErr["aSuggestions"] = ()
elif sRepl == "_":
dErr["aSuggestions"] = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, m.expand(sRepl).split("|")))
else:
dErr["aSuggestions"] = m.expand(sRepl).split("|")
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
dErr["sMessage"] = sMessage
if bIdRule:
dErr["sMessage"] += " # " + sId
# URL
dErr["URL"] = sURL if sURL else ""
return dErr
def _rewrite (s, sRepl, iGroup, m, bUppercase):
"text processor: write sRepl in s at iGroup position"
ln = m.end(iGroup) - m.start(iGroup)
if sRepl == "*":
sNew = " " * ln
elif sRepl == ">" or sRepl == "_" or sRepl == u"~":
sNew = sRepl + " " * (ln-1)
elif sRepl == "@":
sNew = "@" * ln
elif sRepl[0:1] == "=":
if sRepl[1:2] != "@":
sNew = _GLOBALS[sRepl[1:]](s, m)
sNew = sNew + " " * (ln-len(sNew))
else:
sNew = _GLOBALS[sRepl[2:]](s, m)
sNew = sNew + "@" * (ln-len(sNew))
if bUppercase and m.group(iGroup)[0:1].isupper():
sNew = sNew.capitalize()
else:
sNew = m.expand(sRepl)
sNew = sNew + " " * (ln-len(sNew))
return s[0:m.start(iGroup)] + sNew + s[m.end(iGroup):]
def ignoreRule (sId):
_aIgnoredRules.add(sId)
def resetIgnoreRules ():
_aIgnoredRules.clear()
#### init
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_createError = _createWriterError
except ImportError:
_createError = _createDictError
def load ():
global _oDict
try:
_oDict = IBDAWG("${binary_dic}")
except:
traceback.print_exc()
def setOptions (dOpt):
_dOptions.update(dOpt)
def getOptions ():
return _dOptions
def getOptionsLabels (sLang):
return gc_options.getUI(sLang)
def resetOptions ():
global _dOptions
_dOptions = dict(gc_options.dOpt)
def getDictionary ():
return _oDict
def _getRules (bParagraph):
try:
if not bParagraph:
return _rules.lSentenceRules
return _rules.lParagraphRules
except:
_loadRules()
if not bParagraph:
return _rules.lSentenceRules
return _rules.lParagraphRules
def _loadRules2 ():
from itertools import chain
from . import gc_rules
global _rules
_rules = gc_rules
# compile rules regex
for rule in chain(_rules.lParagraphRules, _rules.lSentenceRules):
try:
rule[1] = re.compile(rule[1])
except:
echo("Bad regular expression in # " + str(rule[3]))
rule[1] = "(?i)<Grammalecte>"
def _loadRules ():
from itertools import chain
from . import gc_rules
global _rules
_rules = gc_rules
# compile rules regex
for rulegroup in chain(_rules.lParagraphRules, _rules.lSentenceRules):
for rule in rulegroup[1]:
try:
rule[0] = re.compile(rule[0])
except:
echo("Bad regular expression in # " + str(rule[2]))
rule[0] = "(?i)<Grammalecte>"
def _getPath ():
return os.path.join(os.path.dirname(sys.modules[__name__].__file__), __name__ + ".py")
#### common functions
def option (sOpt):
"return True if option sOpt is active"
return _dOptions.get(sOpt, False)
def displayInfo (dDA, tWord):
"for debugging: retrieve info of word"
if not tWord:
echo("> nothing to find")
return True
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
echo("> not in FSA")
return True
if tWord[0] in dDA:
echo("DA: " + str(dDA[tWord[0]]))
echo("FSA: " + str(_dAnalyses[tWord[1]]))
return True
def _storeMorphFromFSA (sWord):
"retrieves morphologies list from _oDict -> _dAnalyses"
global _dAnalyses
_dAnalyses[sWord] = _oDict.getMorph(sWord)
return True if _dAnalyses[sWord] else False
def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
"analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
if not tWord:
return bNoWord
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
return False
lMorph = dDA[tWord[0]] if tWord[0] in dDA else _dAnalyses[tWord[1]]
if not lMorph:
return False
p = re.compile(sPattern)
if bStrict:
return all(p.search(s) for s in lMorph)
return any(p.search(s) for s in lMorph)
def morphex (dDA, tWord, sPattern, sNegPattern, bNoWord=False):
"analyse a tuple (position, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
if not tWord:
return bNoWord
if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
return False
lMorph = dDA[tWord[0]] if tWord[0] in dDA else _dAnalyses[tWord[1]]
# check negative condition
np = re.compile(sNegPattern)
if any(np.search(s) for s in lMorph):
return False
# search sPattern
p = re.compile(sPattern)
return any(p.search(s) for s in lMorph)
def analyse (sWord, sPattern, bStrict=True):
"analyse a word, return True if sPattern in morphologies (disambiguation off)"
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return False
if not _dAnalyses[sWord]:
return False
p = re.compile(sPattern)
if bStrict:
return all(p.search(s) for s in _dAnalyses[sWord])
return any(p.search(s) for s in _dAnalyses[sWord])
def analysex (sWord, sPattern, sNegPattern):
"analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return False
# check negative condition
np = re.compile(sNegPattern)
if any(np.search(s) for s in _dAnalyses[sWord]):
return False
# search sPattern
p = re.compile(sPattern)
return any(p.search(s) for s in _dAnalyses[sWord])
def stem (sWord):
"returns a list of sWord's stems"
if not sWord:
return []
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return []
return [ s[1:s.find(" ")] for s in _dAnalyses[sWord] ]
## functions to get text outside pattern scope
# warning: check compile_rules.py to understand how it works
def nextword (s, iStart, n):
"get the nth word of the input string or empty string"
m = re.match(u"( +[\\w%-]+){" + str(n-1) + u"} +([\\w%-]+)", s[iStart:])
if not m:
return None
return (iStart+m.start(2), m.group(2))
def prevword (s, iEnd, n):
"get the (-)nth word of the input string or empty string"
m = re.search(u"([\\w%-]+) +([\\w%-]+ +){" + str(n-1) + u"}$", s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def nextword1 (s, iStart):
"get next word (optimization)"
m = _zNextWord.match(s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword1 (s, iEnd):
"get previous word (optimization)"
m = _zPrevWord.search(s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def look (s, sPattern, sNegPattern=None):
"seek sPattern in s (before/after/fulltext), if sNegPattern not in s"
if sNegPattern and re.search(sNegPattern, s):
return False
if re.search(sPattern, s):
return True
return False
def look_chk1 (dDA, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=None):
"returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
m = re.search(sPattern, s)
if not m:
return False
try:
sWord = m.group(1)
nPos = m.start(1) + nOffset
except:
#print("Missing group 1")
return False
if sNegPatternGroup1:
return morphex(dDA, (nPos, sWord), sPatternGroup1, sNegPatternGroup1)
return morph(dDA, (nPos, sWord), sPatternGroup1, False)
#### Disambiguator
def select (dDA, nPos, sWord, sPattern, lDefault=None):
if not sWord:
return True
if nPos in dDA:
return True
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return True
if len(_dAnalyses[sWord]) == 1:
return True
lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(_dAnalyses[sWord]):
dDA[nPos] = lSelect
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
elif lDefault:
dDA[nPos] = lDefault
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
return True
def exclude (dDA, nPos, sWord, sPattern, lDefault=None):
if not sWord:
return True
if nPos in dDA:
return True
if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
return True
if len(_dAnalyses[sWord]) == 1:
return True
lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if not re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(_dAnalyses[sWord]):
dDA[nPos] = lSelect
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
elif lDefault:
dDA[nPos] = lDefault
#echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
return True
def define (dDA, nPos, lMorph):
dDA[nPos] = lMorph
#echo("= "+str(nPos)+" "+str(dDA[nPos]))
return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
${generated} | random_line_split | |
gc_engine.py | # -*- encoding: UTF-8 -*-
import re
import sys
import os
import traceback
from ..ibdawg import IBDAWG
from ..echo import echo
from . import gc_options
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getDictionary", \
"setOptions", "getOptions", "getOptionsLabels", "resetOptions", \
"ignoreRule", "resetIgnoreRules" ]
__version__ = u"${version}"
lang = u"${lang}"
locales = ${loc}
pkg = u"${implname}"
name = u"${name}"
version = u"${version}"
author = u"${author}"
# commons regexes
_zEndOfSentence = re.compile(u'([.?!:;…][ .?!… »”")]*|.$)')
_zBeginOfParagraph = re.compile(u"^\W*")
_zEndOfParagraph = re.compile(u"\W*$")
_zNextWord = re.compile(u" +(\w[\w-]*)")
_zPrevWord = re.compile(u"(\w[\w-]*) +$")
# grammar rules and dictionary
_rules = None
_dOptions = dict(gc_options.dOpt) # duplication necessary, to be able to reset to default
_aIgnoredRules = set()
_oDict = None
_dAnalyses = {} # cache for data from dictionary
_GLOBALS = globals()
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None):
"analyses the paragraph sText and returns list of errors"
aErrors = None
sAlt = sText
dDA = {}
dOpt = _dOptions if not dOptions else dOptions
# parse paragraph
try:
sNew, aErrors = _proofread(sText, sAlt, 0, True, dDA, sCountry, dOpt, bDebug)
if sNew:
sText = sNew
except:
raise
# parse sentences
for iStart, iEnd in _getSentenceBoundaries(sText):
if 4 < (iEnd - iStart) < 2000:
dDA.clear()
try:
_, errs = _proofread(sText[iStart:iEnd], sAlt[iStart:iEnd], iStart, False, dDA, sCountry, dOpt, bDebug)
aErrors.extend(errs)
except:
raise
return aErrors
def _getSentenceBoundaries (sText):
iStart = _zBeginOfParagraph.match(sText).end()
for m in _zEndOfSentence.finditer(sText):
yield (iStart, m.end())
iStart = m.end()
def _proofread (s, sx, nOffset, bParagraph, dDA, sCountry, dOptions, bDebug):
aErrs = []
bChange = False
if not bParagraph:
# after the first pass, we modify automatically some characters
if u" " in s:
s = s.replace(u" ", u' ') # nbsp
bChange = True
if u" " in s:
s = s.replace(u" ", u' ') # nnbsp
bChange = True
if u"@" in s:
s = s.replace(u"@", u' ')
bChange = True
if u"'" in s:
s = s.replace(u"'", u"’")
bChange = True
if u"‑" in s:
s = s.replace(u"‑", u"-") # nobreakdash
bChange = True
bIdRule = option('idrule')
for sOption, lRuleGroup in _getRules(bParagraph):
if not sOption or dOptions.get(sOption, False):
for zRegex, bUppercase, sRuleId, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(s):
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
if not sFuncCond or _GLOBALS[sFuncCond](s, sx, m, dDA, sCountry):
if cActionType == "-":
# grammar error
# (text, replacement, nOffset, m, iGroup, sId, bUppercase, sURL, bIdRule)
aErrs.append(_createError(s, sWhat, nOffset, m, eAct[0], sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption))
elif cActionType == "~":
# text processor
s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
bChange = True
if bDebug:
echo(u"~ " + s + " -- " + m.group(eAct[0]) + " # " + sRuleId)
elif cActionType == "=":
# disambiguation
_GLOBALS[sWhat](s, m, dDA)
if bDebug:
echo(u"= " + m.group(0) + " # " + sRuleId + "\nDA: " + str(dDA))
else:
echo("# error: unknown action at " + sRuleId)
except Exception as e:
raise Exception(str(e), sRuleId)
if bChange:
return (s, aErrs)
return (False, aErrs)
def _createWriterError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error for Writer (LO/OO)"
xErr = SingleProofreadingError()
#xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
xErr.nErrorStart = nOffset + m.start(iGroup)
xErr.nErrorLength = m.end(iGroup) - m.start(iGroup)
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sId
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, sugg.split("|")))
else:
xErr.aSuggestions = tuple(sugg.split("|"))
else:
xErr.aSuggestions = ()
elif sRepl == "_":
xErr.aSuggestions = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
xErr.aSuggestions = tuple(map(str.capitalize, m.expand(sRepl).split("|")))
else:
xErr.aSuggestions = tuple(m.expand(sRepl).split("|"))
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
if bIdRule:
xErr.aShortComment += " # " + sId
# URL
if sURL:
p = PropertyValue()
p.Name = "FullCommentURL"
p.Value = sURL
xErr.aProperties = (p,)
else:
xErr.aProperties = ()
return xErr
def _createDictError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error as a dictionary"
dErr = {}
dErr["nStart"] = nOffset + m.start(iGroup)
dErr["nEnd"] = nOffset + m.end(iGroup)
dErr["sRuleId"] = sId
dErr["sType"] = sOption if sOption else "notype"
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
else:
dErr["aSuggestions"] = sugg.split("|")
else:
dErr["aSuggestions"] = ()
elif sRepl == "_":
dErr["aSuggestions"] = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, m.expand(sRepl).split("|")))
else:
dErr["aSuggestions"] = m.expand(sRepl).split("|")
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
dErr["sMessage"] = sMessage
if bIdRule:
dErr["sMessage"] += " # " + sId
# URL
dErr["URL"] = sURL if sURL else ""
return dErr
def _rewrite (s, sRepl, iGroup, m, bUppercase):
"text processor: write sRepl in s at iGroup position"
ln = m.end(iGroup) - m.start(iGroup)
if sRepl == "*":
sNew = " " * ln
elif sRepl == ">" or sRepl == "_" or sRepl == u"~":
sNew = sRepl + " " * (ln-1)
elif sRepl == "@":
sNew = "@" * ln
elif sRepl[0:1] == "=":
if sRepl[1:2] != "@":
sNew = _GLOBALS[sRepl[1:]](s, m)
sNew = sNew + " " * (ln-len(sNew))
else:
sNew = _GLOBALS[sRepl[2:]](s, m)
sNew = sNew + "@" * (ln-len(sNew))
if bUppercase and m.group(iGroup)[0:1].isupper():
sNew = sNew.capitalize()
else:
sNew = m.expand(sRepl)
sNew = sNew + " " * (ln-len(sNew))
return s[0:m.start(iGroup)] + sNew + s[m.end(iGroup):]
def ignoreRule (sId):
    "disable the rule whose identifier is <sId> for subsequent checks"
    # _aIgnoredRules is a module-level set consulted by the rule loop
    _aIgnoredRules.add(sId)
def resetIgnoreRules ():
    "re-enable every rule previously disabled with ignoreRule()"
    _aIgnoredRules.clear()
#### init
# Select the error factory at import time: inside LibreOffice/OpenOffice the
# UNO types are importable and errors are built as SingleProofreadingError
# objects; anywhere else (CLI/server usage) fall back to plain dictionaries.
try:
    # LibreOffice / OpenOffice
    from com.sun.star.linguistic2 import SingleProofreadingError
    from com.sun.star.text.TextMarkupType import PROOFREADING
    from com.sun.star.beans import PropertyValue
    #import lightproof_handler_${implname} as opt
    _createError = _createWriterError
except ImportError:
    _createError = _createDictError
def load ():
    """Load the binary dictionary (IBDAWG) into the module-level _oDict.

    Failures are logged and swallowed so that a missing/corrupt dictionary
    does not abort the caller; _oDict is then left unset.
    """
    global _oDict
    try:
        _oDict = IBDAWG("${binary_dic}")
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        traceback.print_exc()
def setOptions (dOpt):
    "merge <dOpt> into the module-level options dictionary"
    _dOptions.update(dOpt)
def getOptions ():
    "return the module-level options dictionary (live reference, not a copy)"
    return _dOptions
def getOptionsLabels (sLang):
    "return the UI labels of the options for language <sLang> (delegates to gc_options)"
    return gc_options.getUI(sLang)
def resetOptions ():
    "restore the options to the defaults declared in gc_options.dOpt"
    global _dOptions
    _dOptions = dict(gc_options.dOpt)
def getDictionary ():
    "return the loaded IBDAWG dictionary object (see load())"
    return _oDict
def _getRules (bParagraph):
    """Return the compiled rule list: paragraph rules if <bParagraph> is truthy,
    sentence rules otherwise. Loads the rules lazily on first access (the
    module-level _rules global does not exist until _loadRules() has run).
    """
    try:
        if not bParagraph:
            return _rules.lSentenceRules
        return _rules.lParagraphRules
    except Exception:
        # was a bare `except:`; the expected failure here is NameError on the
        # first call, before _loadRules() has created _rules
        _loadRules()
        if not bParagraph:
            return _rules.lSentenceRules
        return _rules.lParagraphRules
def _loadRules2 ():
    """Alternate rule loader for a flat rule layout (regex source at rule[1],
    rule id at rule[3]).

    NOTE(review): apparently superseded by _loadRules() below, which expects
    grouped rules — confirm which layout gc_rules actually uses before removing.
    """
    from itertools import chain
    from . import gc_rules
    global _rules
    _rules = gc_rules
    # compile rules regex
    for rule in chain(_rules.lParagraphRules, _rules.lSentenceRules):
        try:
            rule[1] = re.compile(rule[1])
        except:
            # keep the engine running: substitute a pattern that never matches
            # NOTE(review): the fallback stays a plain string, not a compiled
            # pattern — verify downstream code copes with that
            echo("Bad regular expression in # " + str(rule[3]))
            rule[1] = "(?i)<Grammalecte>"
def _loadRules ():
    """Import gc_rules, bind it to the module-level _rules, and compile every
    rule regex in place (regex source at rule[0], rule id at rule[2])."""
    from itertools import chain
    from . import gc_rules
    global _rules
    _rules = gc_rules
    # compile rules regex
    for rulegroup in chain(_rules.lParagraphRules, _rules.lSentenceRules):
        for rule in rulegroup[1]:
            try:
                rule[0] = re.compile(rule[0])
            except Exception:
                # was a bare `except:`; a bad pattern must not abort loading —
                # substitute a pattern that can never match real text
                echo("Bad regular expression in # " + str(rule[2]))
                rule[0] = "(?i)<Grammalecte>"
def _getPath ():
    "return the filesystem path of this module's .py file"
    # NOTE(review): for a dotted module name this yields "<dir>/<pkg.mod>.py" —
    # confirm that is the intended behavior
    return os.path.join(os.path.dirname(sys.modules[__name__].__file__), __name__ + ".py")
#### common functions
def option (sOpt):
| n True if option sOpt is active"
return _dOptions.get(sOpt, False)
def displayInfo (dDA, tWord):
    "for debugging: retrieve info of word"
    # tWord is a (position, word) tuple; dDA maps positions to the
    # disambiguated morphology lists
    if not tWord:
        echo("> nothing to find")
        return True
    # cache the word's morphologies if they are not known yet
    if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
        echo("> not in FSA")
        return True
    if tWord[0] in dDA:
        echo("DA: " + str(dDA[tWord[0]]))
    echo("FSA: " + str(_dAnalyses[tWord[1]]))
    return True
def _storeMorphFromFSA (sWord):
    """Query _oDict for <sWord>'s morphologies and cache them in _dAnalyses.

    Returns True when at least one morphology was found. The empty result is
    cached too, so the dictionary is only queried once per word.
    """
    global _dAnalyses
    _dAnalyses[sWord] = _oDict.getMorph(sWord)
    # was: `return True if _dAnalyses[sWord] else False`
    return bool(_dAnalyses[sWord])
def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
    "analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
    if not tWord:
        return bNoWord
    nPos, sWord = tWord[0], tWord[1]
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return False
    # disambiguated morphologies (dDA) take precedence over the FSA cache
    lMorph = dDA.get(nPos, _dAnalyses[sWord])
    if not lMorph:
        return False
    zPattern = re.compile(sPattern)
    fnCheck = all if bStrict else any
    return fnCheck(zPattern.search(sMorph) for sMorph in lMorph)
def morphex (dDA, tWord, sPattern, sNegPattern, bNoWord=False):
    "analyse a tuple (position, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
    if not tWord:
        return bNoWord
    nPos, sWord = tWord[0], tWord[1]
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return False
    # disambiguated morphologies (dDA) take precedence over the FSA cache
    lMorph = dDA.get(nPos, _dAnalyses[sWord])
    # reject if any morphology matches the negative pattern
    zNeg = re.compile(sNegPattern)
    if any(zNeg.search(sMorph) for sMorph in lMorph):
        return False
    # accept if any morphology matches the positive pattern
    zPos = re.compile(sPattern)
    return any(zPos.search(sMorph) for sMorph in lMorph)
def analyse (sWord, sPattern, bStrict=True):
    "analyse a word, return True if sPattern in morphologies (disambiguation off)"
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return False
    lMorph = _dAnalyses[sWord]
    if not lMorph:
        return False
    zPattern = re.compile(sPattern)
    # strict: every morphology must match; lax: one match is enough
    fnCheck = all if bStrict else any
    return fnCheck(zPattern.search(sMorph) for sMorph in lMorph)
def analysex (sWord, sPattern, sNegPattern):
    "analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return False
    lMorph = _dAnalyses[sWord]
    # the negative pattern must match no morphology at all
    zNeg = re.compile(sNegPattern)
    if any(zNeg.search(sMorph) for sMorph in lMorph):
        return False
    # ... and the positive pattern must match at least one
    zPos = re.compile(sPattern)
    return any(zPos.search(sMorph) for sMorph in lMorph)
def stem (sWord):
    "returns a list of sWord's stems"
    if not sWord:
        return []
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return []
    # assumes each analysis is "<1-char marker>lemma <tags…>": the lemma is the
    # text between the first character and the first space.
    # NOTE(review): if an analysis contains no space, find() returns -1 and the
    # slice silently drops the last character — confirm analyses always have one.
    return [ s[1:s.find(" ")] for s in _dAnalyses[sWord] ]
## functions to get text outside pattern scope
# warning: check compile_rules.py to understand how it works
def nextword (s, iStart, n):
    "get the nth word after position iStart as (position, word), or None"
    zPattern = re.compile(u"( +[\\w%-]+){" + str(n-1) + u"} +([\\w%-]+)")
    m = zPattern.match(s[iStart:])
    if m:
        # position is reported in the coordinates of the full string
        return (iStart + m.start(2), m.group(2))
    return None
def prevword (s, iEnd, n):
    "get the nth word before position iEnd as (position, word), or None"
    zPattern = re.compile(u"([\\w%-]+) +([\\w%-]+ +){" + str(n-1) + u"}$")
    m = zPattern.search(s[:iEnd])
    if m:
        return (m.start(1), m.group(1))
    return None
def nextword1 (s, iStart):
    "get next word (optimization)"
    # fast path of nextword(s, iStart, 1) using the precompiled _zNextWord regex
    m = _zNextWord.match(s[iStart:])
    if not m:
        return None
    return (iStart+m.start(1), m.group(1))
def prevword1 (s, iEnd):
    "get previous word (optimization)"
    # fast path of prevword(s, iEnd, 1) using the precompiled _zPrevWord regex
    m = _zPrevWord.search(s[:iEnd])
    if not m:
        return None
    return (m.start(1), m.group(1))
def look (s, sPattern, sNegPattern=None):
    """Return True if sPattern matches somewhere in s, unless sNegPattern
    (when given) also matches. Used to inspect text outside a rule's scope.
    """
    if sNegPattern and re.search(sNegPattern, s):
        return False
    # was an if/return True/return False ladder
    return bool(re.search(sPattern, s))
def look_chk1 (dDA, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=None):
    "returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
    m = re.search(sPattern, s)
    if not m:
        return False
    try:
        sWord = m.group(1)
        nPos = m.start(1) + nOffset
    except IndexError:
        # was a bare `except:`; the only expected failure is "no such group"
        # when sPattern defines no capturing group 1
        return False
    if sNegPatternGroup1:
        return morphex(dDA, (nPos, sWord), sPatternGroup1, sNegPatternGroup1)
    # bStrict=False: any morphology matching the pattern is enough
    return morph(dDA, (nPos, sWord), sPatternGroup1, False)
#### Disambiguator
def select (dDA, nPos, sWord, sPattern, lDefault=None):
    """Disambiguation: keep only the morphologies of <sWord> matching <sPattern>.

    Stores the filtered list in dDA[nPos]; falls back to lDefault when nothing
    matches. Always returns True so it can be chained inside rule conditions.
    """
    if not sWord:
        return True
    if nPos in dDA:
        # position already disambiguated: leave it untouched
        return True
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return True
    if len(_dAnalyses[sWord]) == 1:
        # a single analysis: nothing to disambiguate
        return True
    lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if re.search(sPattern, sMorph) ]
    if lSelect:
        # only record a subset; recording the full list would be a no-op
        if len(lSelect) != len(_dAnalyses[sWord]):
            dDA[nPos] = lSelect
            #echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
    elif lDefault:
        dDA[nPos] = lDefault
        #echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
    return True
def exclude (dDA, nPos, sWord, sPattern, lDefault=None):
    """Disambiguation: discard the morphologies of <sWord> matching <sPattern>
    (mirror image of select() above).

    Stores the surviving list in dDA[nPos]; falls back to lDefault when nothing
    survives. Always returns True so it can be chained inside rule conditions.
    """
    if not sWord:
        return True
    if nPos in dDA:
        # position already disambiguated: leave it untouched
        return True
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return True
    if len(_dAnalyses[sWord]) == 1:
        # a single analysis: nothing to disambiguate
        return True
    lSelect = [ sMorph for sMorph in _dAnalyses[sWord] if not re.search(sPattern, sMorph) ]
    if lSelect:
        # only record a proper subset; the full list would be a no-op
        if len(lSelect) != len(_dAnalyses[sWord]):
            dDA[nPos] = lSelect
            #echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
    elif lDefault:
        dDA[nPos] = lDefault
        #echo("= "+sWord+" "+str(dDA.get(nPos, "null")))
    return True
def define (dDA, nPos, lMorph):
    "disambiguation: force the morphology list at position nPos to lMorph"
    dDA[nPos] = lMorph
    #echo("= "+str(nPos)+" "+str(dDA[nPos]))
    return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
${generated}
| "retur | identifier_name |
_stream_transform.js | // Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a transform stream is a readable/writable stream where you do
// something with the data. Sometimes it's called a "filter",
// but that's not a great name for it, since that implies a thing where
// some bits pass through, and others are simply ignored. (That would
// be a valid example of a transform, of course.)
//
// While the output is causally related to the input, it's not a
// necessarily symmetric or synchronous transformation. For example,
// a zlib stream might take multiple plain-text writes(), and then
// emit a single compressed chunk some time in the future.
//
// Here's how this works:
//
// The Transform stream has all the aspects of the readable and writable
// stream services. When you write(chunk), that calls _write(chunk,cb)
// internally, and returns false if there's a lot of pending writes
// buffered up. When you call read(), that calls _read(n) until
// there's enough pending readable data buffered up.
//
// In a transform stream, the written data is placed in a buffer. When
// _read(n) is called, it transforms the queued up data, calling the
// buffered _write cb's as it consumes chunks. If consuming a single
// written chunk would result in multiple output chunks, then the first
// outputted bit calls the readcb, and subsequent chunks just go into
// the read buffer, and will cause it to emit 'readable' if necessary.
//
// This way, back-pressure is actually determined by the reading side,
// since _read has to be called to start processing a new chunk. However,
// a pathological inflate type of transform can cause excessive buffering
// here. For example, imagine a stream where every byte of input is
// interpreted as an integer from 0-255, and then results in that many
// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
// 1kb of data being output. In this case, you could write a very small
// amount of input, and end up with a very large amount of output. In
// such a pathological inflating mechanism, there'd be no way to tell
// the system to stop doing the transform. A single 4MB write could
// cause the system to run out of memory.
//
// However, even in such a pathological case, only a single written chunk
// would be consumed, and then the rest would wait (un-transformed) until
// the results of the previous transformed chunk were consumed.
module.exports = Transform;
var Duplex = require('./_stream_duplex');
/*<replacement>*/
var util = require('core-util-is');
util.inherits = require('inherits');
/*</replacement>*/
util.inherits(Transform, Duplex);
// Per-instance bookkeeping for a Transform stream: the single buffered
// write (chunk + callback) and the flags that drive transform scheduling.
function TransformState(options, stream) {
  // bound trampoline handed to _transform() as its completion callback
  this.afterTransform = function(er, data) {
    return afterTransform(stream, er, data);
  };
  this.needTransform = false;   // a _read() arrived with nothing buffered
  this.transforming = false;    // a _transform() call is in flight
  this.writecb = null;          // callback of the buffered write, if any
  this.writechunk = null;       // chunk of the buffered write, if any
}
// Completion callback for _transform(): push the produced data to the
// readable side, release the buffered write, and keep the pump going.
function afterTransform(stream, er, data) {
  var ts = stream._transformState;
  ts.transforming = false;
  var cb = ts.writecb;
  // a duplicate completion would find writecb already cleared
  if (!cb)
    return stream.emit('error', new Error('no writecb in Transform class'));
  ts.writechunk = null;
  ts.writecb = null;
  if (!util.isNullOrUndefined(data))
    stream.push(data);
  // ack the buffered write (forwarding any transform error) before re-priming
  if (cb)
    cb(er);
  var rs = stream._readableState;
  rs.reading = false;
  // if the readable side still has appetite, ask for the next chunk now
  if (rs.needReadable || rs.length < rs.highWaterMark) {
    stream._read(rs.highWaterMark);
  }
}
function Transform(options) {
if (!(this instanceof Transform))
return new Transform(options);
Duplex.call(this, options); | this._transformState = new TransformState(options, this);
// when the writable side finishes, then flush out anything remaining.
var stream = this;
// start out asking for a readable event once data is transformed.
this._readableState.needReadable = true;
// we have implemented the _read method, and done the other things
// that Readable wants before the first _read call, so unset the
// sync guard flag.
this._readableState.sync = false;
this.once('prefinish', function() {
if (util.isFunction(this._flush))
this._flush(function(er) {
done(stream, er);
});
else
done(stream);
});
}
// Pushing data means the pending _read() has been satisfied; clear the
// "need a transform" flag before delegating to the Duplex implementation.
Transform.prototype.push = function(chunk, encoding) {
  this._transformState.needTransform = false;
  return Duplex.prototype.push.call(this, chunk, encoding);
};
// This is the part where you do stuff!
// override this function in implementation services.
// 'chunk' is an input chunk.
//
// Call `push(newChunk)` to pass along transformed output
// to the readable side. You may call 'push' zero or more times.
//
// Call `cb(err)` when you are done with this chunk. If you pass
// an error, then that'll put the hurt on the whole operation. If you
// never call cb(), then you'll never get another chunk.
//
// The base implementation is abstract and always throws.
Transform.prototype._transform = function(chunk, encoding, cb) {
  throw new Error('not implemented');
};
// Buffer exactly one written chunk; kick _read() so the chunk gets
// transformed as soon as the readable side wants more data.
Transform.prototype._write = function(chunk, encoding, cb) {
  var ts = this._transformState;
  ts.writecb = cb;
  ts.writechunk = chunk;
  ts.writeencoding = encoding;
  if (!ts.transforming) {
    var rs = this._readableState;
    // transform now if a _read() is already pending or the buffer is low
    if (ts.needTransform ||
        rs.needReadable ||
        rs.length < rs.highWaterMark)
      this._read(rs.highWaterMark);
  }
};
// Doesn't matter what the args are here.
// _transform does all the work.
// That we got here means that the readable side wants more data.
Transform.prototype._read = function(n) {
  var ts = this._transformState;
  if (!util.isNull(ts.writechunk) && ts.writecb && !ts.transforming) {
    // a chunk is buffered and no transform is in flight: process it now
    ts.transforming = true;
    this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
  } else {
    // mark that we need a transform, so that any data that comes in
    // will get processed, now that we've asked for it.
    ts.needTransform = true;
  }
};
function done(stream, er) {
if (er)
return stream.emit('error', er);
// if there's nothing in the write buffer, then that means
// that nothing more will ever be provided
var ws = stream._writableState;
var ts = stream._transformState;
if (ws.length)
throw new Error('calling transform done when ws.length != 0');
if (ts.transforming)
throw new Error('calling transform done when still transforming');
return stream.push(null);
} | random_line_split | |
_stream_transform.js | // Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a transform stream is a readable/writable stream where you do
// something with the data. Sometimes it's called a "filter",
// but that's not a great name for it, since that implies a thing where
// some bits pass through, and others are simply ignored. (That would
// be a valid example of a transform, of course.)
//
// While the output is causally related to the input, it's not a
// necessarily symmetric or synchronous transformation. For example,
// a zlib stream might take multiple plain-text writes(), and then
// emit a single compressed chunk some time in the future.
//
// Here's how this works:
//
// The Transform stream has all the aspects of the readable and writable
// stream services. When you write(chunk), that calls _write(chunk,cb)
// internally, and returns false if there's a lot of pending writes
// buffered up. When you call read(), that calls _read(n) until
// there's enough pending readable data buffered up.
//
// In a transform stream, the written data is placed in a buffer. When
// _read(n) is called, it transforms the queued up data, calling the
// buffered _write cb's as it consumes chunks. If consuming a single
// written chunk would result in multiple output chunks, then the first
// outputted bit calls the readcb, and subsequent chunks just go into
// the read buffer, and will cause it to emit 'readable' if necessary.
//
// This way, back-pressure is actually determined by the reading side,
// since _read has to be called to start processing a new chunk. However,
// a pathological inflate type of transform can cause excessive buffering
// here. For example, imagine a stream where every byte of input is
// interpreted as an integer from 0-255, and then results in that many
// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
// 1kb of data being output. In this case, you could write a very small
// amount of input, and end up with a very large amount of output. In
// such a pathological inflating mechanism, there'd be no way to tell
// the system to stop doing the transform. A single 4MB write could
// cause the system to run out of memory.
//
// However, even in such a pathological case, only a single written chunk
// would be consumed, and then the rest would wait (un-transformed) until
// the results of the previous transformed chunk were consumed.
module.exports = Transform;
var Duplex = require('./_stream_duplex');
/*<replacement>*/
var util = require('core-util-is');
util.inherits = require('inherits');
/*</replacement>*/
util.inherits(Transform, Duplex);
function TransformState(options, stream) {
this.afterTransform = function(er, data) {
return afterTransform(stream, er, data);
};
this.needTransform = false;
this.transforming = false;
this.writecb = null;
this.writechunk = null;
}
function | (stream, er, data) {
var ts = stream._transformState;
ts.transforming = false;
var cb = ts.writecb;
if (!cb)
return stream.emit('error', new Error('no writecb in Transform class'));
ts.writechunk = null;
ts.writecb = null;
if (!util.isNullOrUndefined(data))
stream.push(data);
if (cb)
cb(er);
var rs = stream._readableState;
rs.reading = false;
if (rs.needReadable || rs.length < rs.highWaterMark) {
stream._read(rs.highWaterMark);
}
}
// Transform duplex stream: written chunks are fed through _transform()
// before they appear on the readable side.
function Transform(options) {
  // allow construction without `new`
  if (!(this instanceof Transform))
    return new Transform(options);
  Duplex.call(this, options);
  this._transformState = new TransformState(options, this);
  // when the writable side finishes, then flush out anything remaining.
  var stream = this;
  // start out asking for a readable event once data is transformed.
  this._readableState.needReadable = true;
  // we have implemented the _read method, and done the other things
  // that Readable wants before the first _read call, so unset the
  // sync guard flag.
  this._readableState.sync = false;
  this.once('prefinish', function() {
    // drain any buffered tail through the optional user-supplied _flush()
    if (util.isFunction(this._flush))
      this._flush(function(er) {
        done(stream, er);
      });
    else
      done(stream);
  });
}
Transform.prototype.push = function(chunk, encoding) {
this._transformState.needTransform = false;
return Duplex.prototype.push.call(this, chunk, encoding);
};
// This is the part where you do stuff!
// override this function in implementation services.
// 'chunk' is an input chunk.
//
// Call `push(newChunk)` to pass along transformed output
// to the readable side. You may call 'push' zero or more times.
//
// Call `cb(err)` when you are done with this chunk. If you pass
// an error, then that'll put the hurt on the whole operation. If you
// never call cb(), then you'll never get another chunk.
Transform.prototype._transform = function(chunk, encoding, cb) {
throw new Error('not implemented');
};
Transform.prototype._write = function(chunk, encoding, cb) {
var ts = this._transformState;
ts.writecb = cb;
ts.writechunk = chunk;
ts.writeencoding = encoding;
if (!ts.transforming) {
var rs = this._readableState;
if (ts.needTransform ||
rs.needReadable ||
rs.length < rs.highWaterMark)
this._read(rs.highWaterMark);
}
};
// Doesn't matter what the args are here.
// _transform does all the work.
// That we got here means that the readable side wants more data.
Transform.prototype._read = function(n) {
var ts = this._transformState;
if (!util.isNull(ts.writechunk) && ts.writecb && !ts.transforming) {
ts.transforming = true;
this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
} else {
// mark that we need a transform, so that any data that comes in
// will get processed, now that we've asked for it.
ts.needTransform = true;
}
};
// Final step after the writable side finished and _flush (if any) ran:
// assert nothing is still in flight, then end the readable side.
function done(stream, er) {
  if (er)
    return stream.emit('error', er);
  // if there's nothing in the write buffer, then that means
  // that nothing more will ever be provided
  var ws = stream._writableState;
  var ts = stream._transformState;
  if (ws.length)
    throw new Error('calling transform done when ws.length != 0');
  if (ts.transforming)
    throw new Error('calling transform done when still transforming');
  // EOF for the readable side
  return stream.push(null);
}
| afterTransform | identifier_name |
_stream_transform.js | // Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a transform stream is a readable/writable stream where you do
// something with the data. Sometimes it's called a "filter",
// but that's not a great name for it, since that implies a thing where
// some bits pass through, and others are simply ignored. (That would
// be a valid example of a transform, of course.)
//
// While the output is causally related to the input, it's not a
// necessarily symmetric or synchronous transformation. For example,
// a zlib stream might take multiple plain-text writes(), and then
// emit a single compressed chunk some time in the future.
//
// Here's how this works:
//
// The Transform stream has all the aspects of the readable and writable
// stream services. When you write(chunk), that calls _write(chunk,cb)
// internally, and returns false if there's a lot of pending writes
// buffered up. When you call read(), that calls _read(n) until
// there's enough pending readable data buffered up.
//
// In a transform stream, the written data is placed in a buffer. When
// _read(n) is called, it transforms the queued up data, calling the
// buffered _write cb's as it consumes chunks. If consuming a single
// written chunk would result in multiple output chunks, then the first
// outputted bit calls the readcb, and subsequent chunks just go into
// the read buffer, and will cause it to emit 'readable' if necessary.
//
// This way, back-pressure is actually determined by the reading side,
// since _read has to be called to start processing a new chunk. However,
// a pathological inflate type of transform can cause excessive buffering
// here. For example, imagine a stream where every byte of input is
// interpreted as an integer from 0-255, and then results in that many
// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
// 1kb of data being output. In this case, you could write a very small
// amount of input, and end up with a very large amount of output. In
// such a pathological inflating mechanism, there'd be no way to tell
// the system to stop doing the transform. A single 4MB write could
// cause the system to run out of memory.
//
// However, even in such a pathological case, only a single written chunk
// would be consumed, and then the rest would wait (un-transformed) until
// the results of the previous transformed chunk were consumed.
module.exports = Transform;
var Duplex = require('./_stream_duplex');
/*<replacement>*/
var util = require('core-util-is');
util.inherits = require('inherits');
/*</replacement>*/
util.inherits(Transform, Duplex);
function TransformState(options, stream) |
function afterTransform(stream, er, data) {
var ts = stream._transformState;
ts.transforming = false;
var cb = ts.writecb;
if (!cb)
return stream.emit('error', new Error('no writecb in Transform class'));
ts.writechunk = null;
ts.writecb = null;
if (!util.isNullOrUndefined(data))
stream.push(data);
if (cb)
cb(er);
var rs = stream._readableState;
rs.reading = false;
if (rs.needReadable || rs.length < rs.highWaterMark) {
stream._read(rs.highWaterMark);
}
}
function Transform(options) {
if (!(this instanceof Transform))
return new Transform(options);
Duplex.call(this, options);
this._transformState = new TransformState(options, this);
// when the writable side finishes, then flush out anything remaining.
var stream = this;
// start out asking for a readable event once data is transformed.
this._readableState.needReadable = true;
// we have implemented the _read method, and done the other things
// that Readable wants before the first _read call, so unset the
// sync guard flag.
this._readableState.sync = false;
this.once('prefinish', function() {
if (util.isFunction(this._flush))
this._flush(function(er) {
done(stream, er);
});
else
done(stream);
});
}
Transform.prototype.push = function(chunk, encoding) {
this._transformState.needTransform = false;
return Duplex.prototype.push.call(this, chunk, encoding);
};
// This is the part where you do stuff!
// override this function in implementation services.
// 'chunk' is an input chunk.
//
// Call `push(newChunk)` to pass along transformed output
// to the readable side. You may call 'push' zero or more times.
//
// Call `cb(err)` when you are done with this chunk. If you pass
// an error, then that'll put the hurt on the whole operation. If you
// never call cb(), then you'll never get another chunk.
Transform.prototype._transform = function(chunk, encoding, cb) {
throw new Error('not implemented');
};
Transform.prototype._write = function(chunk, encoding, cb) {
var ts = this._transformState;
ts.writecb = cb;
ts.writechunk = chunk;
ts.writeencoding = encoding;
if (!ts.transforming) {
var rs = this._readableState;
if (ts.needTransform ||
rs.needReadable ||
rs.length < rs.highWaterMark)
this._read(rs.highWaterMark);
}
};
// Doesn't matter what the args are here.
// _transform does all the work.
// That we got here means that the readable side wants more data.
Transform.prototype._read = function(n) {
var ts = this._transformState;
if (!util.isNull(ts.writechunk) && ts.writecb && !ts.transforming) {
ts.transforming = true;
this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
} else {
// mark that we need a transform, so that any data that comes in
// will get processed, now that we've asked for it.
ts.needTransform = true;
}
};
function done(stream, er) {
if (er)
return stream.emit('error', er);
// if there's nothing in the write buffer, then that means
// that nothing more will ever be provided
var ws = stream._writableState;
var ts = stream._transformState;
if (ws.length)
throw new Error('calling transform done when ws.length != 0');
if (ts.transforming)
throw new Error('calling transform done when still transforming');
return stream.push(null);
}
| {
this.afterTransform = function(er, data) {
return afterTransform(stream, er, data);
};
this.needTransform = false;
this.transforming = false;
this.writecb = null;
this.writechunk = null;
} | identifier_body |
hours.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams } from 'ionic-angular';
@IonicPage()
@Component({
selector: 'page-hours',
templateUrl: 'hours.html',
})
export class HoursPage {
started: boolean = false;
stopped: boolean = true;
  // Ionic injects the navigation controller and the route parameters.
  constructor(public navCtrl: NavController, public navParams: NavParams) {
  }
  // Ionic lifecycle hook — fired once when the page has loaded; currently unused.
  ionViewDidLoad() {
  }
startstop() {
this.started = !this.started;
this.stopped = !this.stopped;
var date = new Date();
var month = date.getMonth() + 1;
var year = date.getFullYear();
var day = date.getUTCDate();
var hour = date.getHours();
var mins = date.getMinutes();
var time = `${month}/${day}/${year} ${hour}:${mins}`;
var msg = `Time ${this.started ? 'in' : 'out'} ${time}`;
document.getElementById('startstops').innerHTML = "<div class='time'>" + msg + "</div>" + document.getElementById('startstops').innerHTML; | } | } | random_line_split |
hours.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams } from 'ionic-angular';
@IonicPage()
@Component({
selector: 'page-hours',
templateUrl: 'hours.html',
})
export class HoursPage {
started: boolean = false;
stopped: boolean = true;
constructor(public navCtrl: NavController, public navParams: NavParams) {
| ionViewDidLoad() {
}
startstop() {
this.started = !this.started;
this.stopped = !this.stopped;
var date = new Date();
var month = date.getMonth() + 1;
var year = date.getFullYear();
var day = date.getUTCDate();
var hour = date.getHours();
var mins = date.getMinutes();
var time = `${month}/${day}/${year} ${hour}:${mins}`;
var msg = `Time ${this.started ? 'in' : 'out'} ${time}`;
document.getElementById('startstops').innerHTML = "<div class='time'>" + msg + "</div>" + document.getElementById('startstops').innerHTML;
}
}
| }
| identifier_body |
hours.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams } from 'ionic-angular';
@IonicPage()
@Component({
selector: 'page-hours',
templateUrl: 'hours.html',
})
export class Ho |
started: boolean = false;
stopped: boolean = true;
constructor(public navCtrl: NavController, public navParams: NavParams) {
}
ionViewDidLoad() {
}
startstop() {
this.started = !this.started;
this.stopped = !this.stopped;
var date = new Date();
var month = date.getMonth() + 1;
var year = date.getFullYear();
var day = date.getUTCDate();
var hour = date.getHours();
var mins = date.getMinutes();
var time = `${month}/${day}/${year} ${hour}:${mins}`;
var msg = `Time ${this.started ? 'in' : 'out'} ${time}`;
document.getElementById('startstops').innerHTML = "<div class='time'>" + msg + "</div>" + document.getElementById('startstops').innerHTML;
}
}
| ursPage { | identifier_name |
scheduler.py | from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.deamon = True
self._started = False
def start(self):
"""
Start the schduler which will start the internal thread for scheduling
jobs. Please do tear_down when doing cleanup
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the schduler which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler satuation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
|
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
| self.max_delay_time = 1 | identifier_body |
scheduler.py | from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.deamon = True
self._started = False
def start(self):
"""
Start the schduler which will start the internal thread for scheduling
jobs. Please do tear_down when doing cleanup
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the schduler which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = [] | sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler satuation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job() | random_line_split | |
scheduler.py | from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.deamon = True
self._started = False
def start(self):
"""
Start the schduler which will start the internal thread for scheduling
jobs. Please do tear_down when doing cleanup
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the schduler which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
|
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler satuation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
| if job.get_expiration() <= now:
ready_jobs.append(job) | conditional_block |
scheduler.py | from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class | (object):
"""
A simple scheduler which schedules the periodic or once event
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.deamon = True
self._started = False
def start(self):
"""
Start the schduler which will start the internal thread for scheduling
jobs. Please do tear_down when doing cleanup
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the schduler which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler satuation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
| Scheduler | identifier_name |
cmake.py | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from catkin_tools.execution.io import IOBufferProtocol
from catkin_tools.execution.events import ExecutionEvent
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.utils import which
CMAKE_EXEC = which('cmake')
CMAKE_INSTALL_MANIFEST_FILENAME = 'install_manifest.txt'
def split_to_last_line_break(data):
"""This splits a byte buffer into (head, tail) where head contains the
beginning of the buffer to the last line break (inclusive) and the tail
contains all bytes after that."""
last_break_index = 1 + data.rfind(b'\n')
return data[:last_break_index], data[last_break_index:]
class CMakeIOBufferProtocol(IOBufferProtocol):
"""An asyncio protocol that collects stdout and stderr.
This class also generates `stdout` and `stderr` events.
Since the underlying asyncio API constructs the actual protocols, this
class provides a factory method to inject the job and stage information
into the created protocol.
"""
def abspath(self, groups):
"""Group filter that turns source-relative paths into absolute paths."""
return (groups[0] if groups[0].startswith(os.sep) else os.path.join(self.source_path, groups[0]),) + groups[1:]
def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs):
super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
self.source_path = source_path
self.suppress_stdout = suppress_stdout
# These are buffers for incomplete lines that we want to wait to parse
# until we have received them completely
self.stdout_tail = b''
self.stderr_tail = b''
# Line formatting filters
# Each is a 3-tuple:
# - regular expression (with captured groups)
# - output formatting line (subs captured groups)
# - functor which filters captured groups
filters = [
(r'^-- :(.+)', '@{cf}--@| :@{yf}{}@|', None),
(r'^-- (.+)', '@{cf}--@| {}', None),
(r'CMake Error at (.+):(.+)', '@{rf}@!CMake Error@| at {}:{}', self.abspath),
(r'CMake Warning at (.+):(.+)', '@{yf}@!CMake Warning@| at {}:{}', self.abspath),
(r'CMake Warning (dev) at (.+):(.+)', '@{yf}@!CMake Warning (dev)@| at {}:{}', self.abspath),
(r'(?i)(warning.*)', '@{yf}{}@|', None),
(r'(?i)ERROR:(.*)', '@!@{rf}ERROR:@|{}@|', None),
(r'Call Stack \(most recent call first\):(.*)', '@{cf}Call Stack (most recent call first):@|{}', None),
]
self.filters = [(re.compile(p), r, f) for (p, r, f) in filters]
def on_stdout_received(self, data):
if not self.suppress_stdout:
data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
def on_stderr_received(self, data):
data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
def close(self):
# Make sure tail buffers are flushed
self.flush_tails()
super(CMakeIOBufferProtocol, self).close()
def flush_tails(self):
"""Write out any unprocessed tail buffers."""
colored = self.color_lines(self.stdout_tail)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
self.stdout_tail = b''
colored = self.color_lines(self.stderr_tail)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
self.stderr_tail = b''
def color_lines(self, data):
"""Apply colorization rules to each line in data"""
decoded_data = self._decode(data)
# TODO: This will only work if all lines are received at once. Instead
# of directly splitting lines, we should buffer the data lines until
# the last character is a line break
lines = decoded_data.splitlines(True) # Keep line breaks
colored_lines = [self.colorize_cmake(line) for line in lines]
colored_data = ''.join(colored_lines)
encoded_data = self._encode(colored_data)
return encoded_data
@classmethod
def factory_factory(cls, source_path, suppress_stdout=False):
"""Factory factory for constructing protocols that know the source path for this CMake package."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs)
return init_proxy
return factory
def colorize_cmake(self, line):
"""Colorizes output from CMake
This also prepends the source path to the locations of warnings and errors.
:param line: one, new line terminated, line from `cmake` which needs coloring.
:type line: str
"""
# return line
cline = sanitize(line).rstrip()
if len(cline.strip()) > 0:
for p, r, f in self.filters:
match = p.match(cline)
if match is not None:
cline = fmt(r, reset=False)
if f is not None:
cline = cline.format(*f(match.groups()))
else:
cline = cline.format(*match.groups())
break
return cline + '\n'
class CMakeMakeIOBufferProtocol(IOBufferProtocol):
"""An IOBufferProtocol which parses CMake's progress prefixes and emits corresponding STAGE_PROGRESS events."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
super(CMakeMakeIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
def on_stdout_received(self, data):
super(CMakeMakeIOBufferProtocol, self).on_stdout_received(data)
self.send_progress(data)
def send_progress(self, data):
"""Parse CMake Make completion progress"""
progress_matches = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
if progress_matches is not None:
self.event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=self.job_id,
stage_label=self.stage_label,
percent=str(progress_matches.groups()[0])))
class CMakeMakeRunTestsIOBufferProtocol(CMakeMakeIOBufferProtocol):
"""An IOBufferProtocol which parses the output of `make run_tests`."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs):
|
def on_stdout_received(self, data):
self.send_progress(data)
data = self._decode(data)
if data.startswith('-- run_tests.py: execute command'):
self.in_test_output = True
elif data.startswith('-- run_tests.py: verify result'):
self.in_test_output = False
if self.verbose or self.in_test_output:
colored = self.colorize_run_tests(data)
super(CMakeMakeRunTestsIOBufferProtocol, self).on_stdout_received(colored.encode())
def colorize_run_tests(self, line):
cline = sanitize(line).rstrip()
for p, r in self.filters:
if p.match(cline):
lines = [fmt(r).format(line) for line in cline.splitlines()]
cline = '\n'.join(lines)
return cline + '\n'
@classmethod
def factory_factory(cls, verbose):
"""Factory factory for constructing protocols that know the verbosity."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs)
return init_proxy
return factory
def get_installed_files(path):
"""Get a set of files installed by a CMake package as specified by an
install_manifest.txt in a given directory."""
install_manifest_path = os.path.join(
path,
CMAKE_INSTALL_MANIFEST_FILENAME)
installed_files = set()
if os.path.exists(install_manifest_path):
with open(install_manifest_path) as f:
installed_files = set([line.strip() for line in f.readlines()])
return installed_files
| super(CMakeMakeRunTestsIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
# Line formatting filters
# Each is a 2-tuple:
# - regular expression
# - output formatting line
self.filters = [
(re.compile(r'^-- run_tests.py:'), '@!@{kf}{}@|'),
]
self.in_test_output = False
self.verbose = verbose | identifier_body |
cmake.py | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from catkin_tools.execution.io import IOBufferProtocol
from catkin_tools.execution.events import ExecutionEvent
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.utils import which
CMAKE_EXEC = which('cmake')
CMAKE_INSTALL_MANIFEST_FILENAME = 'install_manifest.txt'
def split_to_last_line_break(data):
"""This splits a byte buffer into (head, tail) where head contains the
beginning of the buffer to the last line break (inclusive) and the tail
contains all bytes after that."""
last_break_index = 1 + data.rfind(b'\n')
return data[:last_break_index], data[last_break_index:]
class CMakeIOBufferProtocol(IOBufferProtocol):
"""An asyncio protocol that collects stdout and stderr.
This class also generates `stdout` and `stderr` events.
Since the underlying asyncio API constructs the actual protocols, this
class provides a factory method to inject the job and stage information
into the created protocol.
"""
def abspath(self, groups):
"""Group filter that turns source-relative paths into absolute paths."""
return (groups[0] if groups[0].startswith(os.sep) else os.path.join(self.source_path, groups[0]),) + groups[1:]
def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs):
super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
self.source_path = source_path
self.suppress_stdout = suppress_stdout
# These are buffers for incomplete lines that we want to wait to parse
# until we have received them completely
self.stdout_tail = b''
self.stderr_tail = b''
# Line formatting filters
# Each is a 3-tuple:
# - regular expression (with captured groups)
# - output formatting line (subs captured groups)
# - functor which filters captured groups
filters = [
(r'^-- :(.+)', '@{cf}--@| :@{yf}{}@|', None),
(r'^-- (.+)', '@{cf}--@| {}', None),
(r'CMake Error at (.+):(.+)', '@{rf}@!CMake Error@| at {}:{}', self.abspath),
(r'CMake Warning at (.+):(.+)', '@{yf}@!CMake Warning@| at {}:{}', self.abspath),
(r'CMake Warning (dev) at (.+):(.+)', '@{yf}@!CMake Warning (dev)@| at {}:{}', self.abspath),
(r'(?i)(warning.*)', '@{yf}{}@|', None),
(r'(?i)ERROR:(.*)', '@!@{rf}ERROR:@|{}@|', None),
(r'Call Stack \(most recent call first\):(.*)', '@{cf}Call Stack (most recent call first):@|{}', None),
]
self.filters = [(re.compile(p), r, f) for (p, r, f) in filters]
def on_stdout_received(self, data):
if not self.suppress_stdout:
data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
def on_stderr_received(self, data):
data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
def close(self):
# Make sure tail buffers are flushed
self.flush_tails()
super(CMakeIOBufferProtocol, self).close()
def flush_tails(self):
"""Write out any unprocessed tail buffers."""
colored = self.color_lines(self.stdout_tail)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
self.stdout_tail = b''
colored = self.color_lines(self.stderr_tail)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
self.stderr_tail = b''
def color_lines(self, data):
"""Apply colorization rules to each line in data"""
decoded_data = self._decode(data)
# TODO: This will only work if all lines are received at once. Instead
# of directly splitting lines, we should buffer the data lines until
# the last character is a line break
lines = decoded_data.splitlines(True) # Keep line breaks
colored_lines = [self.colorize_cmake(line) for line in lines]
colored_data = ''.join(colored_lines)
encoded_data = self._encode(colored_data)
return encoded_data
@classmethod
def factory_factory(cls, source_path, suppress_stdout=False):
"""Factory factory for constructing protocols that know the source path for this CMake package."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs)
return init_proxy
return factory
def colorize_cmake(self, line):
"""Colorizes output from CMake
This also prepends the source path to the locations of warnings and errors.
:param line: one, new line terminated, line from `cmake` which needs coloring.
:type line: str
"""
# return line
cline = sanitize(line).rstrip()
if len(cline.strip()) > 0:
|
return cline + '\n'
class CMakeMakeIOBufferProtocol(IOBufferProtocol):
"""An IOBufferProtocol which parses CMake's progress prefixes and emits corresponding STAGE_PROGRESS events."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
super(CMakeMakeIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
def on_stdout_received(self, data):
super(CMakeMakeIOBufferProtocol, self).on_stdout_received(data)
self.send_progress(data)
def send_progress(self, data):
"""Parse CMake Make completion progress"""
progress_matches = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
if progress_matches is not None:
self.event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=self.job_id,
stage_label=self.stage_label,
percent=str(progress_matches.groups()[0])))
class CMakeMakeRunTestsIOBufferProtocol(CMakeMakeIOBufferProtocol):
"""An IOBufferProtocol which parses the output of `make run_tests`."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs):
super(CMakeMakeRunTestsIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
# Line formatting filters
# Each is a 2-tuple:
# - regular expression
# - output formatting line
self.filters = [
(re.compile(r'^-- run_tests.py:'), '@!@{kf}{}@|'),
]
self.in_test_output = False
self.verbose = verbose
def on_stdout_received(self, data):
self.send_progress(data)
data = self._decode(data)
if data.startswith('-- run_tests.py: execute command'):
self.in_test_output = True
elif data.startswith('-- run_tests.py: verify result'):
self.in_test_output = False
if self.verbose or self.in_test_output:
colored = self.colorize_run_tests(data)
super(CMakeMakeRunTestsIOBufferProtocol, self).on_stdout_received(colored.encode())
def colorize_run_tests(self, line):
cline = sanitize(line).rstrip()
for p, r in self.filters:
if p.match(cline):
lines = [fmt(r).format(line) for line in cline.splitlines()]
cline = '\n'.join(lines)
return cline + '\n'
@classmethod
def factory_factory(cls, verbose):
"""Factory factory for constructing protocols that know the verbosity."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs)
return init_proxy
return factory
def get_installed_files(path):
"""Get a set of files installed by a CMake package as specified by an
install_manifest.txt in a given directory."""
install_manifest_path = os.path.join(
path,
CMAKE_INSTALL_MANIFEST_FILENAME)
installed_files = set()
if os.path.exists(install_manifest_path):
with open(install_manifest_path) as f:
installed_files = set([line.strip() for line in f.readlines()])
return installed_files
| for p, r, f in self.filters:
match = p.match(cline)
if match is not None:
cline = fmt(r, reset=False)
if f is not None:
cline = cline.format(*f(match.groups()))
else:
cline = cline.format(*match.groups())
break | conditional_block |
cmake.py | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from catkin_tools.execution.io import IOBufferProtocol
from catkin_tools.execution.events import ExecutionEvent
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.utils import which
CMAKE_EXEC = which('cmake')
CMAKE_INSTALL_MANIFEST_FILENAME = 'install_manifest.txt'
def split_to_last_line_break(data):
"""This splits a byte buffer into (head, tail) where head contains the
beginning of the buffer to the last line break (inclusive) and the tail
contains all bytes after that."""
last_break_index = 1 + data.rfind(b'\n')
return data[:last_break_index], data[last_break_index:]
class | (IOBufferProtocol):
"""An asyncio protocol that collects stdout and stderr.
This class also generates `stdout` and `stderr` events.
Since the underlying asyncio API constructs the actual protocols, this
class provides a factory method to inject the job and stage information
into the created protocol.
"""
def abspath(self, groups):
"""Group filter that turns source-relative paths into absolute paths."""
return (groups[0] if groups[0].startswith(os.sep) else os.path.join(self.source_path, groups[0]),) + groups[1:]
def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs):
super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
self.source_path = source_path
self.suppress_stdout = suppress_stdout
# These are buffers for incomplete lines that we want to wait to parse
# until we have received them completely
self.stdout_tail = b''
self.stderr_tail = b''
# Line formatting filters
# Each is a 3-tuple:
# - regular expression (with captured groups)
# - output formatting line (subs captured groups)
# - functor which filters captured groups
filters = [
(r'^-- :(.+)', '@{cf}--@| :@{yf}{}@|', None),
(r'^-- (.+)', '@{cf}--@| {}', None),
(r'CMake Error at (.+):(.+)', '@{rf}@!CMake Error@| at {}:{}', self.abspath),
(r'CMake Warning at (.+):(.+)', '@{yf}@!CMake Warning@| at {}:{}', self.abspath),
(r'CMake Warning (dev) at (.+):(.+)', '@{yf}@!CMake Warning (dev)@| at {}:{}', self.abspath),
(r'(?i)(warning.*)', '@{yf}{}@|', None),
(r'(?i)ERROR:(.*)', '@!@{rf}ERROR:@|{}@|', None),
(r'Call Stack \(most recent call first\):(.*)', '@{cf}Call Stack (most recent call first):@|{}', None),
]
self.filters = [(re.compile(p), r, f) for (p, r, f) in filters]
def on_stdout_received(self, data):
if not self.suppress_stdout:
data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
def on_stderr_received(self, data):
data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
def close(self):
# Make sure tail buffers are flushed
self.flush_tails()
super(CMakeIOBufferProtocol, self).close()
def flush_tails(self):
"""Write out any unprocessed tail buffers."""
colored = self.color_lines(self.stdout_tail)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
self.stdout_tail = b''
colored = self.color_lines(self.stderr_tail)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
self.stderr_tail = b''
def color_lines(self, data):
"""Apply colorization rules to each line in data"""
decoded_data = self._decode(data)
# TODO: This will only work if all lines are received at once. Instead
# of directly splitting lines, we should buffer the data lines until
# the last character is a line break
lines = decoded_data.splitlines(True) # Keep line breaks
colored_lines = [self.colorize_cmake(line) for line in lines]
colored_data = ''.join(colored_lines)
encoded_data = self._encode(colored_data)
return encoded_data
@classmethod
def factory_factory(cls, source_path, suppress_stdout=False):
"""Factory factory for constructing protocols that know the source path for this CMake package."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs)
return init_proxy
return factory
def colorize_cmake(self, line):
"""Colorizes output from CMake
This also prepends the source path to the locations of warnings and errors.
:param line: one, new line terminated, line from `cmake` which needs coloring.
:type line: str
"""
# return line
cline = sanitize(line).rstrip()
if len(cline.strip()) > 0:
for p, r, f in self.filters:
match = p.match(cline)
if match is not None:
cline = fmt(r, reset=False)
if f is not None:
cline = cline.format(*f(match.groups()))
else:
cline = cline.format(*match.groups())
break
return cline + '\n'
class CMakeMakeIOBufferProtocol(IOBufferProtocol):
"""An IOBufferProtocol which parses CMake's progress prefixes and emits corresponding STAGE_PROGRESS events."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
super(CMakeMakeIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
def on_stdout_received(self, data):
super(CMakeMakeIOBufferProtocol, self).on_stdout_received(data)
self.send_progress(data)
def send_progress(self, data):
"""Parse CMake Make completion progress"""
progress_matches = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
if progress_matches is not None:
self.event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=self.job_id,
stage_label=self.stage_label,
percent=str(progress_matches.groups()[0])))
class CMakeMakeRunTestsIOBufferProtocol(CMakeMakeIOBufferProtocol):
"""An IOBufferProtocol which parses the output of `make run_tests`."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs):
super(CMakeMakeRunTestsIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
# Line formatting filters
# Each is a 2-tuple:
# - regular expression
# - output formatting line
self.filters = [
(re.compile(r'^-- run_tests.py:'), '@!@{kf}{}@|'),
]
self.in_test_output = False
self.verbose = verbose
def on_stdout_received(self, data):
self.send_progress(data)
data = self._decode(data)
if data.startswith('-- run_tests.py: execute command'):
self.in_test_output = True
elif data.startswith('-- run_tests.py: verify result'):
self.in_test_output = False
if self.verbose or self.in_test_output:
colored = self.colorize_run_tests(data)
super(CMakeMakeRunTestsIOBufferProtocol, self).on_stdout_received(colored.encode())
def colorize_run_tests(self, line):
cline = sanitize(line).rstrip()
for p, r in self.filters:
if p.match(cline):
lines = [fmt(r).format(line) for line in cline.splitlines()]
cline = '\n'.join(lines)
return cline + '\n'
@classmethod
def factory_factory(cls, verbose):
"""Factory factory for constructing protocols that know the verbosity."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs)
return init_proxy
return factory
def get_installed_files(path):
"""Get a set of files installed by a CMake package as specified by an
install_manifest.txt in a given directory."""
install_manifest_path = os.path.join(
path,
CMAKE_INSTALL_MANIFEST_FILENAME)
installed_files = set()
if os.path.exists(install_manifest_path):
with open(install_manifest_path) as f:
installed_files = set([line.strip() for line in f.readlines()])
return installed_files
| CMakeIOBufferProtocol | identifier_name |
cmake.py | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from catkin_tools.execution.io import IOBufferProtocol
from catkin_tools.execution.events import ExecutionEvent
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.utils import which
CMAKE_EXEC = which('cmake')
CMAKE_INSTALL_MANIFEST_FILENAME = 'install_manifest.txt'
def split_to_last_line_break(data):
"""This splits a byte buffer into (head, tail) where head contains the
beginning of the buffer to the last line break (inclusive) and the tail
contains all bytes after that."""
last_break_index = 1 + data.rfind(b'\n')
return data[:last_break_index], data[last_break_index:]
class CMakeIOBufferProtocol(IOBufferProtocol): | """An asyncio protocol that collects stdout and stderr.
This class also generates `stdout` and `stderr` events.
Since the underlying asyncio API constructs the actual protocols, this
class provides a factory method to inject the job and stage information
into the created protocol.
"""
def abspath(self, groups):
"""Group filter that turns source-relative paths into absolute paths."""
return (groups[0] if groups[0].startswith(os.sep) else os.path.join(self.source_path, groups[0]),) + groups[1:]
def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs):
super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
self.source_path = source_path
self.suppress_stdout = suppress_stdout
# These are buffers for incomplete lines that we want to wait to parse
# until we have received them completely
self.stdout_tail = b''
self.stderr_tail = b''
# Line formatting filters
# Each is a 3-tuple:
# - regular expression (with captured groups)
# - output formatting line (subs captured groups)
# - functor which filters captured groups
filters = [
(r'^-- :(.+)', '@{cf}--@| :@{yf}{}@|', None),
(r'^-- (.+)', '@{cf}--@| {}', None),
(r'CMake Error at (.+):(.+)', '@{rf}@!CMake Error@| at {}:{}', self.abspath),
(r'CMake Warning at (.+):(.+)', '@{yf}@!CMake Warning@| at {}:{}', self.abspath),
(r'CMake Warning (dev) at (.+):(.+)', '@{yf}@!CMake Warning (dev)@| at {}:{}', self.abspath),
(r'(?i)(warning.*)', '@{yf}{}@|', None),
(r'(?i)ERROR:(.*)', '@!@{rf}ERROR:@|{}@|', None),
(r'Call Stack \(most recent call first\):(.*)', '@{cf}Call Stack (most recent call first):@|{}', None),
]
self.filters = [(re.compile(p), r, f) for (p, r, f) in filters]
def on_stdout_received(self, data):
if not self.suppress_stdout:
data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
def on_stderr_received(self, data):
data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
def close(self):
# Make sure tail buffers are flushed
self.flush_tails()
super(CMakeIOBufferProtocol, self).close()
def flush_tails(self):
"""Write out any unprocessed tail buffers."""
colored = self.color_lines(self.stdout_tail)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
self.stdout_tail = b''
colored = self.color_lines(self.stderr_tail)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
self.stderr_tail = b''
def color_lines(self, data):
"""Apply colorization rules to each line in data"""
decoded_data = self._decode(data)
# TODO: This will only work if all lines are received at once. Instead
# of directly splitting lines, we should buffer the data lines until
# the last character is a line break
lines = decoded_data.splitlines(True) # Keep line breaks
colored_lines = [self.colorize_cmake(line) for line in lines]
colored_data = ''.join(colored_lines)
encoded_data = self._encode(colored_data)
return encoded_data
@classmethod
def factory_factory(cls, source_path, suppress_stdout=False):
"""Factory factory for constructing protocols that know the source path for this CMake package."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs)
return init_proxy
return factory
def colorize_cmake(self, line):
"""Colorizes output from CMake
This also prepends the source path to the locations of warnings and errors.
:param line: one, new line terminated, line from `cmake` which needs coloring.
:type line: str
"""
# return line
cline = sanitize(line).rstrip()
if len(cline.strip()) > 0:
for p, r, f in self.filters:
match = p.match(cline)
if match is not None:
cline = fmt(r, reset=False)
if f is not None:
cline = cline.format(*f(match.groups()))
else:
cline = cline.format(*match.groups())
break
return cline + '\n'
class CMakeMakeIOBufferProtocol(IOBufferProtocol):
"""An IOBufferProtocol which parses CMake's progress prefixes and emits corresponding STAGE_PROGRESS events."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
super(CMakeMakeIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
def on_stdout_received(self, data):
super(CMakeMakeIOBufferProtocol, self).on_stdout_received(data)
self.send_progress(data)
def send_progress(self, data):
"""Parse CMake Make completion progress"""
progress_matches = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
if progress_matches is not None:
self.event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=self.job_id,
stage_label=self.stage_label,
percent=str(progress_matches.groups()[0])))
class CMakeMakeRunTestsIOBufferProtocol(CMakeMakeIOBufferProtocol):
"""An IOBufferProtocol which parses the output of `make run_tests`."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs):
super(CMakeMakeRunTestsIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
# Line formatting filters
# Each is a 2-tuple:
# - regular expression
# - output formatting line
self.filters = [
(re.compile(r'^-- run_tests.py:'), '@!@{kf}{}@|'),
]
self.in_test_output = False
self.verbose = verbose
def on_stdout_received(self, data):
self.send_progress(data)
data = self._decode(data)
if data.startswith('-- run_tests.py: execute command'):
self.in_test_output = True
elif data.startswith('-- run_tests.py: verify result'):
self.in_test_output = False
if self.verbose or self.in_test_output:
colored = self.colorize_run_tests(data)
super(CMakeMakeRunTestsIOBufferProtocol, self).on_stdout_received(colored.encode())
def colorize_run_tests(self, line):
cline = sanitize(line).rstrip()
for p, r in self.filters:
if p.match(cline):
lines = [fmt(r).format(line) for line in cline.splitlines()]
cline = '\n'.join(lines)
return cline + '\n'
@classmethod
def factory_factory(cls, verbose):
"""Factory factory for constructing protocols that know the verbosity."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs)
return init_proxy
return factory
def get_installed_files(path):
"""Get a set of files installed by a CMake package as specified by an
install_manifest.txt in a given directory."""
install_manifest_path = os.path.join(
path,
CMAKE_INSTALL_MANIFEST_FILENAME)
installed_files = set()
if os.path.exists(install_manifest_path):
with open(install_manifest_path) as f:
installed_files = set([line.strip() for line in f.readlines()])
return installed_files | random_line_split | |
dstr-dflt-obj-ptrn-id-init-fn-name-class.js | // This file was procedurally generated from the following sources:
// - src/dstr-binding/obj-ptrn-id-init-fn-name-class.case
// - src/dstr-binding/default/async-gen-func-decl-dflt.template
/*---
description: SingleNameBinding assigns `name` to "anonymous" classes (async generator function declaration (default parameter))
esid: sec-asyncgenerator-definitions-instantiatefunctionobject
features: [async-iteration]
flags: [generated, async]
info: |
AsyncGeneratorDeclaration : async [no LineTerminator here] function * BindingIdentifier
( FormalParameters ) { AsyncGeneratorBody }
[...]
3. Let F be ! AsyncGeneratorFunctionCreate(Normal, FormalParameters, AsyncGeneratorBody,
scope, strict).
[...]
13.3.3.7 Runtime Semantics: KeyedBindingInitialization
SingleNameBinding : BindingIdentifier Initializeropt
[...]
6. If Initializer is present and v is undefined, then
[...]
d. If IsAnonymousFunctionDefinition(Initializer) is true, then
i. Let hasNameProperty be HasOwnProperty(v, "name").
ii. ReturnIfAbrupt(hasNameProperty).
iii. If hasNameProperty is false, perform SetFunctionName(v,
bindingId).
---*/
var callCount = 0;
async function* f({ cls = class {}, xCls = class X {}, xCls2 = class { static name() {} } } = {}) {
assert.sameValue(cls.name, 'cls');
assert.notSameValue(xCls.name, 'xCls');
assert.notSameValue(xCls2.name, 'xCls2');
callCount = callCount + 1;
};
f().next().then(() => {
assert.sameValue(callCount, 1, 'invoked exactly once'); | }).then($DONE, $DONE); | random_line_split | |
dstr-dflt-obj-ptrn-id-init-fn-name-class.js | // This file was procedurally generated from the following sources:
// - src/dstr-binding/obj-ptrn-id-init-fn-name-class.case
// - src/dstr-binding/default/async-gen-func-decl-dflt.template
/*---
description: SingleNameBinding assigns `name` to "anonymous" classes (async generator function declaration (default parameter))
esid: sec-asyncgenerator-definitions-instantiatefunctionobject
features: [async-iteration]
flags: [generated, async]
info: |
AsyncGeneratorDeclaration : async [no LineTerminator here] function * BindingIdentifier
( FormalParameters ) { AsyncGeneratorBody }
[...]
3. Let F be ! AsyncGeneratorFunctionCreate(Normal, FormalParameters, AsyncGeneratorBody,
scope, strict).
[...]
13.3.3.7 Runtime Semantics: KeyedBindingInitialization
SingleNameBinding : BindingIdentifier Initializeropt
[...]
6. If Initializer is present and v is undefined, then
[...]
d. If IsAnonymousFunctionDefinition(Initializer) is true, then
i. Let hasNameProperty be HasOwnProperty(v, "name").
ii. ReturnIfAbrupt(hasNameProperty).
iii. If hasNameProperty is false, perform SetFunctionName(v,
bindingId).
---*/
var callCount = 0;
async function* f({ cls = class {}, xCls = class X {}, xCls2 = class { static | () {} } } = {}) {
assert.sameValue(cls.name, 'cls');
assert.notSameValue(xCls.name, 'xCls');
assert.notSameValue(xCls2.name, 'xCls2');
callCount = callCount + 1;
};
f().next().then(() => {
assert.sameValue(callCount, 1, 'invoked exactly once');
}).then($DONE, $DONE);
| name | identifier_name |
dstr-dflt-obj-ptrn-id-init-fn-name-class.js | // This file was procedurally generated from the following sources:
// - src/dstr-binding/obj-ptrn-id-init-fn-name-class.case
// - src/dstr-binding/default/async-gen-func-decl-dflt.template
/*---
description: SingleNameBinding assigns `name` to "anonymous" classes (async generator function declaration (default parameter))
esid: sec-asyncgenerator-definitions-instantiatefunctionobject
features: [async-iteration]
flags: [generated, async]
info: |
AsyncGeneratorDeclaration : async [no LineTerminator here] function * BindingIdentifier
( FormalParameters ) { AsyncGeneratorBody }
[...]
3. Let F be ! AsyncGeneratorFunctionCreate(Normal, FormalParameters, AsyncGeneratorBody,
scope, strict).
[...]
13.3.3.7 Runtime Semantics: KeyedBindingInitialization
SingleNameBinding : BindingIdentifier Initializeropt
[...]
6. If Initializer is present and v is undefined, then
[...]
d. If IsAnonymousFunctionDefinition(Initializer) is true, then
i. Let hasNameProperty be HasOwnProperty(v, "name").
ii. ReturnIfAbrupt(hasNameProperty).
iii. If hasNameProperty is false, perform SetFunctionName(v,
bindingId).
---*/
var callCount = 0;
async function* f({ cls = class {}, xCls = class X {}, xCls2 = class { static name() | } } = {}) {
assert.sameValue(cls.name, 'cls');
assert.notSameValue(xCls.name, 'xCls');
assert.notSameValue(xCls2.name, 'xCls2');
callCount = callCount + 1;
};
f().next().then(() => {
assert.sameValue(callCount, 1, 'invoked exactly once');
}).then($DONE, $DONE);
| {} | identifier_body |
general-benefits.tsx | import {
isBusinessPlan,
isCompletePlan,
isJetpackPlanSlug,
isPersonalPlan,
isPremiumPlan,
isSecurityDailyPlan,
isSecurityRealTimePlan,
} from '@automattic/calypso-products';
import { useTranslate } from 'i18n-calypso';
import * as React from 'react';
/*
* Show a list of Jetpack benefits that do not depend on site data
* These can vary by plan, but we do not need to get any data about the site to show these
* This is similar to the disconnection flow where some plan benefits are listed if a user is disconnecting Jetpack
*/
interface Props {
productSlug: string;
}
const JetpackGeneralBenefits: React.FC< Props > = ( props ) => {
const translate = useTranslate();
const { productSlug } = props;
const hasSecurityDailyPlan = isSecurityDailyPlan( productSlug );
const hasSecurityRealTimePlan = isSecurityRealTimePlan( productSlug );
const hasCompletePlan = isCompletePlan( productSlug );
const hasPersonalPlan = isPersonalPlan( productSlug );
const hasPremiumPlan = isPremiumPlan( productSlug );
const hasBusinessPlan = isBusinessPlan( productSlug );
const hasJetpackPlanSlug = isJetpackPlanSlug( productSlug );
const benefits = [];
// Priority Support
if (
hasSecurityDailyPlan ||
hasSecurityRealTimePlan ||
hasCompletePlan ||
hasPersonalPlan ||
hasPremiumPlan ||
hasBusinessPlan
) {
benefits.push(
<React.Fragment>
{ translate(
"{{strong}}Priority support{{/strong}} from Jetpack's WordPress and security experts.",
{
components: {
strong: <strong />,
},
}
) }
</React.Fragment>
);
}
// Payment Collection
// Ad Program
// Google Analytics | hasBusinessPlan
) {
benefits.push(
<React.Fragment>
{ translate( 'The ability to {{strong}}collect payments{{/strong}}.', {
components: {
strong: <strong />,
},
} ) }
</React.Fragment>
);
benefits.push(
<React.Fragment>
{ translate( 'The {{strong}}Ad program{{/strong}} for WordPress.', {
components: {
strong: <strong />,
},
} ) }
</React.Fragment>
);
benefits.push(
<React.Fragment>
{ translate( 'The {{strong}}Google Analytics{{/strong}} integration.', {
components: {
strong: <strong />,
},
} ) }
</React.Fragment>
);
}
// 13GB of video hosting
if ( hasPremiumPlan || hasSecurityDailyPlan ) {
benefits.push(
<React.Fragment>
{ translate( 'Up to 13GB of {{strong}}high-speed video hosting{{/strong}}.', {
components: {
strong: <strong />,
},
} ) }
</React.Fragment>
);
}
// Unlimited Video Hosting
if ( hasBusinessPlan || hasSecurityRealTimePlan || hasCompletePlan ) {
benefits.push(
<React.Fragment>
{ translate( 'Unlimited {{strong}}high-speed video hosting{{/strong}}.', {
components: {
strong: <strong />,
},
} ) }
</React.Fragment>
);
}
// General benefits of all Jetpack Plans (brute force protection, CDN)
if ( hasJetpackPlanSlug ) {
benefits.push(
<React.Fragment>
{ translate(
'Brute force {{strong}}attack protection{{/strong}} and {{strong}}downtime monitoring{{/strong}}.',
{
components: {
strong: <strong />,
},
}
) }
</React.Fragment>
);
}
if ( benefits.length > 0 ) {
return (
<ul className="jetpack-benefits__general-benefit-list">
{ benefits.map( ( benefit, idx ) => {
return <li key={ idx }>{ benefit }</li>;
} ) }
</ul>
);
}
return null;
};
export default JetpackGeneralBenefits; | if (
hasSecurityDailyPlan ||
hasSecurityRealTimePlan ||
hasCompletePlan ||
hasPremiumPlan || | random_line_split |
iRowModel.d.ts | import { RowNode } from "../entities/rowNode";
export interface RowBounds {
rowTop: number;
rowHeight: number;
rowIndex?: number;
}
export interface IRowModel {
/** Returns the rowNode at the given index. */
getRow(index: number): RowNode | null;
/** Returns the rowNode for given id. */
getRowNode(id: string): RowNode | null;
/** This is legacy, not used by ag-Grid, but keeping for backward compatibility */
getRowCount(): number;
getTopLevelRowCount(): number;
getTopLevelRowDisplayedIndex(topLevelIndex: number): number;
/** Returns the row index at the given pixel */
getRowIndexAtPixel(pixel: number): number;
/** Returns total height of all the rows - used to size the height of the grid div that contains the rows */ | /** Returns row top and bottom for a given row */
getRowBounds(index: number): RowBounds | null;
/** Returns true if this model has no rows, regardless of model filter. EG if rows present, but filtered
* out, this still returns false. If it returns true, then the grid shows the 'no rows' overlay - but we
* don't show that overlay if the rows are just filtered out. */
isEmpty(): boolean;
/** Returns true if no rows (either no rows at all, or the rows are filtered out). This is what the grid
* uses to know if there are rows to render or not. */
isRowsToRender(): boolean;
/** Returns all rows in range that should be selected. If there is a gap in range (non ClientSideRowModel) then
* then no rows should be returned */
getNodesInRangeForSelection(first: RowNode, last: RowNode): RowNode[];
/** Iterate through each node. What this does depends on the model type. For clientSide, goes through
* all nodes. For pagination, goes through current page. For virtualPage, goes through what's loaded in memory. */
forEachNode(callback: (rowNode: RowNode, index: number) => void): void;
/** The base class returns the type. We use this instead of 'instanceof' as the client might provide
* their own implementation of the models in the future. */
getType(): string;
/**
* It tells us if this row model knows about the last row that it can produce. This is used by the
* PaginationPanel, if last row is not found, then the 'last' button is disabled and the last page is
* not shown. This is always true for ClientSideRowModel. It toggles for InfiniteRowModel.
*/
isLastRowFound(): boolean;
/** Used by CSRM only - is makes sure there are now estimated row heights within the range. */
ensureRowHeightsValid(startPixel: number, endPixel: number, startLimitIndex: number, endLimitIndex: number): boolean;
/** Gets called after grid is initialised. What happens depends on row model. Client Side will take rowData
* from gridOptions, the other row models will start calling their datasources. */
start(): void;
} | getCurrentPageHeight(): number;
/** Returns true if the provided rowNode is in the list of rows to render */
isRowPresent(rowNode: RowNode): boolean; | random_line_split |
feedbackStatusbarItem.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { IDisposable } from 'vs/base/common/lifecycle';
import { IStatusbarItem } from 'vs/workbench/browser/parts/statusbar/statusbar';
import { FeedbackDropdown, IFeedback, IFeedbackService } from './feedback';
import { IContextViewService } from 'vs/platform/contextview/browser/contextView';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import product from 'vs/platform/node/product';
import { Themable, STATUS_BAR_FOREGROUND, STATUS_BAR_NO_FOLDER_FOREGROUND } from 'vs/workbench/common/theme';
import { IThemeService } from 'vs/platform/theme/common/themeService';
import { IWorkspaceContextService, WorkbenchState } from 'vs/platform/workspace/common/workspace';
class TwitterFeedbackService implements IFeedbackService {
private static TWITTER_URL: string = 'https://twitter.com/intent/tweet';
private static VIA_NAME: string = 'code';
private static HASHTAGS: string[] = ['HappyCoding'];
private combineHashTagsAsString(): string {
return TwitterFeedbackService.HASHTAGS.join(',');
}
public submitFeedback(feedback: IFeedback): void {
const queryString = `?${feedback.sentiment === 1 ? `hashtags=${this.combineHashTagsAsString()}&` : null}ref_src=twsrc%5Etfw&related=twitterapi%2Ctwitter&text=${feedback.feedback}&tw_p=tweetbutton&via=${TwitterFeedbackService.VIA_NAME}`;
const url = TwitterFeedbackService.TWITTER_URL + queryString;
window.open(url);
}
public getCharacterLimit(sentiment: number): number {
let length: number = 0;
if (sentiment === 1) {
TwitterFeedbackService.HASHTAGS.forEach(element => {
length += element.length + 2;
});
}
if (TwitterFeedbackService.VIA_NAME) {
length += ` via @${TwitterFeedbackService.VIA_NAME}`.length;
}
return 140 - length;
}
}
export class FeedbackStatusbarItem extends Themable implements IStatusbarItem {
private dropdown: FeedbackDropdown;
constructor(
@IInstantiationService private instantiationService: IInstantiationService,
@IContextViewService private contextViewService: IContextViewService,
@IWorkspaceContextService private contextService: IWorkspaceContextService,
@IThemeService themeService: IThemeService
) {
super(themeService);
this.registerListeners();
}
private registerListeners(): void {
this.toUnbind.push(this.contextService.onDidChangeWorkbenchState(() => this.updateStyles()));
}
protected updateStyles(): void {
super.updateStyles();
if (this.dropdown) |
}
public render(element: HTMLElement): IDisposable {
if (product.sendASmile) {
this.dropdown = this.instantiationService.createInstance(FeedbackDropdown, element, {
contextViewProvider: this.contextViewService,
feedbackService: this.instantiationService.createInstance(TwitterFeedbackService)
});
this.updateStyles();
return this.dropdown;
}
return null;
}
} | {
this.dropdown.label.style('background-color', this.getColor(this.contextService.getWorkbenchState() !== WorkbenchState.EMPTY ? STATUS_BAR_FOREGROUND : STATUS_BAR_NO_FOLDER_FOREGROUND));
} | conditional_block |
feedbackStatusbarItem.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { IDisposable } from 'vs/base/common/lifecycle';
import { IStatusbarItem } from 'vs/workbench/browser/parts/statusbar/statusbar';
import { FeedbackDropdown, IFeedback, IFeedbackService } from './feedback';
import { IContextViewService } from 'vs/platform/contextview/browser/contextView';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import product from 'vs/platform/node/product';
import { Themable, STATUS_BAR_FOREGROUND, STATUS_BAR_NO_FOLDER_FOREGROUND } from 'vs/workbench/common/theme';
import { IThemeService } from 'vs/platform/theme/common/themeService';
import { IWorkspaceContextService, WorkbenchState } from 'vs/platform/workspace/common/workspace';
class TwitterFeedbackService implements IFeedbackService {
private static TWITTER_URL: string = 'https://twitter.com/intent/tweet';
private static VIA_NAME: string = 'code';
private static HASHTAGS: string[] = ['HappyCoding'];
private combineHashTagsAsString(): string {
return TwitterFeedbackService.HASHTAGS.join(',');
}
public submitFeedback(feedback: IFeedback): void |
public getCharacterLimit(sentiment: number): number {
let length: number = 0;
if (sentiment === 1) {
TwitterFeedbackService.HASHTAGS.forEach(element => {
length += element.length + 2;
});
}
if (TwitterFeedbackService.VIA_NAME) {
length += ` via @${TwitterFeedbackService.VIA_NAME}`.length;
}
return 140 - length;
}
}
export class FeedbackStatusbarItem extends Themable implements IStatusbarItem {
private dropdown: FeedbackDropdown;
constructor(
@IInstantiationService private instantiationService: IInstantiationService,
@IContextViewService private contextViewService: IContextViewService,
@IWorkspaceContextService private contextService: IWorkspaceContextService,
@IThemeService themeService: IThemeService
) {
super(themeService);
this.registerListeners();
}
private registerListeners(): void {
this.toUnbind.push(this.contextService.onDidChangeWorkbenchState(() => this.updateStyles()));
}
protected updateStyles(): void {
super.updateStyles();
if (this.dropdown) {
this.dropdown.label.style('background-color', this.getColor(this.contextService.getWorkbenchState() !== WorkbenchState.EMPTY ? STATUS_BAR_FOREGROUND : STATUS_BAR_NO_FOLDER_FOREGROUND));
}
}
public render(element: HTMLElement): IDisposable {
if (product.sendASmile) {
this.dropdown = this.instantiationService.createInstance(FeedbackDropdown, element, {
contextViewProvider: this.contextViewService,
feedbackService: this.instantiationService.createInstance(TwitterFeedbackService)
});
this.updateStyles();
return this.dropdown;
}
return null;
}
} | {
const queryString = `?${feedback.sentiment === 1 ? `hashtags=${this.combineHashTagsAsString()}&` : null}ref_src=twsrc%5Etfw&related=twitterapi%2Ctwitter&text=${feedback.feedback}&tw_p=tweetbutton&via=${TwitterFeedbackService.VIA_NAME}`;
const url = TwitterFeedbackService.TWITTER_URL + queryString;
window.open(url);
} | identifier_body |
feedbackStatusbarItem.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { IDisposable } from 'vs/base/common/lifecycle';
import { IStatusbarItem } from 'vs/workbench/browser/parts/statusbar/statusbar';
import { FeedbackDropdown, IFeedback, IFeedbackService } from './feedback';
import { IContextViewService } from 'vs/platform/contextview/browser/contextView';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import product from 'vs/platform/node/product';
import { Themable, STATUS_BAR_FOREGROUND, STATUS_BAR_NO_FOLDER_FOREGROUND } from 'vs/workbench/common/theme';
import { IThemeService } from 'vs/platform/theme/common/themeService';
import { IWorkspaceContextService, WorkbenchState } from 'vs/platform/workspace/common/workspace';
class TwitterFeedbackService implements IFeedbackService {
private static TWITTER_URL: string = 'https://twitter.com/intent/tweet';
private static VIA_NAME: string = 'code';
private static HASHTAGS: string[] = ['HappyCoding'];
private combineHashTagsAsString(): string {
return TwitterFeedbackService.HASHTAGS.join(',');
}
public submitFeedback(feedback: IFeedback): void {
const queryString = `?${feedback.sentiment === 1 ? `hashtags=${this.combineHashTagsAsString()}&` : null}ref_src=twsrc%5Etfw&related=twitterapi%2Ctwitter&text=${feedback.feedback}&tw_p=tweetbutton&via=${TwitterFeedbackService.VIA_NAME}`;
const url = TwitterFeedbackService.TWITTER_URL + queryString;
window.open(url);
}
public getCharacterLimit(sentiment: number): number {
let length: number = 0;
if (sentiment === 1) {
TwitterFeedbackService.HASHTAGS.forEach(element => {
length += element.length + 2;
});
}
if (TwitterFeedbackService.VIA_NAME) {
length += ` via @${TwitterFeedbackService.VIA_NAME}`.length;
}
return 140 - length;
}
}
export class FeedbackStatusbarItem extends Themable implements IStatusbarItem {
private dropdown: FeedbackDropdown;
| (
@IInstantiationService private instantiationService: IInstantiationService,
@IContextViewService private contextViewService: IContextViewService,
@IWorkspaceContextService private contextService: IWorkspaceContextService,
@IThemeService themeService: IThemeService
) {
super(themeService);
this.registerListeners();
}
private registerListeners(): void {
this.toUnbind.push(this.contextService.onDidChangeWorkbenchState(() => this.updateStyles()));
}
protected updateStyles(): void {
super.updateStyles();
if (this.dropdown) {
this.dropdown.label.style('background-color', this.getColor(this.contextService.getWorkbenchState() !== WorkbenchState.EMPTY ? STATUS_BAR_FOREGROUND : STATUS_BAR_NO_FOLDER_FOREGROUND));
}
}
public render(element: HTMLElement): IDisposable {
if (product.sendASmile) {
this.dropdown = this.instantiationService.createInstance(FeedbackDropdown, element, {
contextViewProvider: this.contextViewService,
feedbackService: this.instantiationService.createInstance(TwitterFeedbackService)
});
this.updateStyles();
return this.dropdown;
}
return null;
}
} | constructor | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.