| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
typedefs_1.js | var searchData=
[
['gpr_5fatm',['gpr_atm',['../atm__gcc__atomic_8h.html#aa9e4b16c09888debe306f14e8090b1cc',1,'gpr_atm(): atm_gcc_atomic.h'],['../atm__gcc__sync_8h.html#aa9e4b16c09888debe306f14e8090b1cc',1,'gpr_atm(): atm_gcc_sync.h'],['../atm__win32_8h.html#aa9e4b16c09888debe306f14e8090b1cc',1,'gpr_atm(): atm_win32.h']]],
['gpr_5fcmdline',['gpr_cmdline',['../cmdline_8h.html#aaad93cd9bfc32c54fc89f7c2f3b74eb6',1,'cmdline.h']]],
['gpr_5fcv',['gpr_cv',['../sync__posix_8h.html#a1ad613e07180c0459cda9f3f6d881885',1,'gpr_cv(): sync_posix.h'],['../sync__win32_8h.html#a81241bed6e85e9b106a1311060a04d9d',1,'gpr_cv(): sync_win32.h']]],
['gpr_5fhistogram',['gpr_histogram',['../histogram_8h.html#a7125b0cef0e61a441a17f887dbc073fc',1,'histogram.h']]],
['gpr_5fint16',['gpr_int16',['../port__platform_8h.html#a6c41ac43b02ec37b1e07967a8706e709',1,'port_platform.h']]],
['gpr_5fint32',['gpr_int32',['../port__platform_8h.html#a7c9027ffa98b5efe1767efe79903c6b7',1,'port_platform.h']]],
['gpr_5fint64',['gpr_int64',['../port__platform_8h.html#a71edab4bc3421f129764e5cb342f7181',1,'port_platform.h']]],
['gpr_5fintmax',['gpr_intmax',['../port__platform_8h.html#a3ecb17ae580dbde63199ac206a732139',1,'port_platform.h']]],
['gpr_5fintptr',['gpr_intptr',['../port__platform_8h.html#a72d9d0d00bd576cfc8eab61aaea76db2',1,'port_platform.h']]],
['gpr_5flog_5ffunc',['gpr_log_func',['../log_8h.html#a23f29195676c33c61ff08a7f0a3e69b0',1,'log.h']]],
['gpr_5flog_5fseverity',['gpr_log_severity',['../log_8h.html#ad49303346a78cf4881129958214fde8d',1,'log.h']]],
['gpr_5fmu',['gpr_mu',['../sync__posix_8h.html#aa66fb6a11304ef6759d76f84a34ee28f',1,'sync_posix.h']]],
['gpr_5fonce',['gpr_once',['../sync__posix_8h.html#a28731dc17a4158343f58f453a4d5e37f',1,'gpr_once(): sync_posix.h'],['../sync__win32_8h.html#a6a752a459fe345c616c26b5a556ccc13',1,'gpr_once(): sync_win32.h']]],
['gpr_5fslice',['gpr_slice',['../slice_8h.html#a91fe16db371db7c53d6e3adc90a6678c',1,'slice.h']]], | ['gpr_5ftimespec',['gpr_timespec',['../time_8h.html#a7dd12c72fcf53ebecfdfc13632914c45',1,'time.h']]],
['gpr_5fuint16',['gpr_uint16',['../port__platform_8h.html#abb7bceac4325643af77af51c7b6af371',1,'port_platform.h']]],
['gpr_5fuint32',['gpr_uint32',['../port__platform_8h.html#aa6abd4df815a5498d1a04b5e691a74a9',1,'port_platform.h']]],
['gpr_5fuint64',['gpr_uint64',['../port__platform_8h.html#a737bb95b7b58d90b4215602d36ed65b6',1,'port_platform.h']]],
['gpr_5fuint8',['gpr_uint8',['../port__platform_8h.html#aeeb41e29e7c7a916a4e0fb6cfb9f92e0',1,'port_platform.h']]],
['gpr_5fuintmax',['gpr_uintmax',['../port__platform_8h.html#abee5d576e1d284a7c8135df7d89af0f7',1,'port_platform.h']]],
['gpr_5fuintptr',['gpr_uintptr',['../port__platform_8h.html#ab302f9bfe3be467072bd8c79847636d4',1,'port_platform.h']]],
['grpc_5fauth_5fcontext',['grpc_auth_context',['../grpc__security_8h.html#aa13f0724cb7ff99995d7f0881c01d553',1,'grpc_security.h']]],
['grpc_5fauth_5fproperty',['grpc_auth_property',['../grpc__security_8h.html#a010fe7d27deaa2ea50b765b48dbad429',1,'grpc_security.h']]],
['grpc_5fauth_5fproperty_5fiterator',['grpc_auth_property_iterator',['../grpc__security_8h.html#a26981fdcb987d885b25d34c7dc66e4cf',1,'grpc_security.h']]],
['grpc_5fbyte_5fbuffer',['grpc_byte_buffer',['../byte__buffer_8h.html#a670951ae03b0a2dc4e50f4e25160fc2e',1,'byte_buffer.h']]],
['grpc_5fbyte_5fbuffer_5freader',['grpc_byte_buffer_reader',['../byte__buffer_8h.html#a16143f22ea46cc7fa6b53e6cb30715f1',1,'byte_buffer.h']]],
['grpc_5fcall',['grpc_call',['../grpc_8h.html#a122f8f4f0a585396d993d9b55848f222',1,'grpc.h']]],
['grpc_5fcall_5ferror',['grpc_call_error',['../grpc_8h.html#abb51216cb5ac1cfd8ca4d096e060adbb',1,'grpc.h']]],
['grpc_5fchannel',['grpc_channel',['../grpc_8h.html#a432bfa6b9f6603643cdf9de8804c254e',1,'grpc.h']]],
['grpc_5fcompletion_5fqueue',['grpc_completion_queue',['../grpc_8h.html#a895faab0e6035445750e43482651ba2f',1,'grpc.h']]],
['grpc_5fcompletion_5ftype',['grpc_completion_type',['../grpc_8h.html#a70454b2958c92c1cd8feeaa45f157b74',1,'grpc.h']]],
['grpc_5fcompression_5foptions',['grpc_compression_options',['../compression_8h.html#a074a1aa6fb56901a795fe53c2adeefde',1,'compression.h']]],
['grpc_5fcredentials',['grpc_credentials',['../grpc__security_8h.html#a17768c857575e4500cdd12bb94d3f33e',1,'grpc_security.h']]],
['grpc_5fevent',['grpc_event',['../grpc_8h.html#a07990645ca218f6965fd83edf3f421b7',1,'grpc.h']]],
['grpc_5fmetadata',['grpc_metadata',['../grpc_8h.html#a1da84eaead787d991c5a0c87aed7c30b',1,'grpc.h']]],
['grpc_5fop',['grpc_op',['../grpc_8h.html#a6556a58ca45ad5132b89c770cf875215',1,'grpc.h']]],
['grpc_5fprocess_5fauth_5fmetadata_5fdone_5fcb',['grpc_process_auth_metadata_done_cb',['../grpc__security_8h.html#a54decefff49649afe0c5a3056d0734f1',1,'grpc_security.h']]],
['grpc_5fserver',['grpc_server',['../grpc_8h.html#a2bbbaad8f7a806f6c834b68c5dd916d8',1,'grpc.h']]],
['grpc_5fserver_5fcredentials',['grpc_server_credentials',['../grpc__security_8h.html#aa724192e56fdc03827846f16752deccc',1,'grpc_security.h']]]
]; | ['gpr_5fslice_5frefcount',['gpr_slice_refcount',['../slice_8h.html#af8841873e741f6cf38b2192147ea4f3e',1,'slice.h']]],
['gpr_5fsubprocess',['gpr_subprocess',['../subprocess_8h.html#a1c3492c3aef738e6eef6b8cb1b435095',1,'subprocess.h']]],
['gpr_5fthd_5fid',['gpr_thd_id',['../thd_8h.html#a04194350e2fb18edc439ab0a9d355a72',1,'thd.h']]], | random_line_split |
borrowck-borrowed-uniq-rvalue-2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct defer<'self> {
x: &'self [&'self str],
}
| unsafe {
error!("%?", self.x);
}
}
}
fn defer<'r>(x: &'r [&'r str]) -> defer<'r> {
defer {
x: x
}
}
fn main() {
let x = defer(~["Goodbye", "world!"]); //~ ERROR borrowed value does not live long enough
x.x[0];
} | #[unsafe_destructor]
impl<'self> Drop for defer<'self> {
fn drop(&self) { | random_line_split |
borrowck-borrowed-uniq-rvalue-2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct defer<'self> {
x: &'self [&'self str],
}
#[unsafe_destructor]
impl<'self> Drop for defer<'self> {
fn drop(&self) {
unsafe {
error!("%?", self.x);
}
}
}
fn | <'r>(x: &'r [&'r str]) -> defer<'r> {
defer {
x: x
}
}
fn main() {
let x = defer(~["Goodbye", "world!"]); //~ ERROR borrowed value does not live long enough
x.x[0];
}
| defer | identifier_name |
borrowck-borrowed-uniq-rvalue-2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct defer<'self> {
x: &'self [&'self str],
}
#[unsafe_destructor]
impl<'self> Drop for defer<'self> {
fn drop(&self) |
}
fn defer<'r>(x: &'r [&'r str]) -> defer<'r> {
defer {
x: x
}
}
fn main() {
let x = defer(~["Goodbye", "world!"]); //~ ERROR borrowed value does not live long enough
x.x[0];
}
| {
unsafe {
error!("%?", self.x);
}
} | identifier_body |
irc.py | import time
from typing import List, Optional
from utils import tasks
from zirc.event import Event
from utils.database import Database
| """Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def set_mode(irc: connection_wrapper, channel: str, users: List[str], mode: str):
for block in chunks(users, 4):
modes = "".join(mode[1:]) * len(block)
irc.mode(channel, " ".join(block), mode[0] + modes)
def get_users(args: str):
if args.find(",") != -1:
pos = args.find(",")
users_str = args[pos:].strip()
if args[pos + 1] != " ":
users = users_str[1:].split(",")
else:
users = users_str[2:].split(", ")
args = args[:pos].strip().split(" ")
users.append(args[-1])
else:
args_list = args.split(" ")
if len(args_list) == 1:
users = args_list[0]
elif len(args_list) >= 2:
users = args_list[:-1]
return users
def get_user_host(userdb: Database, channel: str, nick: str):
return userdb.get_user_host(channel, nick)
def get_info_tuple(event: Event, args: List[str], userdb: Optional[Database]=None):
if args[0].startswith("#"):
channel = args[0]
str_args = " ".join(args[1:])
del args[0]
else:
channel = event.target
str_args = " ".join(args)
if str_args.find(",") != -1:
users = get_users(str_args)
else:
users = args[-1:]
if " ".join(args[:-len(users)]) != '':
message = " ".join(args[:-len(users)])
else:
message = f"{event.source.nick}"
for (i, v) in enumerate(users):
if not v.find("!") != -1 and userdb is not None:
users[i] = get_user_host(userdb, event.target, v)
return channel, users, message
def unban_after_duration(irc: connection_wrapper, users: List[str], chan: str, duration: int):
duration += int(time.time())
def func(irc: connection_wrapper, users: List[str], chan: str):
for i in users:
irc.unban(chan, i)
tasks.run_at(duration, func, (irc, users, chan))
def strip_colours(s: str):
import re
ccodes = ['\x0f', '\x16', '\x1d', '\x1f', '\x02',
'\x03([1-9][0-6]?)?,?([1-9][0-6]?)?']
for cc in ccodes:
s = re.sub(cc, '', s)
return s | from zirc.wrappers import connection_wrapper
def chunks(l: List, n: int): | random_line_split |
irc.py | import time
from typing import List, Optional
from utils import tasks
from zirc.event import Event
from utils.database import Database
from zirc.wrappers import connection_wrapper
def chunks(l: List, n: int):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def set_mode(irc: connection_wrapper, channel: str, users: List[str], mode: str):
|
def get_users(args: str):
if args.find(",") != -1:
pos = args.find(",")
users_str = args[pos:].strip()
if args[pos + 1] != " ":
users = users_str[1:].split(",")
else:
users = users_str[2:].split(", ")
args = args[:pos].strip().split(" ")
users.append(args[-1])
else:
args_list = args.split(" ")
if len(args_list) == 1:
users = args_list[0]
elif len(args_list) >= 2:
users = args_list[:-1]
return users
def get_user_host(userdb: Database, channel: str, nick: str):
return userdb.get_user_host(channel, nick)
def get_info_tuple(event: Event, args: List[str], userdb: Optional[Database]=None):
if args[0].startswith("#"):
channel = args[0]
str_args = " ".join(args[1:])
del args[0]
else:
channel = event.target
str_args = " ".join(args)
if str_args.find(",") != -1:
users = get_users(str_args)
else:
users = args[-1:]
if " ".join(args[:-len(users)]) != '':
message = " ".join(args[:-len(users)])
else:
message = f"{event.source.nick}"
for (i, v) in enumerate(users):
if not v.find("!") != -1 and userdb is not None:
users[i] = get_user_host(userdb, event.target, v)
return channel, users, message
def unban_after_duration(irc: connection_wrapper, users: List[str], chan: str, duration: int):
duration += int(time.time())
def func(irc: connection_wrapper, users: List[str], chan: str):
for i in users:
irc.unban(chan, i)
tasks.run_at(duration, func, (irc, users, chan))
def strip_colours(s: str):
import re
ccodes = ['\x0f', '\x16', '\x1d', '\x1f', '\x02',
'\x03([1-9][0-6]?)?,?([1-9][0-6]?)?']
for cc in ccodes:
s = re.sub(cc, '', s)
return s
| for block in chunks(users, 4):
modes = "".join(mode[1:]) * len(block)
irc.mode(channel, " ".join(block), mode[0] + modes) | identifier_body |
irc.py | import time
from typing import List, Optional
from utils import tasks
from zirc.event import Event
from utils.database import Database
from zirc.wrappers import connection_wrapper
def chunks(l: List, n: int):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def set_mode(irc: connection_wrapper, channel: str, users: List[str], mode: str):
for block in chunks(users, 4):
modes = "".join(mode[1:]) * len(block)
irc.mode(channel, " ".join(block), mode[0] + modes)
def get_users(args: str):
if args.find(",") != -1:
pos = args.find(",")
users_str = args[pos:].strip()
if args[pos + 1] != " ":
users = users_str[1:].split(",")
else:
users = users_str[2:].split(", ")
args = args[:pos].strip().split(" ")
users.append(args[-1])
else:
args_list = args.split(" ")
if len(args_list) == 1:
users = args_list[0]
elif len(args_list) >= 2:
users = args_list[:-1]
return users
def get_user_host(userdb: Database, channel: str, nick: str):
return userdb.get_user_host(channel, nick)
def get_info_tuple(event: Event, args: List[str], userdb: Optional[Database]=None):
if args[0].startswith("#"):
channel = args[0]
str_args = " ".join(args[1:])
del args[0]
else:
channel = event.target
str_args = " ".join(args)
if str_args.find(",") != -1:
users = get_users(str_args)
else:
users = args[-1:]
if " ".join(args[:-len(users)]) != '':
message = " ".join(args[:-len(users)])
else:
|
for (i, v) in enumerate(users):
if not v.find("!") != -1 and userdb is not None:
users[i] = get_user_host(userdb, event.target, v)
return channel, users, message
def unban_after_duration(irc: connection_wrapper, users: List[str], chan: str, duration: int):
duration += int(time.time())
def func(irc: connection_wrapper, users: List[str], chan: str):
for i in users:
irc.unban(chan, i)
tasks.run_at(duration, func, (irc, users, chan))
def strip_colours(s: str):
import re
ccodes = ['\x0f', '\x16', '\x1d', '\x1f', '\x02',
'\x03([1-9][0-6]?)?,?([1-9][0-6]?)?']
for cc in ccodes:
s = re.sub(cc, '', s)
return s
| message = f"{event.source.nick}" | conditional_block |
irc.py | import time
from typing import List, Optional
from utils import tasks
from zirc.event import Event
from utils.database import Database
from zirc.wrappers import connection_wrapper
def chunks(l: List, n: int):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def set_mode(irc: connection_wrapper, channel: str, users: List[str], mode: str):
for block in chunks(users, 4):
modes = "".join(mode[1:]) * len(block)
irc.mode(channel, " ".join(block), mode[0] + modes)
def get_users(args: str):
if args.find(",") != -1:
pos = args.find(",")
users_str = args[pos:].strip()
if args[pos + 1] != " ":
users = users_str[1:].split(",")
else:
users = users_str[2:].split(", ")
args = args[:pos].strip().split(" ")
users.append(args[-1])
else:
args_list = args.split(" ")
if len(args_list) == 1:
users = args_list[0]
elif len(args_list) >= 2:
users = args_list[:-1]
return users
def get_user_host(userdb: Database, channel: str, nick: str):
return userdb.get_user_host(channel, nick)
def get_info_tuple(event: Event, args: List[str], userdb: Optional[Database]=None):
if args[0].startswith("#"):
channel = args[0]
str_args = " ".join(args[1:])
del args[0]
else:
channel = event.target
str_args = " ".join(args)
if str_args.find(",") != -1:
users = get_users(str_args)
else:
users = args[-1:]
if " ".join(args[:-len(users)]) != '':
message = " ".join(args[:-len(users)])
else:
message = f"{event.source.nick}"
for (i, v) in enumerate(users):
if not v.find("!") != -1 and userdb is not None:
users[i] = get_user_host(userdb, event.target, v)
return channel, users, message
def | (irc: connection_wrapper, users: List[str], chan: str, duration: int):
duration += int(time.time())
def func(irc: connection_wrapper, users: List[str], chan: str):
for i in users:
irc.unban(chan, i)
tasks.run_at(duration, func, (irc, users, chan))
def strip_colours(s: str):
import re
ccodes = ['\x0f', '\x16', '\x1d', '\x1f', '\x02',
'\x03([1-9][0-6]?)?,?([1-9][0-6]?)?']
for cc in ccodes:
s = re.sub(cc, '', s)
return s
| unban_after_duration | identifier_name |
gzip.rs | extern crate extra;
extern crate libflate;
use extra::option::OptionalExt;
use libflate::gzip::Encoder;
use std::io::Write;
use std::{env, fs, io, process};
fn | () {
let mut stderr = io::stderr();
let mut keep = false;
let mut files = Vec::new();
for arg in env::args().skip(1) {
if arg == "-k" {
keep = true;
} else {
files.push(arg)
}
}
if files.is_empty() {
eprintln!("gzip: no files provided");
process::exit(1);
}
for arg in files {
{
let output = fs::File::create(&format!("{}.gz", &arg)).try(&mut stderr);
let mut encoder = Encoder::new(output).try(&mut stderr);
let mut input = fs::File::open(&arg).try(&mut stderr);
io::copy(&mut input, &mut encoder).try(&mut stderr);
let mut encoded = encoder.finish().into_result().try(&mut stderr);
encoded.flush().try(&mut stderr);
}
if !keep {
fs::remove_file(&arg).try(&mut stderr);
}
}
}
| main | identifier_name |
gzip.rs | extern crate extra;
extern crate libflate;
use extra::option::OptionalExt;
use libflate::gzip::Encoder;
use std::io::Write;
use std::{env, fs, io, process};
fn main() {
let mut stderr = io::stderr();
let mut keep = false;
let mut files = Vec::new();
for arg in env::args().skip(1) {
if arg == "-k" {
keep = true;
} else |
}
if files.is_empty() {
eprintln!("gzip: no files provided");
process::exit(1);
}
for arg in files {
{
let output = fs::File::create(&format!("{}.gz", &arg)).try(&mut stderr);
let mut encoder = Encoder::new(output).try(&mut stderr);
let mut input = fs::File::open(&arg).try(&mut stderr);
io::copy(&mut input, &mut encoder).try(&mut stderr);
let mut encoded = encoder.finish().into_result().try(&mut stderr);
encoded.flush().try(&mut stderr);
}
if !keep {
fs::remove_file(&arg).try(&mut stderr);
}
}
}
| {
files.push(arg)
} | conditional_block |
gzip.rs | extern crate extra; | extern crate libflate;
use extra::option::OptionalExt;
use libflate::gzip::Encoder;
use std::io::Write;
use std::{env, fs, io, process};
fn main() {
let mut stderr = io::stderr();
let mut keep = false;
let mut files = Vec::new();
for arg in env::args().skip(1) {
if arg == "-k" {
keep = true;
} else {
files.push(arg)
}
}
if files.is_empty() {
eprintln!("gzip: no files provided");
process::exit(1);
}
for arg in files {
{
let output = fs::File::create(&format!("{}.gz", &arg)).try(&mut stderr);
let mut encoder = Encoder::new(output).try(&mut stderr);
let mut input = fs::File::open(&arg).try(&mut stderr);
io::copy(&mut input, &mut encoder).try(&mut stderr);
let mut encoded = encoder.finish().into_result().try(&mut stderr);
encoded.flush().try(&mut stderr);
}
if !keep {
fs::remove_file(&arg).try(&mut stderr);
}
}
} | random_line_split | |
gzip.rs | extern crate extra;
extern crate libflate;
use extra::option::OptionalExt;
use libflate::gzip::Encoder;
use std::io::Write;
use std::{env, fs, io, process};
fn main() | {
let mut stderr = io::stderr();
let mut keep = false;
let mut files = Vec::new();
for arg in env::args().skip(1) {
if arg == "-k" {
keep = true;
} else {
files.push(arg)
}
}
if files.is_empty() {
eprintln!("gzip: no files provided");
process::exit(1);
}
for arg in files {
{
let output = fs::File::create(&format!("{}.gz", &arg)).try(&mut stderr);
let mut encoder = Encoder::new(output).try(&mut stderr);
let mut input = fs::File::open(&arg).try(&mut stderr);
io::copy(&mut input, &mut encoder).try(&mut stderr);
let mut encoded = encoder.finish().into_result().try(&mut stderr);
encoded.flush().try(&mut stderr);
}
if !keep {
fs::remove_file(&arg).try(&mut stderr);
}
}
} | identifier_body | |
rotationalResistance.py | #!/usr/bin/env python
# encoding: utf-8
from yade import utils, plot
o = Omega()
fr = 0.5;rho=2000
tc = 0.001; en = 0.7; et = 0.7; o.dt = 0.0002*tc
r = 0.002
mat1 = O.materials.append(ViscElMat(frictionAngle=fr,mR = 0.05, mRtype = 1, density=rho,tc=tc,en=en,et=et))
mat2 = O.materials.append(ViscElMat(frictionAngle=fr,mR = 0.05, mRtype = 2, density=rho,tc=tc,en=en,et=et))
oriBody = Quaternion(Vector3(1,0,0),(pi/28))
id1 = O.bodies.append(sphere(center=[0,0,2*r],radius=r,material=mat1))
id2 = O.bodies.append(geom.facetBox(center=(0,-16.0*r,-2*r),orientation=oriBody,extents=(r,17.0*r,0), material=mat1,color=(0,0,1)))
id3 = O.bodies.append(sphere(center=[10*r,0,2*r],radius=r,material=mat2))
id4 = O.bodies.append(geom.facetBox(center=(10*r,-16.0*r,-2*r),orientation=oriBody,extents=(r,17.0*r,0), material=mat2,color=(0,0,1)))
o.engines = [
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
NewtonIntegrator(damping=0,gravity=[0,0,-9.81]),
PyRunner(command='addPlotData()',iterPeriod=10000, dead = False, label='graph'),
]
def addPlotData():
f |
plot.plots={'sc':('fc1','fc2')}; plot.plot()
from yade import qt
qt.View()
| 1 = [0,0,0]
s1 = O.bodies[id1].state.pos[1]
s2 = O.bodies[id3].state.pos[1]
plot.addData(sc=O.time, fc1=s1, fc2=s2)
| identifier_body |
rotationalResistance.py | #!/usr/bin/env python | o = Omega()
fr = 0.5;rho=2000
tc = 0.001; en = 0.7; et = 0.7; o.dt = 0.0002*tc
r = 0.002
mat1 = O.materials.append(ViscElMat(frictionAngle=fr,mR = 0.05, mRtype = 1, density=rho,tc=tc,en=en,et=et))
mat2 = O.materials.append(ViscElMat(frictionAngle=fr,mR = 0.05, mRtype = 2, density=rho,tc=tc,en=en,et=et))
oriBody = Quaternion(Vector3(1,0,0),(pi/28))
id1 = O.bodies.append(sphere(center=[0,0,2*r],radius=r,material=mat1))
id2 = O.bodies.append(geom.facetBox(center=(0,-16.0*r,-2*r),orientation=oriBody,extents=(r,17.0*r,0), material=mat1,color=(0,0,1)))
id3 = O.bodies.append(sphere(center=[10*r,0,2*r],radius=r,material=mat2))
id4 = O.bodies.append(geom.facetBox(center=(10*r,-16.0*r,-2*r),orientation=oriBody,extents=(r,17.0*r,0), material=mat2,color=(0,0,1)))
o.engines = [
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
NewtonIntegrator(damping=0,gravity=[0,0,-9.81]),
PyRunner(command='addPlotData()',iterPeriod=10000, dead = False, label='graph'),
]
def addPlotData():
f1 = [0,0,0]
s1 = O.bodies[id1].state.pos[1]
s2 = O.bodies[id3].state.pos[1]
plot.addData(sc=O.time, fc1=s1, fc2=s2)
plot.plots={'sc':('fc1','fc2')}; plot.plot()
from yade import qt
qt.View() | # encoding: utf-8
from yade import utils, plot | random_line_split |
rotationalResistance.py | #!/usr/bin/env python
# encoding: utf-8
from yade import utils, plot
o = Omega()
fr = 0.5;rho=2000
tc = 0.001; en = 0.7; et = 0.7; o.dt = 0.0002*tc
r = 0.002
mat1 = O.materials.append(ViscElMat(frictionAngle=fr,mR = 0.05, mRtype = 1, density=rho,tc=tc,en=en,et=et))
mat2 = O.materials.append(ViscElMat(frictionAngle=fr,mR = 0.05, mRtype = 2, density=rho,tc=tc,en=en,et=et))
oriBody = Quaternion(Vector3(1,0,0),(pi/28))
id1 = O.bodies.append(sphere(center=[0,0,2*r],radius=r,material=mat1))
id2 = O.bodies.append(geom.facetBox(center=(0,-16.0*r,-2*r),orientation=oriBody,extents=(r,17.0*r,0), material=mat1,color=(0,0,1)))
id3 = O.bodies.append(sphere(center=[10*r,0,2*r],radius=r,material=mat2))
id4 = O.bodies.append(geom.facetBox(center=(10*r,-16.0*r,-2*r),orientation=oriBody,extents=(r,17.0*r,0), material=mat2,color=(0,0,1)))
o.engines = [
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
NewtonIntegrator(damping=0,gravity=[0,0,-9.81]),
PyRunner(command='addPlotData()',iterPeriod=10000, dead = False, label='graph'),
]
def a | ):
f1 = [0,0,0]
s1 = O.bodies[id1].state.pos[1]
s2 = O.bodies[id3].state.pos[1]
plot.addData(sc=O.time, fc1=s1, fc2=s2)
plot.plots={'sc':('fc1','fc2')}; plot.plot()
from yade import qt
qt.View()
| ddPlotData( | identifier_name |
meg.rs |
extern crate env_logger;
extern crate rustc_serialize;
extern crate toml;
extern crate turbo;
extern crate meg;
extern crate term_painter;
#[macro_use] extern crate log;
use std::collections::BTreeSet;
use std::env;
use std::fs;
use std::io;
use std::path::{PathBuf, Path};
use std::process::Command;
use turbo::turbo::{execute_main_without_stdin, handle_error, shell};
use turbo::core::MultiShell;
use turbo::util::{CliError, CliResult, Config};
use meg::util::{lev_distance};
use self::term_painter::Color::*;
use self::term_painter::ToStyle;
#[derive(RustcDecodable)]
#[derive(RustcEncodable)]
struct Flags {
flag_list: bool,
flag_verbose: bool,
arg_command: String,
arg_args: Vec<String>,
}
const USAGE: &'static str = "
Megam command line
Usage:
meg <command> [<args>...]
meg [options]
Options:
-h, --help Display this message
version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
meg commands are:
ahoy Ping the status of megam.
account Create an account with megam.
sshkey Create SSHKey with megam.
csar Create apps/services & torpedos
See 'meg help <command>' for more information on a specific command.
";
fn main() {
env_logger::init().unwrap();
execute_main_without_stdin(execute, true, USAGE);
}
macro_rules! each_subcommand{ ($mac:ident) => ({
$mac!(help);
$mac!(ahoy);
$mac!(account);
$mac!(sshkey);
$mac!(csar);
$mac!(version);
}) }
/**
The top-level `cargo` command handles configuration and project location
because they are fundamental (and intertwined). Other commands can rely
on this top-level information.
*/
fn execute(flags: Flags, config: &Config) -> CliResult<Option<()>> {
config.shell().set_verbose(flags.flag_verbose);
if flags.flag_list {
println!("{}",
Green.paint("Installed commands:"));
for command in list_commands().into_iter() {
println!("{}", command);
};
return Ok(None)
}
let args = match &flags.arg_command[..] {
// For the commands `meg` and `meg help`, re-execute ourselves as
// `meg -h` so we can go through the normal process of printing the
// help message.
"" | "help" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "-h".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
// For `meg help -h` and `meg help --help`, print out the help
// message for `meg help`
"help" if flags.arg_args[0] == "-h" ||
flags.arg_args[0] == "--help" => {
vec!["meg".to_string(), "help".to_string(), "-h".to_string()]
}
// For `meg help foo`, print out the usage message for the specified
// subcommand by executing the command with the `-h` flag.
"help" => {
vec!["meg".to_string(), flags.arg_args[0].clone(),
"-h".to_string()]
}
// For all other invocations, we're of the form `meg foo args...`. We
// use the exact environment arguments to preserve tokens like `--` for
// example.
"account" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "account".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
"sshkey" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "sshkey".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
"csar" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "csar".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
_ => env::args().collect(),
};
macro_rules! cmd{ ($name:ident) => (
if args[1] == stringify!($name).replace("_", "-") {
mod $name;
config.shell().set_verbose(true);
let r = turbo::turbo::call_main_without_stdin($name::execute, config,
$name::USAGE,
&args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
) }
each_subcommand!(cmd);
execute_subcommand(&args[1], &args, &mut config.shell());
Ok(None)
}
fn | (cmd: &str) -> Option<String> {
let cmds = list_commands();
// Only consider candidates with a lev_distance of 3 or less so we don't
// suggest out-of-the-blue options.
let mut filtered = cmds.iter().map(|c| (lev_distance(&c, cmd), c))
.filter(|&(d, _)| d < 4)
.collect::<Vec<_>>();
filtered.sort_by(|a, b| a.0.cmp(&b.0));
if filtered.len() == 0 {
None
} else {
Some(filtered[0].1.to_string())
}
}
fn execute_subcommand(cmd: &str, args: &[String], shell: &mut MultiShell) {
let command = match find_command(cmd) {
Some(command) => command,
None => {
let msg = match find_closest(cmd) {
Some(closest) => format!("No such subcommand\n\n\t\
Did you mean `{}`?\n", closest),
None => "No such subcommand".to_string()
};
return handle_error(CliError::new(&msg, 127), shell)
}
};
match Command::new(&command).args(&args[1..]).status() {
Ok(ref status) if status.success() => {}
Ok(ref status) => {
match status.code() {
Some(code) => handle_error(CliError::new("", code), shell),
None => {
let msg = format!("subcommand failed with: {}", status);
handle_error(CliError::new(&msg, 101), shell)
}
}
}
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
handle_error(CliError::new("No such subcommand", 127), shell)
}
Err(err) => {
let msg = format!("Subcommand failed to run: {}", err);
handle_error(CliError::new(&msg, 127), shell)
}
}
}
/// List all runnable commands. find_command should always succeed
/// if given one of returned command.
fn list_commands() -> BTreeSet<String> {
let command_prefix = "meg-";
let mut commands = BTreeSet::new();
for dir in list_command_directory().iter() {
let entries = match fs::read_dir(dir) {
Ok(entries) => entries,
_ => continue
};
for entry in entries {
let entry = match entry { Ok(e) => e, Err(..) => continue };
let entry = entry.path();
let filename = match entry.file_name().and_then(|s| s.to_str()) {
Some(filename) => filename,
_ => continue
};
if filename.starts_with(command_prefix) &&
filename.ends_with(env::consts::EXE_SUFFIX) &&
is_executable(&entry) {
let command = &filename[
command_prefix.len()..
filename.len() - env::consts::EXE_SUFFIX.len()];
commands.insert(command.to_string());
}
}
}
macro_rules! add_cmd{ ($cmd:ident) => ({
commands.insert(stringify!($cmd).replace("_", "-"));
}) }
each_subcommand!(add_cmd);
commands
}
#[cfg(unix)]
fn is_executable(path: &Path) -> bool {
//use std::os::unix;
//use std::sys::ext;
//fs::metadata(path).map(|m| {
// m.permissions() == 0o001
// }).unwrap_or(false)
return true
}
#[cfg(windows)]
fn is_executable(path: &Path) -> bool {
fs::metadata(path).map(|m| m.is_file()).unwrap_or(false)
}
/// Get `Command` to run given command.
fn find_command(cmd: &str) -> Option<PathBuf> {
let command_exe = format!("meg-{}{}", cmd, env::consts::EXE_SUFFIX);
let dirs = list_command_directory();
let mut command_paths = dirs.iter().map(|dir| dir.join(&command_exe));
command_paths.find(|path| fs::metadata(&path).is_ok())
}
/// List candidate locations where subcommands might be installed.
fn list_command_directory() -> Vec<PathBuf> {
let mut dirs = vec![];
if let Ok(mut path) = env::current_exe() {
path.pop();
dirs.push(path.join("../lib/meg"));
dirs.push(path);
}
if let Some(val) = env::var_os("PATH") {
dirs.extend(env::split_paths(&val));
}
dirs
}
| find_closest | identifier_name |
meg.rs | extern crate env_logger;
extern crate rustc_serialize;
extern crate toml;
extern crate turbo;
extern crate meg;
extern crate term_painter;
#[macro_use] extern crate log;
use std::collections::BTreeSet;
use std::env;
use std::fs;
use std::io;
use std::path::{PathBuf, Path};
use std::process::Command;
use turbo::turbo::{execute_main_without_stdin, handle_error, shell};
use turbo::core::MultiShell;
use turbo::util::{CliError, CliResult, Config};
use meg::util::{lev_distance};
use self::term_painter::Color::*;
use self::term_painter::ToStyle;
#[derive(RustcDecodable)]
#[derive(RustcEncodable)]
struct Flags {
flag_list: bool,
flag_verbose: bool,
arg_command: String,
arg_args: Vec<String>,
}
const USAGE: &'static str = "
Megam command line
Usage:
meg <command> [<args>...]
meg [options]
Options:
-h, --help Display this message
version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
meg commands are:
ahoy Ping the status of megam.
account Create an account with megam.
sshkey Create SSHKey with megam.
csar Create apps/services & torpedos
See 'meg help <command>' for more information on a specific command.
";
fn main() {
env_logger::init().unwrap();
execute_main_without_stdin(execute, true, USAGE);
}
macro_rules! each_subcommand{ ($mac:ident) => ({
$mac!(help);
$mac!(ahoy);
$mac!(account);
$mac!(sshkey);
$mac!(csar);
$mac!(version);
}) }
/**
The top-level `cargo` command handles configuration and project location
because they are fundamental (and intertwined). Other commands can rely
on this top-level information.
*/
fn execute(flags: Flags, config: &Config) -> CliResult<Option<()>> {
config.shell().set_verbose(flags.flag_verbose);
if flags.flag_list {
println!("{}",
Green.paint("Installed commands:"));
for command in list_commands().into_iter() {
println!("{}", command);
};
return Ok(None)
}
let args = match &flags.arg_command[..] {
// For the commands `meg` and `meg help`, re-execute ourselves as
// `meg -h` so we can go through the normal process of printing the
// help message.
"" | "help" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "-h".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
// For `meg help -h` and `meg help --help`, print out the help
// message for `meg help`
"help" if flags.arg_args[0] == "-h" ||
flags.arg_args[0] == "--help" => {
vec!["meg".to_string(), "help".to_string(), "-h".to_string()]
}
// For `meg help foo`, print out the usage message for the specified
// subcommand by executing the command with the `-h` flag.
"help" => {
vec!["meg".to_string(), flags.arg_args[0].clone(),
"-h".to_string()]
}
// For all other invocations, we're of the form `meg foo args...`. We
// use the exact environment arguments to preserve tokens like `--` for
// example.
"account" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "account".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
"sshkey" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "sshkey".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
"csar" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "csar".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
_ => env::args().collect(),
};
macro_rules! cmd{ ($name:ident) => (
if args[1] == stringify!($name).replace("_", "-") {
mod $name;
config.shell().set_verbose(true);
let r = turbo::turbo::call_main_without_stdin($name::execute, config,
$name::USAGE,
&args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
) }
each_subcommand!(cmd);
execute_subcommand(&args[1], &args, &mut config.shell());
Ok(None)
}
fn find_closest(cmd: &str) -> Option<String> {
let cmds = list_commands();
// Only consider candidates with a lev_distance of 3 or less so we don't
// suggest out-of-the-blue options.
let mut filtered = cmds.iter().map(|c| (lev_distance(&c, cmd), c))
.filter(|&(d, _)| d < 4)
.collect::<Vec<_>>();
filtered.sort_by(|a, b| a.0.cmp(&b.0));
if filtered.len() == 0 {
None
} else {
Some(filtered[0].1.to_string())
}
}
fn execute_subcommand(cmd: &str, args: &[String], shell: &mut MultiShell) {
let command = match find_command(cmd) {
Some(command) => command,
None => {
let msg = match find_closest(cmd) {
Some(closest) => format!("No such subcommand\n\n\t\
Did you mean `{}`?\n", closest),
None => "No such subcommand".to_string()
};
return handle_error(CliError::new(&msg, 127), shell)
}
};
match Command::new(&command).args(&args[1..]).status() {
Ok(ref status) if status.success() => {}
Ok(ref status) => {
match status.code() {
Some(code) => handle_error(CliError::new("", code), shell),
None => {
let msg = format!("subcommand failed with: {}", status);
handle_error(CliError::new(&msg, 101), shell)
}
}
}
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
handle_error(CliError::new("No such subcommand", 127), shell)
}
Err(err) => {
let msg = format!("Subcommand failed to run: {}", err);
handle_error(CliError::new(&msg, 127), shell)
}
}
}
/// List all runnable commands. find_command should always succeed
/// if given one of returned command.
fn list_commands() -> BTreeSet<String> {
let command_prefix = "meg-";
let mut commands = BTreeSet::new();
for dir in list_command_directory().iter() {
let entries = match fs::read_dir(dir) {
Ok(entries) => entries,
_ => continue
};
for entry in entries {
let entry = match entry { Ok(e) => e, Err(..) => continue };
let entry = entry.path();
let filename = match entry.file_name().and_then(|s| s.to_str()) {
Some(filename) => filename,
_ => continue
};
if filename.starts_with(command_prefix) &&
filename.ends_with(env::consts::EXE_SUFFIX) &&
is_executable(&entry) {
let command = &filename[
command_prefix.len()..
filename.len() - env::consts::EXE_SUFFIX.len()];
commands.insert(command.to_string());
}
}
}
macro_rules! add_cmd{ ($cmd:ident) => ({
commands.insert(stringify!($cmd).replace("_", "-"));
}) }
each_subcommand!(add_cmd);
commands
}
#[cfg(unix)]
fn is_executable(path: &Path) -> bool {
//use std::os::unix;
//use std::sys::ext;
//fs::metadata(path).map(|m| {
// m.permissions() == 0o001
// }).unwrap_or(false)
return true
}
#[cfg(windows)]
fn is_executable(path: &Path) -> bool {
fs::metadata(path).map(|m| m.is_file()).unwrap_or(false)
}
/// Get `Command` to run given command.
fn find_command(cmd: &str) -> Option<PathBuf> {
let command_exe = format!("meg-{}{}", cmd, env::consts::EXE_SUFFIX);
let dirs = list_command_directory();
let mut command_paths = dirs.iter().map(|dir| dir.join(&command_exe));
command_paths.find(|path| fs::metadata(&path).is_ok())
}
/// List candidate locations where subcommands might be installed.
fn list_command_directory() -> Vec<PathBuf> {
let mut dirs = vec![];
if let Ok(mut path) = env::current_exe() {
path.pop();
dirs.push(path.join("../lib/meg"));
dirs.push(path);
} | dirs
} | if let Some(val) = env::var_os("PATH") {
dirs.extend(env::split_paths(&val));
} | random_line_split |
meg.rs |
extern crate env_logger;
extern crate rustc_serialize;
extern crate toml;
extern crate turbo;
extern crate meg;
extern crate term_painter;
#[macro_use] extern crate log;
use std::collections::BTreeSet;
use std::env;
use std::fs;
use std::io;
use std::path::{PathBuf, Path};
use std::process::Command;
use turbo::turbo::{execute_main_without_stdin, handle_error, shell};
use turbo::core::MultiShell;
use turbo::util::{CliError, CliResult, Config};
use meg::util::{lev_distance};
use self::term_painter::Color::*;
use self::term_painter::ToStyle;
#[derive(RustcDecodable)]
#[derive(RustcEncodable)]
struct Flags {
flag_list: bool,
flag_verbose: bool,
arg_command: String,
arg_args: Vec<String>,
}
const USAGE: &'static str = "
Megam command line
Usage:
meg <command> [<args>...]
meg [options]
Options:
-h, --help Display this message
version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
meg commands are:
ahoy Ping the status of megam.
account Create an account with megam.
sshkey Create SSHKey with megam.
csar Create apps/services & torpedos
See 'meg help <command>' for more information on a specific command.
";
fn main() {
env_logger::init().unwrap();
execute_main_without_stdin(execute, true, USAGE);
}
macro_rules! each_subcommand{ ($mac:ident) => ({
$mac!(help);
$mac!(ahoy);
$mac!(account);
$mac!(sshkey);
$mac!(csar);
$mac!(version);
}) }
/**
The top-level `cargo` command handles configuration and project location
because they are fundamental (and intertwined). Other commands can rely
on this top-level information.
*/
fn execute(flags: Flags, config: &Config) -> CliResult<Option<()>> {
config.shell().set_verbose(flags.flag_verbose);
if flags.flag_list {
println!("{}",
Green.paint("Installed commands:"));
for command in list_commands().into_iter() {
println!("{}", command);
};
return Ok(None)
}
let args = match &flags.arg_command[..] {
// For the commands `meg` and `meg help`, re-execute ourselves as
// `meg -h` so we can go through the normal process of printing the
// help message.
"" | "help" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "-h".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
// For `meg help -h` and `meg help --help`, print out the help
// message for `meg help`
"help" if flags.arg_args[0] == "-h" ||
flags.arg_args[0] == "--help" => {
vec!["meg".to_string(), "help".to_string(), "-h".to_string()]
}
// For `meg help foo`, print out the usage message for the specified
// subcommand by executing the command with the `-h` flag.
"help" => {
vec!["meg".to_string(), flags.arg_args[0].clone(),
"-h".to_string()]
}
// For all other invocations, we're of the form `meg foo args...`. We
// use the exact environment arguments to preserve tokens like `--` for
// example.
"account" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "account".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
"sshkey" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "sshkey".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
"csar" if flags.arg_args.is_empty() => {
config.shell().set_verbose(true);
let args = &["meg".to_string(), "help".to_string(), "csar".to_string()];
let r = turbo::turbo::call_main_without_stdin(execute, config, USAGE, args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
_ => env::args().collect(),
};
macro_rules! cmd{ ($name:ident) => (
if args[1] == stringify!($name).replace("_", "-") {
mod $name;
config.shell().set_verbose(true);
let r = turbo::turbo::call_main_without_stdin($name::execute, config,
$name::USAGE,
&args,
false);
turbo::turbo::process_executed(r, &mut config.shell());
return Ok(None)
}
) }
each_subcommand!(cmd);
execute_subcommand(&args[1], &args, &mut config.shell());
Ok(None)
}
fn find_closest(cmd: &str) -> Option<String> {
let cmds = list_commands();
// Only consider candidates with a lev_distance of 3 or less so we don't
// suggest out-of-the-blue options.
let mut filtered = cmds.iter().map(|c| (lev_distance(&c, cmd), c))
.filter(|&(d, _)| d < 4)
.collect::<Vec<_>>();
filtered.sort_by(|a, b| a.0.cmp(&b.0));
if filtered.len() == 0 {
None
} else {
Some(filtered[0].1.to_string())
}
}
fn execute_subcommand(cmd: &str, args: &[String], shell: &mut MultiShell) |
/// List all runnable commands. find_command should always succeed
/// if given one of returned command.
fn list_commands() -> BTreeSet<String> {
let command_prefix = "meg-";
let mut commands = BTreeSet::new();
for dir in list_command_directory().iter() {
let entries = match fs::read_dir(dir) {
Ok(entries) => entries,
_ => continue
};
for entry in entries {
let entry = match entry { Ok(e) => e, Err(..) => continue };
let entry = entry.path();
let filename = match entry.file_name().and_then(|s| s.to_str()) {
Some(filename) => filename,
_ => continue
};
if filename.starts_with(command_prefix) &&
filename.ends_with(env::consts::EXE_SUFFIX) &&
is_executable(&entry) {
let command = &filename[
command_prefix.len()..
filename.len() - env::consts::EXE_SUFFIX.len()];
commands.insert(command.to_string());
}
}
}
macro_rules! add_cmd{ ($cmd:ident) => ({
commands.insert(stringify!($cmd).replace("_", "-"));
}) }
each_subcommand!(add_cmd);
commands
}
#[cfg(unix)]
fn is_executable(path: &Path) -> bool {
//use std::os::unix;
//use std::sys::ext;
//fs::metadata(path).map(|m| {
// m.permissions() == 0o001
// }).unwrap_or(false)
return true
}
#[cfg(windows)]
fn is_executable(path: &Path) -> bool {
fs::metadata(path).map(|m| m.is_file()).unwrap_or(false)
}
/// Get `Command` to run given command.
fn find_command(cmd: &str) -> Option<PathBuf> {
let command_exe = format!("meg-{}{}", cmd, env::consts::EXE_SUFFIX);
let dirs = list_command_directory();
let mut command_paths = dirs.iter().map(|dir| dir.join(&command_exe));
command_paths.find(|path| fs::metadata(&path).is_ok())
}
/// List candidate locations where subcommands might be installed.
fn list_command_directory() -> Vec<PathBuf> {
let mut dirs = vec![];
if let Ok(mut path) = env::current_exe() {
path.pop();
dirs.push(path.join("../lib/meg"));
dirs.push(path);
}
if let Some(val) = env::var_os("PATH") {
dirs.extend(env::split_paths(&val));
}
dirs
}
| {
let command = match find_command(cmd) {
Some(command) => command,
None => {
let msg = match find_closest(cmd) {
Some(closest) => format!("No such subcommand\n\n\t\
Did you mean `{}`?\n", closest),
None => "No such subcommand".to_string()
};
return handle_error(CliError::new(&msg, 127), shell)
}
};
match Command::new(&command).args(&args[1..]).status() {
Ok(ref status) if status.success() => {}
Ok(ref status) => {
match status.code() {
Some(code) => handle_error(CliError::new("", code), shell),
None => {
let msg = format!("subcommand failed with: {}", status);
handle_error(CliError::new(&msg, 101), shell)
}
}
}
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
handle_error(CliError::new("No such subcommand", 127), shell)
}
Err(err) => {
let msg = format!("Subcommand failed to run: {}", err);
handle_error(CliError::new(&msg, 127), shell)
}
}
} | identifier_body |
spotify-api.service.ts | import * as rp from 'request-promise-native'
import { Service } from 'ts-express-decorators'
import * as Express from 'express'
import * as querystring from 'querystring'
import { generateRandomString, getSecret } from '../utils'
import { User, IUser } from '../models'
import * as spotify from 'models/spotify'
export interface ITokenResponse {
access_token: string
token_type: string
scope: string
expires_in: number
refresh_token: string
}
export interface IPlaylistTracksResponse {
href: string
items: Array<{
added_at: string
track: { uri: string }
}>,
total: number
}
export let baseApiUrl = 'https://api.spotify.com/v1'
export let baseAuthUrl = 'https://accounts.spotify.com' | export let redirect_uri = process.env.HOST_URL + '/callback'
@Service()
export class SpotifyApiService {
private bearerAuthHeader(currentToken: string) {
return { Authorization: `Bearer ${currentToken}` }
}
public getMe(token: string): Promise<spotify.IUser> {
return rp(`${baseApiUrl}/me`, {
headers: this.bearerAuthHeader(token),
json: true
}).promise()
}
public getPlaylist(token: string, userId: string, playlistId: string) {
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}`, {
headers: this.bearerAuthHeader(token),
json: true
})
}
public getNewToken(refresh_token: string): Promise<ITokenResponse> {
const options = {
method: 'POST',
headers: {
Authorization: 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
form: {
grant_type: 'refresh_token',
refresh_token
},
json: true
}
return rp(`${baseAuthUrl}/api/token`, options).promise()
}
public getToken(code: string): Promise<ITokenResponse> {
const options = {
method: 'POST',
headers: {
Authorization: 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
form: {
code,
grant_type: 'authorization_code',
redirect_uri
},
json: true
}
return rp(`${baseAuthUrl}/api/token`, options).promise()
}
public getPlaylistTracks(
token: string,
userId: string,
playlistId: string,
limit: number = null,
offset: number = null,
fields: string = 'href,items(added_at,track(uri)),total'
): Promise<IPlaylistTracksResponse | any> {
const options = { headers: this.bearerAuthHeader(token), json: true, qs: { fields: undefined, limit: undefined, offset: undefined } }
if (fields !== null) {
options.qs.fields = fields
}
if (limit !== null && limit > 0) {
options.qs.limit = limit
}
if (offset !== null && offset > 0) {
options.qs.offset = offset
}
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}/tracks`, options).promise()
}
public postPlaylistTracks(token: string, userId: string, playlistId: string, trackUris: string[], position = 0) {
const options = { method: 'POST', headers: this.bearerAuthHeader(token), json: true, qs: { uris: null } }
if (trackUris.length > 0) {
options.qs.uris = trackUris.join(',')
} else {
throw new RangeError('No Track Uris where specified')
}
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}/tracks`, options)
}
public redirectToAuth(res: Express.Response) {
const state = generateRandomString(16)
res.cookie(stateKey, state)
const scope = 'user-read-private user-read-email playlist-read-private playlist-modify-private playlist-modify-public'
res.redirect(`${baseAuthUrl}/authorize?` +
querystring.stringify({
response_type: 'code',
client_id,
scope,
redirect_uri,
state
}))
}
public async handleCallback(req: Express.Request, res: Express.Response) {
const code = req.query.code || null
const state = req.query.state || null
const storedState = req.cookies ? req.cookies[stateKey] : null
if ( state === null || state !== storedState ) {
res.redirect('/')
} else {
res.clearCookie(stateKey)
const token = await this.getToken(code)
const me = await this.getMe(token.access_token)
const expires = new Date()
expires.setSeconds(expires.getSeconds() + token.expires_in)
await User.findOneAndUpdate({ userId: me.id }, {
userId: me.id,
token: token.access_token,
refreshToken: token.refresh_token,
expireDate: expires
}, {
new: true,
upsert: true
})
}
}
} | export let client_id = getSecret('CLIENT_ID')
export let client_secret = getSecret('CLIENT_SECRET')
export let stateKey = 'spotify_auth_state' | random_line_split |
spotify-api.service.ts | import * as rp from 'request-promise-native'
import { Service } from 'ts-express-decorators'
import * as Express from 'express'
import * as querystring from 'querystring'
import { generateRandomString, getSecret } from '../utils'
import { User, IUser } from '../models'
import * as spotify from 'models/spotify'
export interface ITokenResponse {
access_token: string
token_type: string
scope: string
expires_in: number
refresh_token: string
}
export interface IPlaylistTracksResponse {
href: string
items: Array<{
added_at: string
track: { uri: string }
}>,
total: number
}
export let baseApiUrl = 'https://api.spotify.com/v1'
export let baseAuthUrl = 'https://accounts.spotify.com'
export let client_id = getSecret('CLIENT_ID')
export let client_secret = getSecret('CLIENT_SECRET')
export let stateKey = 'spotify_auth_state'
export let redirect_uri = process.env.HOST_URL + '/callback'
@Service()
export class SpotifyApiService {
private bearerAuthHeader(currentToken: string) {
return { Authorization: `Bearer ${currentToken}` }
}
public getMe(token: string): Promise<spotify.IUser> {
return rp(`${baseApiUrl}/me`, {
headers: this.bearerAuthHeader(token),
json: true
}).promise()
}
public getPlaylist(token: string, userId: string, playlistId: string) {
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}`, {
headers: this.bearerAuthHeader(token),
json: true
})
}
public getNewToken(refresh_token: string): Promise<ITokenResponse> {
const options = {
method: 'POST',
headers: {
Authorization: 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
form: {
grant_type: 'refresh_token',
refresh_token
},
json: true
}
return rp(`${baseAuthUrl}/api/token`, options).promise()
}
public getToken(code: string): Promise<ITokenResponse> {
const options = {
method: 'POST',
headers: {
Authorization: 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
form: {
code,
grant_type: 'authorization_code',
redirect_uri
},
json: true
}
return rp(`${baseAuthUrl}/api/token`, options).promise()
}
public getPlaylistTracks(
token: string,
userId: string,
playlistId: string,
limit: number = null,
offset: number = null,
fields: string = 'href,items(added_at,track(uri)),total'
): Promise<IPlaylistTracksResponse | any> {
const options = { headers: this.bearerAuthHeader(token), json: true, qs: { fields: undefined, limit: undefined, offset: undefined } }
if (fields !== null) {
options.qs.fields = fields
}
if (limit !== null && limit > 0) |
if (offset !== null && offset > 0) {
options.qs.offset = offset
}
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}/tracks`, options).promise()
}
public postPlaylistTracks(token: string, userId: string, playlistId: string, trackUris: string[], position = 0) {
const options = { method: 'POST', headers: this.bearerAuthHeader(token), json: true, qs: { uris: null } }
if (trackUris.length > 0) {
options.qs.uris = trackUris.join(',')
} else {
throw new RangeError('No Track Uris where specified')
}
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}/tracks`, options)
}
public redirectToAuth(res: Express.Response) {
const state = generateRandomString(16)
res.cookie(stateKey, state)
const scope = 'user-read-private user-read-email playlist-read-private playlist-modify-private playlist-modify-public'
res.redirect(`${baseAuthUrl}/authorize?` +
querystring.stringify({
response_type: 'code',
client_id,
scope,
redirect_uri,
state
}))
}
public async handleCallback(req: Express.Request, res: Express.Response) {
const code = req.query.code || null
const state = req.query.state || null
const storedState = req.cookies ? req.cookies[stateKey] : null
if ( state === null || state !== storedState ) {
res.redirect('/')
} else {
res.clearCookie(stateKey)
const token = await this.getToken(code)
const me = await this.getMe(token.access_token)
const expires = new Date()
expires.setSeconds(expires.getSeconds() + token.expires_in)
await User.findOneAndUpdate({ userId: me.id }, {
userId: me.id,
token: token.access_token,
refreshToken: token.refresh_token,
expireDate: expires
}, {
new: true,
upsert: true
})
}
}
}
| {
options.qs.limit = limit
} | conditional_block |
spotify-api.service.ts | import * as rp from 'request-promise-native'
import { Service } from 'ts-express-decorators'
import * as Express from 'express'
import * as querystring from 'querystring'
import { generateRandomString, getSecret } from '../utils'
import { User, IUser } from '../models'
import * as spotify from 'models/spotify'
export interface ITokenResponse {
access_token: string
token_type: string
scope: string
expires_in: number
refresh_token: string
}
export interface IPlaylistTracksResponse {
href: string
items: Array<{
added_at: string
track: { uri: string }
}>,
total: number
}
export let baseApiUrl = 'https://api.spotify.com/v1'
export let baseAuthUrl = 'https://accounts.spotify.com'
export let client_id = getSecret('CLIENT_ID')
export let client_secret = getSecret('CLIENT_SECRET')
export let stateKey = 'spotify_auth_state'
export let redirect_uri = process.env.HOST_URL + '/callback'
@Service()
export class SpotifyApiService {
private | (currentToken: string) {
return { Authorization: `Bearer ${currentToken}` }
}
public getMe(token: string): Promise<spotify.IUser> {
return rp(`${baseApiUrl}/me`, {
headers: this.bearerAuthHeader(token),
json: true
}).promise()
}
public getPlaylist(token: string, userId: string, playlistId: string) {
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}`, {
headers: this.bearerAuthHeader(token),
json: true
})
}
public getNewToken(refresh_token: string): Promise<ITokenResponse> {
const options = {
method: 'POST',
headers: {
Authorization: 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
form: {
grant_type: 'refresh_token',
refresh_token
},
json: true
}
return rp(`${baseAuthUrl}/api/token`, options).promise()
}
public getToken(code: string): Promise<ITokenResponse> {
const options = {
method: 'POST',
headers: {
Authorization: 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
form: {
code,
grant_type: 'authorization_code',
redirect_uri
},
json: true
}
return rp(`${baseAuthUrl}/api/token`, options).promise()
}
public getPlaylistTracks(
token: string,
userId: string,
playlistId: string,
limit: number = null,
offset: number = null,
fields: string = 'href,items(added_at,track(uri)),total'
): Promise<IPlaylistTracksResponse | any> {
const options = { headers: this.bearerAuthHeader(token), json: true, qs: { fields: undefined, limit: undefined, offset: undefined } }
if (fields !== null) {
options.qs.fields = fields
}
if (limit !== null && limit > 0) {
options.qs.limit = limit
}
if (offset !== null && offset > 0) {
options.qs.offset = offset
}
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}/tracks`, options).promise()
}
public postPlaylistTracks(token: string, userId: string, playlistId: string, trackUris: string[], position = 0) {
const options = { method: 'POST', headers: this.bearerAuthHeader(token), json: true, qs: { uris: null } }
if (trackUris.length > 0) {
options.qs.uris = trackUris.join(',')
} else {
throw new RangeError('No Track URIs were specified')
}
return rp(`${baseApiUrl}/users/${userId}/playlists/${playlistId}/tracks`, options)
}
public redirectToAuth(res: Express.Response) {
const state = generateRandomString(16)
res.cookie(stateKey, state)
const scope = 'user-read-private user-read-email playlist-read-private playlist-modify-private playlist-modify-public'
res.redirect(`${baseAuthUrl}/authorize?` +
querystring.stringify({
response_type: 'code',
client_id,
scope,
redirect_uri,
state
}))
}
public async handleCallback(req: Express.Request, res: Express.Response) {
const code = req.query.code || null
const state = req.query.state || null
const storedState = req.cookies ? req.cookies[stateKey] : null
if ( state === null || state !== storedState ) {
res.redirect('/')
} else {
res.clearCookie(stateKey)
const token = await this.getToken(code)
const me = await this.getMe(token.access_token)
const expires = new Date()
expires.setSeconds(expires.getSeconds() + token.expires_in)
await User.findOneAndUpdate({ userId: me.id }, {
userId: me.id,
token: token.access_token,
refreshToken: token.refresh_token,
expireDate: expires
}, {
new: true,
upsert: true
})
}
}
}
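// Illustrative wiring sketch (not part of this service): the route paths and the
// `app`/`spotifyApi` names are assumptions about the surrounding Express app.
//
//   const app = Express()
//   const spotifyApi = new SpotifyApiService()
//   // Step 1: set the CSRF `state` cookie and send the user to Spotify's consent page.
//   app.get('/login', (req, res) => spotifyApi.redirectToAuth(res))
//   // Step 2: Spotify redirects back to HOST_URL + '/callback'; exchange the code
//   // for tokens and upsert the user record.
//   app.get('/callback', (req, res) => spotifyApi.handleCallback(req, res))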
| bearerAuthHeader | identifier_name |
cli.py | from .. import __description__
from ..defender import VkRaidDefender, data, update_data
####################################################################################################
LOGO = '''\
_ _ _ _ __ _
__ _| | __ _ __ __ _(_) __| | __| | ___ / _| ___ _ __ __| | ___ _ __
\ \ / / |/ / | '__/ _` | |/ _` | / _` |/ _ \ |_ / _ \ '_ \ / _` |/ _ \ '__|
\ V /| < | | | (_| | | (_| | | (_| | __/ _| __/ | | | (_| | __/ |
\_/ |_|\_\ |_| \__,_|_|\__,_| \__,_|\___|_| \___|_| |_|\__,_|\___|_|
by alfred richardsn'''
####################################################################################################
from ..logger import logger
from ..settings import CLIENT_ID
import re
import os
import sys
import webbrowser
from getpass import getpass
from argparse import ArgumentParser
from vk_api.exceptions import ApiError
from requests.exceptions import InvalidSchema, ProxyError
class CLIDefender(VkRaidDefender):
def run(self, chat_ids, objectives):
self._chat_ids = chat_ids
self._objectives = objectives
start_screen()
logger.info('начинаю приём сообщений')
try:
self.listen()
except KeyboardInterrupt:
raise
except Exception as e:
start_screen()
logger.critical('произошла критическая ошибка, перезапускаюсь', exc_info=True)
self.listen()
def start_screen():
os.system('cls' if os.name == 'nt' else 'clear')
print(LOGO + '\n\n')
def ask_yes_or_no(question, true_answer='y', false_answer='n', default_answer='', default=True):
true_answer = true_answer.lower()
false_answer = false_answer.lower()
default_answer = default_answer.lower()
output = question.strip() + ' (' + (true_answer.upper() + '/' + false_answer if default else
true_answer + '/' + false_answer.upper()) + '): '
answer = None
while answer not in (true_answer, false_answer, default_answer):
answer = input(output).lower()
if answer == true_answer: | answer == false_answer:
return False
else:
return default
def register():
use_webbrowser = ask_yes_or_no('открыть ссылку для авторизации в веб-браузере по умолчанию?')
print()
oauth_url = 'https://oauth.vk.com/authorize?client_id={}&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=69632&response_type=token'.format(CLIENT_ID)
if use_webbrowser:
webbrowser.open(oauth_url, new=2)
print('в веб-браузере только что была открыта ссылка для авторизации.')
else:
print(oauth_url + '\n')
print('открой в веб-браузере страницу по ссылке выше.')
token = None
while token is None:
user_input = getpass('авторизируйся на открытой странице при необходимости и вставь адресную строку страницы, на которую было осуществлено перенаправление: ')
token = re.search(r'(?:.*access_token=)?([a-f0-9]+).*', user_input)
return token.group(1)
def run(proxy=None, chat_ids=[], objectives=[], auto_login=False):
token = data.get('token')
proxies = data.get('proxies')
if not token or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для авторизации?')):
token = register()
proxies = None
IP_ADDRESS = re.compile(r'((socks5://)|(?:https?://))?(localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})')
if proxy:
match = IP_ADDRESS.match(proxy)
if not proxy or (not match and not auto_login):
proxy = input('введи адрес прокси-сервера при необходимости его использования: ')
while proxy:
match = IP_ADDRESS.match(proxy)
if match:
break
proxy = input('неверный формат адреса сервера, попробуй ещё раз: ')
else:
match = None
if match:
protocol, use_socks, ip = match.groups()
if not protocol:
use_socks = ask_yes_or_no('использовать протокол socks5 вместо http?') if not auto_login else False
if use_socks:
proxies = {'http': 'socks5://' + ip, 'https': 'socks5://' + ip}
else:
proxies = {'http': 'http://' + ip, 'https': 'https://' + ip}
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['token'] = token
data['proxies'] = proxies
update_data()
start_screen()
if not chat_ids:
chat_ids = data.get('chat_ids')
if not objectives:
objectives = data.get('objectives')
if chat_ids is None or objectives is None or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для работы?')):
chat_ids = list(map(int, input('введи айди конф, в которых нужно защищать рейдеров, через пробел: ').split()))
objectives = list(map(int, input('введи айди защищаемых рейдеров: ').split()))
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['chat_ids'] = chat_ids
data['objectives'] = objectives
update_data()
try:
defender = CLIDefender(token, proxies=proxies)
except InvalidSchema:
sys.exit('необходимо установить дополнительные зависимости для поддержки протокола socks5')
except ApiError:
del data['token']
update_data()
sys.exit('введённый токен недействителен')
except ProxyError:
del data['proxies']
update_data()
sys.exit('не удалось подключиться к прокси-серверу')
defender.run(chat_ids, objectives)
def main():
parser = ArgumentParser(prog='vk-raid-defender', description=__description__, usage='%(prog)s [опции]', add_help=False)
group = parser.add_argument_group('опциональные аргументы')
group.add_argument('-h', '--help', action='help', help='показать это сообщение о помощи и выйти')
group.add_argument('-l', '--login', action='store_true', help='осуществить автоматическую авторизацию')
group.add_argument('-p', '--proxy', metavar='proxy_address', help='адрес прокси-сервера')
group.add_argument('-c', '--chats', type=int, nargs='+', metavar='chat', help='айди конф, в которых нужно защищать рейдеров')
group.add_argument('-u', '--users', type=int, nargs='+', metavar='user', help='айди защищаемых рейдеров')
args = parser.parse_args()
try:
run(args.proxy, args.chats, args.users, args.login)
except KeyboardInterrupt:
print()
sys.exit()
if __name__ == "__main__":
main()
|
return True
elif | conditional_block |
cli.py | from .. import __description__
from ..defender import VkRaidDefender, data, update_data
####################################################################################################
LOGO = '''\
_ _ _ _ __ _
__ _| | __ _ __ __ _(_) __| | __| | ___ / _| ___ _ __ __| | ___ _ __
\ \ / / |/ / | '__/ _` | |/ _` | / _` |/ _ \ |_ / _ \ '_ \ / _` |/ _ \ '__|
\ V /| < | | | (_| | | (_| | | (_| | __/ _| __/ | | | (_| | __/ |
\_/ |_|\_\ |_| \__,_|_|\__,_| \__,_|\___|_| \___|_| |_|\__,_|\___|_|
by alfred richardsn'''
####################################################################################################
from ..logger import logger
from ..settings import CLIENT_ID
import re
import os
import sys
import webbrowser
from getpass import getpass
from argparse import ArgumentParser
from vk_api.exceptions import ApiError
from requests.exceptions import InvalidSchema, ProxyError
class CLIDefender(VkRaidDefender):
def run(self, chat_ids, objectives):
self._chat_ids = chat_ids
self._objectives = objectives
start_screen()
logger.info('начинаю приём сообщений')
try:
self.listen()
except KeyboardInterrupt:
raise
except Exception as e:
start_screen()
logger.critical('произошла критическая ошибка, перезапускаюсь', exc_info=True)
self.listen()
def start_screen():
os.system('cls' if os.name == 'nt' else 'clear')
print(LO | 'n', default_answer='', default=True):
true_answer = true_answer.lower()
false_answer = false_answer.lower()
default_answer = default_answer.lower()
output = question.strip() + ' (' + (true_answer.upper() + '/' + false_answer if default else
true_answer + '/' + false_answer.upper()) + '): '
answer = None
while answer not in (true_answer, false_answer, default_answer):
answer = input(output).lower()
if answer == true_answer:
return True
elif answer == false_answer:
return False
else:
return default
def register():
use_webbrowser = ask_yes_or_no('открыть ссылку для авторизации в веб-браузере по умолчанию?')
print()
oauth_url = 'https://oauth.vk.com/authorize?client_id={}&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=69632&response_type=token'.format(CLIENT_ID)
if use_webbrowser:
webbrowser.open(oauth_url, new=2)
print('в веб-браузере только что была открыта ссылка для авторизации.')
else:
print(oauth_url + '\n')
print('открой в веб-браузере страницу по ссылке выше.')
token = None
while token is None:
user_input = getpass('авторизируйся на открытой странице при необходимости и вставь адресную строку страницы, на которую было осуществлено перенаправление: ')
token = re.search(r'(?:.*access_token=)?([a-f0-9]+).*', user_input)
return token.group(1)
def run(proxy=None, chat_ids=[], objectives=[], auto_login=False):
token = data.get('token')
proxies = data.get('proxies')
if not token or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для авторизации?')):
token = register()
proxies = None
IP_ADDRESS = re.compile(r'((socks5://)|(?:https?://))?(localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})')
if proxy:
match = IP_ADDRESS.match(proxy)
if not proxy or (not match and not auto_login):
proxy = input('введи адрес прокси-сервера при необходимости его использования: ')
while proxy:
match = IP_ADDRESS.match(proxy)
if match:
break
proxy = input('неверный формат адреса сервера, попробуй ещё раз: ')
else:
match = None
if match:
protocol, use_socks, ip = match.groups()
if not protocol:
use_socks = ask_yes_or_no('использовать протокол socks5 вместо http?') if not auto_login else False
if use_socks:
proxies = {'http': 'socks5://' + ip, 'https': 'socks5://' + ip}
else:
proxies = {'http': 'http://' + ip, 'https': 'https://' + ip}
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['token'] = token
data['proxies'] = proxies
update_data()
start_screen()
if not chat_ids:
chat_ids = data.get('chat_ids')
if not objectives:
objectives = data.get('objectives')
if chat_ids is None or objectives is None or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для работы?')):
chat_ids = list(map(int, input('введи айди конф, в которых нужно защищать рейдеров, через пробел: ').split()))
objectives = list(map(int, input('введи айди защищаемых рейдеров: ').split()))
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['chat_ids'] = chat_ids
data['objectives'] = objectives
update_data()
try:
defender = CLIDefender(token, proxies=proxies)
except InvalidSchema:
sys.exit('необходимо установить дополнительные зависимости для поддержки протокола socks5')
except ApiError:
del data['token']
update_data()
sys.exit('введённый токен недействителен')
except ProxyError:
del data['proxies']
update_data()
sys.exit('не удалось подключиться к прокси-серверу')
defender.run(chat_ids, objectives)
def main():
parser = ArgumentParser(prog='vk-raid-defender', description=__description__, usage='%(prog)s [опции]', add_help=False)
group = parser.add_argument_group('опциональные аргументы')
group.add_argument('-h', '--help', action='help', help='показать это сообщение о помощи и выйти')
group.add_argument('-l', '--login', action='store_true', help='осуществить автоматическую авторизацию')
group.add_argument('-p', '--proxy', metavar='proxy_address', help='адрес прокси-сервера')
group.add_argument('-c', '--chats', type=int, nargs='+', metavar='chat', help='айди конф, в которых нужно защищать рейдеров')
group.add_argument('-u', '--users', type=int, nargs='+', metavar='user', help='айди защищаемых рейдеров')
args = parser.parse_args()
try:
run(args.proxy, args.chats, args.users, args.login)
except KeyboardInterrupt:
print()
sys.exit()
if __name__ == "__main__":
main()
| GO + '\n\n')
def ask_yes_or_no(question, true_answer='y', false_answer= | identifier_body |
cli.py | from .. import __description__
from ..defender import VkRaidDefender, data, update_data
####################################################################################################
LOGO = '''\
_ _ _ _ __ _
__ _| | __ _ __ __ _(_) __| | __| | ___ / _| ___ _ __ __| | ___ _ __
\ \ / / |/ / | '__/ _` | |/ _` | / _` |/ _ \ |_ / _ \ '_ \ / _` |/ _ \ '__|
\ V /| < | | | (_| | | (_| | | (_| | __/ _| __/ | | | (_| | __/ |
\_/ |_|\_\ |_| \__,_|_|\__,_| \__,_|\___|_| \___|_| |_|\__,_|\___|_|
by alfred richardsn'''
####################################################################################################
from ..logger import logger
from ..settings import CLIENT_ID
import re
import os
import sys
import webbrowser
from getpass import getpass
from argparse import ArgumentParser
from vk_api.exceptions import ApiError
from requests.exceptions import InvalidSchema, ProxyError
class CLIDefender(VkRaidDefender):
def run(self, chat_ids, objectives):
self._chat_ids = chat_ids
self._objectives = objectives
start_screen()
logger.info('начинаю приём сообщений')
try:
self.listen()
except KeyboardInterrupt:
raise
except Exception as e:
start_screen()
logger.critical('произошла критическая ошибка, перезапускаюсь', exc_info=True)
self.listen()
def start_screen():
os.system('cls' if os.name == 'nt' else 'clear')
print(LOGO + '\n\n')
def ask_yes_or_no(question, true_answer='y', false_answer='n', default_answer='', default=True):
true_answer = true_answer.lower()
false_answer = false_answer.lower()
default_answer = default_answer.lower()
output = question.strip() + ' (' + (true_answer.upper() + '/' + false_answer if default else
true_answer + '/' + false_answer.upper()) + '): '
answer = None
while answer not in (true_answer, false_answer, default_answer):
answer = input(output).lower()
if answer == true_answer:
return True
elif answer == false_answer:
return False
else:
return default
def register():
use_webbrowser = ask_yes_or_no('открыть ссылку для авторизации в веб-браузере по умолчанию?')
print()
oauth_url = 'https://oauth.vk.com/authorize?client_id={}&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=69632&response_type=token'.format(CLIENT_ID)
if use_webbrowser:
webbrowser.open(oauth_url, new=2)
print('в веб-браузере только что была открыта ссылка для авторизации.')
else:
print(oauth_url + '\n')
print('открой в веб-браузере страницу по ссылке выше.')
token = None
while token is None:
user_input = getpass('авторизируйся на открытой странице при необходимости и вставь адресную строку страницы, на которую было осуществлено перенаправление: ')
token = re.search(r'(?:.*access_token=)?([a-f0-9]+).*', user_input)
return token.group(1)
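# Illustrative note (the token below is fake): after VK's implicit OAuth flow the
# browser typically lands on a URL like
#   https://oauth.vk.com/blank.html#access_token=533bacf01e11f55b536a565b57531ac1&expires_in=86400&user_id=8492
# and the regex in register() captures the hexadecimal access_token value from it.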
def run(proxy=None, chat_ids=[], objectives=[], auto_login=False):
token = data.get('token')
proxies = data.get('proxies')
if not token or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для авторизации?')):
token = register()
proxies = None
IP_ADDRESS = re.compile(r'((socks5://)|(?:https?://))?(localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})')
if proxy:
match = IP_ADDRESS.match(proxy)
if not proxy or (not match and not auto_login):
proxy = input('введи адрес прокси-сервера при необходимости его использования: ')
while proxy:
match = IP_ADDRESS.match(proxy)
if match:
break
proxy = input('неверный формат адреса сервера, попробуй ещё раз: ')
else:
match = None
if match:
protocol, use_socks, ip = match.groups()
if not protocol: |
if use_socks:
proxies = {'http': 'socks5://' + ip, 'https': 'socks5://' + ip}
else:
proxies = {'http': 'http://' + ip, 'https': 'https://' + ip}
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['token'] = token
data['proxies'] = proxies
update_data()
start_screen()
if not chat_ids:
chat_ids = data.get('chat_ids')
if not objectives:
objectives = data.get('objectives')
if chat_ids is None or objectives is None or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для работы?')):
chat_ids = list(map(int, input('введи айди конф, в которых нужно защищать рейдеров, через пробел: ').split()))
objectives = list(map(int, input('введи айди защищаемых рейдеров: ').split()))
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['chat_ids'] = chat_ids
data['objectives'] = objectives
update_data()
try:
defender = CLIDefender(token, proxies=proxies)
except InvalidSchema:
sys.exit('необходимо установить дополнительные зависимости для поддержки протокола socks5')
except ApiError:
del data['token']
update_data()
sys.exit('введённый токен недействителен')
except ProxyError:
del data['proxies']
update_data()
sys.exit('не удалось подключиться к прокси-серверу')
defender.run(chat_ids, objectives)
def main():
parser = ArgumentParser(prog='vk-raid-defender', description=__description__, usage='%(prog)s [опции]', add_help=False)
group = parser.add_argument_group('опциональные аргументы')
group.add_argument('-h', '--help', action='help', help='показать это сообщение о помощи и выйти')
group.add_argument('-l', '--login', action='store_true', help='осуществить автоматическую авторизацию')
group.add_argument('-p', '--proxy', metavar='proxy_address', help='адрес прокси-сервера')
group.add_argument('-c', '--chats', type=int, nargs='+', metavar='chat', help='айди конф, в которых нужно защищать рейдеров')
group.add_argument('-u', '--users', type=int, nargs='+', metavar='user', help='айди защищаемых рейдеров')
args = parser.parse_args()
try:
run(args.proxy, args.chats, args.users, args.login)
except KeyboardInterrupt:
print()
sys.exit()
if __name__ == "__main__":
main() | use_socks = ask_yes_or_no('использовать протокол socks5 вместо http?') if not auto_login else False | random_line_split |
cli.py | from .. import __description__
from ..defender import VkRaidDefender, data, update_data
####################################################################################################
LOGO = '''\
_ _ _ _ __ _
__ _| | __ _ __ __ _(_) __| | __| | ___ / _| ___ _ __ __| | ___ _ __
\ \ / / |/ / | '__/ _` | |/ _` | / _` |/ _ \ |_ / _ \ '_ \ / _` |/ _ \ '__|
\ V /| < | | | (_| | | (_| | | (_| | __/ _| __/ | | | (_| | __/ |
\_/ |_|\_\ |_| \__,_|_|\__,_| \__,_|\___|_| \___|_| |_|\__,_|\___|_|
by alfred richardsn'''
####################################################################################################
from ..logger import logger
from ..settings import CLIENT_ID
import re
import os
import sys
import webbrowser
from getpass import getpass
from argparse import ArgumentParser
from vk_api.exceptions import ApiError
from requests.exceptions import InvalidSchema, ProxyError
class CLIDefender(VkRaidDefender):
def run(self, chat_ids, objectives):
self._chat_ids = chat_ids
self._objectives = objectives
start_screen()
logger.info('начинаю приём сообщений')
try:
self.listen()
except KeyboardInterrupt:
raise
except Exception as e:
start_screen()
logger.critical('произошла критическая ошибка, перезапускаюсь', exc_info=True)
self.listen()
def start_screen():
os.system('cls' if os.name == 'nt' else 'clear')
print(LOGO + '\n\n')
def ask_yes_or_no(question, true_answer='y', false_answer='n', default_answer='', default=True):
true_answer = true_answer.lower()
false_answer = false_answer.lower()
default_answer = default_answer.lower()
output = question.strip() + ' (' + (true_answer.upper() + '/' + false_answer if default else
true_answer + '/' + false_answer.upper()) + '): '
answer = None
while answer not in (true_answer, false_answer, default_answer):
answer = input(output).lower()
if answer == true_answer:
return True
elif answer == false_answer:
return False
else:
return default
def register():
use_webbrowser = ask_yes_or_no('открыть ссылку для авторизации в веб-браузере по умолчанию?')
print()
oauth_url = 'https://oauth.vk.com/authorize?client_id={}&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=69632&response_type=token'.format(CLIENT_ID)
if use_webbrowser:
webbrowser.open(oauth_url, new=2)
print('в веб-браузере только что была открыта ссылка для авторизации.')
else:
print(oauth_url + '\n')
print('открой в веб-браузере страницу по ссылке выше.')
token = None
while token is None:
user_input = getpass('авторизируйся на открытой странице при необходимости и вставь адресную строку страницы, на которую было осуществлено перенаправление: ')
token = re.search(r'(?:.*access_token=)?([a-f0-9]+).*', user_input)
return token.group(1)
def run(proxy=None, chat_ids=[], objectives=[], auto_login=False):
token = data.get('token')
proxies = data.get('proxies')
if not token or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для авторизации?')):
token = register()
proxies = None
IP_ADDRESS = | .compile(r'((socks5://)|(?:https?://))?(localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})')
if proxy:
match = IP_ADDRESS.match(proxy)
if not proxy or (not match and not auto_login):
proxy = input('введи адрес прокси-сервера при необходимости его использования: ')
while proxy:
match = IP_ADDRESS.match(proxy)
if match:
break
proxy = input('неверный формат адреса сервера, попробуй ещё раз: ')
else:
match = None
if match:
protocol, use_socks, ip = match.groups()
if not protocol:
use_socks = ask_yes_or_no('использовать протокол socks5 вместо http?') if not auto_login else False
if use_socks:
proxies = {'http': 'socks5://' + ip, 'https': 'socks5://' + ip}
else:
proxies = {'http': 'http://' + ip, 'https': 'https://' + ip}
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['token'] = token
data['proxies'] = proxies
update_data()
start_screen()
if not chat_ids:
chat_ids = data.get('chat_ids')
if not objectives:
objectives = data.get('objectives')
if chat_ids is None or objectives is None or (not auto_login and not ask_yes_or_no('использовать ранее сохранённые данные для работы?')):
chat_ids = list(map(int, input('введи айди конф, в которых нужно защищать рейдеров, через пробел: ').split()))
objectives = list(map(int, input('введи айди защищаемых рейдеров: ').split()))
if auto_login or ask_yes_or_no('сохранить введённые данные для следующих сессий?'):
data['chat_ids'] = chat_ids
data['objectives'] = objectives
update_data()
try:
defender = CLIDefender(token, proxies=proxies)
except InvalidSchema:
sys.exit('необходимо установить дополнительные зависимости для поддержки протокола socks5')
except ApiError:
del data['token']
update_data()
sys.exit('введённый токен недействителен')
except ProxyError:
del data['proxies']
update_data()
sys.exit('не удалось подключиться к прокси-серверу')
defender.run(chat_ids, objectives)
def main():
parser = ArgumentParser(prog='vk-raid-defender', description=__description__, usage='%(prog)s [опции]', add_help=False)
group = parser.add_argument_group('опциональные аргументы')
group.add_argument('-h', '--help', action='help', help='показать это сообщение о помощи и выйти')
group.add_argument('-l', '--login', action='store_true', help='осуществить автоматическую авторизацию')
group.add_argument('-p', '--proxy', metavar='proxy_address', help='адрес прокси-сервера')
group.add_argument('-c', '--chats', type=int, nargs='+', metavar='chat', help='айди конф, в которых нужно защищать рейдеров')
group.add_argument('-u', '--users', type=int, nargs='+', metavar='user', help='айди защищаемых рейдеров')
args = parser.parse_args()
try:
run(args.proxy, args.chats, args.users, args.login)
except KeyboardInterrupt:
print()
sys.exit()
if __name__ == "__main__":
main()
| re | identifier_name |
ml_spam.py | # ------------------- start ML blackbox ----------------------------
# the details here aren't fully important
from textblob import TextBlob
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
def _split_into_lemmas(message):
message = unicode(message, 'utf8').lower()
words = TextBlob(message).words
# for each word, take its "base form" = lemma
return [word.lemma for word in words]
pipeline_svm = Pipeline([
('bow', CountVectorizer(analyzer=_split_into_lemmas)),
('tfidf', TfidfTransformer()),
('classifier', SVC()), # <== change here
])
# pipeline parameters to automatically explore and tune
param_svm = [
{'classifier__C': [1, 10, 100, 1000], 'classifier__kernel': ['linear']},
{'classifier__C': [1, 10, 100, 1000], 'classifier__gamma': [0.001, 0.0001], 'classifier__kernel': ['rbf']},
]
def _grid_svm(label_train):
return GridSearchCV(
pipeline_svm, # pipeline from above
param_grid=param_svm, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1, # number of cores to use for parallelization; -1 for "all cores"
scoring='accuracy', # what score are we optimizing?
cv=StratifiedKFold(label_train, n_folds=5), # what type of cross validation to use
)
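# Illustrative sketch (not executed by this module; variable names are made up):
# this is roughly how the grid-searched pipeline above would be used on its own.
#
# detector = _grid_svm(train_labels).fit(train_messages, train_labels)
# print(detector.best_params_)  # kernel/C/gamma chosen by cross validation
# predictions = detector.predict(["free entry in 2 a wkly comp to win cash!"])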
# ------------------- end big ML blackbox ------------------------
from ticdat import TicDatFactory, LogFile
import cPickle
import time
import datetime
dataFactory = TicDatFactory(messages = [[],["label", "message"]],
parameters = [["key"], ["value"]])
solnFactory = TicDatFactory(predictions = [[],["message", "prediction"]],
parameters = [["key"], ["value"]])
def _timeStamp() :
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def run(td, output, error):
assert dataFactory.good_tic_dat_object(td)
assert isinstance(output, LogFile) and isinstance(error, LogFile)
output.write("Output log file for spam \n%s\n\n"%_timeStamp())
error.write("Error log file for spam \n%s\n\n"%_timeStamp())
messages = dataFactory.copy_to_pandas(td).messages
# it's hard to get the type just right when reading with the csv routines.
# to be safe, casting to strings to ensure that purely numeric strings are still strings
# this is needed for a text analysis like this example, not generally needed
messages.message = messages.message.apply(str)
params = dict({"mode":"predict"}, **{k:r["value"] for k,r in td.parameters.items()})
assert params["mode"] in ["fit", "predict"]
soln = solnFactory.TicDat()
if params["mode"] == "fit":
spam_detector = _grid_svm(messages.label).fit(messages.message, messages.label)
soln.parameters["fitted CLOB"] = cPickle.dumps(spam_detector)
output.write("Fitted %s records\n"%len(messages))
return soln
else:
| if "fitted CLOB" not in params:
error.write("Need a fitted CLOB object in order to perform a prediction.\n")
return
spam_detector = cPickle.loads(params["fitted CLOB"])
rtn = spam_detector.predict(messages.message)
assert len(rtn) == len(messages.message)
map(soln.predictions.append, zip(messages.message, rtn))
output.write("Predicted %s records\n"%len(messages))
return soln | conditional_block | |
ml_spam.py | # ------------------- start ML blackbox ----------------------------
# the details here aren't fully important
from textblob import TextBlob
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
def _split_into_lemmas(message):
message = unicode(message, 'utf8').lower()
words = TextBlob(message).words
# for each word, take its "base form" = lemma
return [word.lemma for word in words]
pipeline_svm = Pipeline([
('bow', CountVectorizer(analyzer=_split_into_lemmas)),
('tfidf', TfidfTransformer()),
('classifier', SVC()), # <== change here
])
# pipeline parameters to automatically explore and tune
param_svm = [
{'classifier__C': [1, 10, 100, 1000], 'classifier__kernel': ['linear']},
{'classifier__C': [1, 10, 100, 1000], 'classifier__gamma': [0.001, 0.0001], 'classifier__kernel': ['rbf']},
]
def _grid_svm(label_train):
|
# ------------------- end big ML blackbox ------------------------
from ticdat import TicDatFactory, LogFile
import cPickle
import time
import datetime
dataFactory = TicDatFactory(messages = [[],["label", "message"]],
parameters = [["key"], ["value"]])
solnFactory = TicDatFactory(predictions = [[],["message", "prediction"]],
parameters = [["key"], ["value"]])
def _timeStamp() :
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def run(td, output, error):
assert dataFactory.good_tic_dat_object(td)
assert isinstance(output, LogFile) and isinstance(error, LogFile)
output.write("Output log file for spam \n%s\n\n"%_timeStamp())
error.write("Error log file for spam \n%s\n\n"%_timeStamp())
messages = dataFactory.copy_to_pandas(td).messages
# it's hard to get the type just right when reading with the csv routines.
# to be safe, casting to strings to ensure that purely numeric strings are still strings
# this is needed for a text analysis like this example, not generally needed
messages.message = messages.message.apply(str)
params = dict({"mode":"predict"}, **{k:r["value"] for k,r in td.parameters.items()})
assert params["mode"] in ["fit", "predict"]
soln = solnFactory.TicDat()
if params["mode"] == "fit":
spam_detector = _grid_svm(messages.label).fit(messages.message, messages.label)
soln.parameters["fitted CLOB"] = cPickle.dumps(spam_detector)
output.write("Fitted %s records\n"%len(messages))
return soln
else:
if "fitted CLOB" not in params:
error.write("Need a fitted CLOB object in order to perform a prediction.\n")
return
spam_detector = cPickle.loads(params["fitted CLOB"])
rtn = spam_detector.predict(messages.message)
assert len(rtn) == len(messages.message)
map(soln.predictions.append, zip(messages.message, rtn))
output.write("Predicted %s records\n"%len(messages))
return soln
| return GridSearchCV(
pipeline_svm, # pipeline from above
param_grid=param_svm, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1, # number of cores to use for parallelization; -1 for "all cores"
scoring='accuracy', # what score are we optimizing?
cv=StratifiedKFold(label_train, n_folds=5), # what type of cross validation to use
) | identifier_body |
ml_spam.py | # ------------------- start ML blackbox ----------------------------
# the details here aren't fully important
from textblob import TextBlob
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
def _split_into_lemmas(message):
message = unicode(message, 'utf8').lower()
words = TextBlob(message).words
# for each word, take its "base form" = lemma
return [word.lemma for word in words]
pipeline_svm = Pipeline([
('bow', CountVectorizer(analyzer=_split_into_lemmas)),
('tfidf', TfidfTransformer()),
('classifier', SVC()), # <== change here
])
# pipeline parameters to automatically explore and tune
param_svm = [
{'classifier__C': [1, 10, 100, 1000], 'classifier__kernel': ['linear']},
{'classifier__C': [1, 10, 100, 1000], 'classifier__gamma': [0.001, 0.0001], 'classifier__kernel': ['rbf']},
]
def _grid_svm(label_train):
return GridSearchCV(
pipeline_svm, # pipeline from above
param_grid=param_svm, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1, # number of cores to use for parallelization; -1 for "all cores"
scoring='accuracy', # what score are we optimizing?
cv=StratifiedKFold(label_train, n_folds=5), # what type of cross validation to use
)
# ------------------- end big ML blackbox ------------------------
from ticdat import TicDatFactory, LogFile
import cPickle
import time
import datetime
dataFactory = TicDatFactory(messages = [[],["label", "message"]],
parameters = [["key"], ["value"]])
solnFactory = TicDatFactory(predictions = [[],["message", "prediction"]],
parameters = [["key"], ["value"]])
def _timeStamp() :
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def | (td, output, error):
assert dataFactory.good_tic_dat_object(td)
assert isinstance(output, LogFile) and isinstance(error, LogFile)
output.write("Output log file for spam \n%s\n\n"%_timeStamp())
error.write("Error log file for spam \n%s\n\n"%_timeStamp())
messages = dataFactory.copy_to_pandas(td).messages
# it's hard to get the type just right when reading with the csv routines.
# to be safe, casting to strings to ensure that purely numeric strings are still strings
# this is needed for a text analysis like this example, not generally needed
messages.message = messages.message.apply(str)
params = dict({"mode":"predict"}, **{k:r["value"] for k,r in td.parameters.items()})
assert params["mode"] in ["fit", "predict"]
soln = solnFactory.TicDat()
if params["mode"] == "fit":
spam_detector = _grid_svm(messages.label).fit(messages.message, messages.label)
soln.parameters["fitted CLOB"] = cPickle.dumps(spam_detector)
output.write("Fitted %s records\n"%len(messages))
return soln
else:
if "fitted CLOB" not in params:
error.write("Need a fitted CLOB object in order to perform a prediction.\n")
return
spam_detector = cPickle.loads(params["fitted CLOB"])
rtn = spam_detector.predict(messages.message)
assert len(rtn) == len(messages.message)
map(soln.predictions.append, zip(messages.message, rtn))
output.write("Predicted %s records\n"%len(messages))
return soln
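# Illustrative driver sketch (ticdat row/field access details here are assumptions
# based on the factories defined above; file names are made up):
#
# dat = dataFactory.TicDat()
# dat.messages.append(("ham", "ok lar... joking wif u oni"))
# dat.messages.append(("spam", "winner!! claim your prize now"))
# dat.parameters["mode"] = "fit"
# fit_soln = run(dat, LogFile("out.log"), LogFile("err.log"))
# # reuse the pickled classifier for a later predict-only pass
# dat.parameters["mode"] = "predict"
# dat.parameters["fitted CLOB"] = fit_soln.parameters["fitted CLOB"]["value"]
# pred_soln = run(dat, LogFile("out.log"), LogFile("err.log"))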
| run | identifier_name |
polyfill.ts | interface String {
startsWith(str: string): boolean;
endsWith(str: string): boolean;
compare(target: string, ignoreCase?: boolean): number;
contains(str: string, ignoreCase?: boolean): boolean;
}
interface Array<T> {
includes(obj: T): boolean;
remove(from: number, to: number): Array<T>;
getUnique(getValue: (val: T) => any): Array<T>;
indexOfDelegate(predicate: (val: T) => boolean, fromIndex?: number): number;
last(): T;
find(predicate: (val: T) => boolean): T;
some(predicate: (val: T) => boolean): boolean;
}
if (!String.prototype.compare) {
String.prototype.compare = function (target: string, ignoreCase?: boolean) {
var selfValue: string = this;
var targetValue: string = target || "";
if (ignoreCase) {
selfValue = selfValue.toLowerCase();
targetValue = targetValue.toLowerCase();
}
if (selfValue > targetValue) {
return 1;
} else if (selfValue < targetValue) {
return -1;
} else {
return 0;
}
}
}
if (!String.prototype.startsWith) {
String.prototype.startsWith = function (str) {
return this.slice(0, str.length) === str;
};
}
if (!String.prototype.endsWith) {
String.prototype.endsWith = function (str) {
return this.indexOf(str, this.length - str.length) !== -1;
};
}
if (!String.prototype.contains) {
String.prototype.contains = function (str: string, ignoreCase?: boolean) {
var selfValue: string = this;
var searchValue: string = str || "";
if (ignoreCase) {
selfValue = selfValue.toLowerCase();
searchValue = searchValue.toLowerCase();
}
return selfValue.indexOf(searchValue) !== -1;
}
}
if (!Array.prototype.includes) {
Array.prototype.includes = function (searchElement) {
if (this === undefined || this === null) {
throw new TypeError('Cannot convert this value to object');
}
var O = Object(this);
var len = parseInt(O.length) || 0;
if (len === 0) {
return false;
}
var k = 0;
while (k < len) {
var currentElement = O[k];
if (searchElement === currentElement ||
(searchElement !== searchElement && currentElement !== currentElement)) {
return true;
}
k++;
}
return false;
}
}
Array.prototype.remove = function (from, to) {
var rest = this.slice((to || from) + 1 || this.length);
this.length = from < 0 ? this.length + from : from;
return this.push.apply(this, rest);
};
Array.prototype.getUnique = function (getValue) {
var u = {}, a = [];
for (var i = 0, l = this.length; i < l; ++i) {
var value = getValue(this[i]);
if (u.hasOwnProperty(value)) {
continue;
}
a.push(this[i]);
u[value] = 1;
}
return a;
};
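// Usage sketch for getUnique above and indexOfDelegate below (illustrative;
// the sample objects are made up):
//
//   const users = [{ id: 1, name: 'a' }, { id: 2, name: 'b' }, { id: 2, name: 'c' }];
//   users.getUnique(u => u.id);             // => keeps id 1 and the first id 2 entry
//   users.indexOfDelegate(u => u.id === 2); // => 1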
//http://devdocs.io/javascript/global_objects/array/indexof
Array.prototype.indexOfDelegate = function (predicate, fromIndex) {
var k;
// 1. Let O be the result of calling ToObject passing
// the this value as the argument.
if (this == null) {
throw new TypeError('"this" is null or not defined');
}
var O = Object(this);
// 2. Let lenValue be the result of calling the Get
// internal method of O with the argument "length".
// 3. Let len be ToUint32(lenValue).
var len = O.length >>> 0;
// 4. If len is 0, return -1.
if (len === 0) {
return -1;
}
// 5. If argument fromIndex was passed let n be
// ToInteger(fromIndex); else let n be 0.
var n = +fromIndex || 0;
if (Math.abs(n) === Infinity) {
n = 0;
}
// 6. If n >= len, return -1.
if (n >= len) {
return -1;
}
// 7. If n >= 0, then Let k be n.
// 8. Else, n<0, Let k be len - abs(n).
// If k is less than 0, then let k be 0.
k = Math.max(n >= 0 ? n : len - Math.abs(n), 0);
// 9. Repeat, while k < len
while (k < len) {
var kValue;
// a. Let Pk be ToString(k).
// This is implicit for LHS operands of the in operator
// b. Let kPresent be the result of calling the
// HasProperty internal method of O with argument Pk.
// This step can be combined with c
// c. If kPresent is true, then
// i. Let elementK be the result of calling the Get
// internal method of O with the argument ToString(k).
// ii. Let same be the result of applying the
// Strict Equality Comparison Algorithm to
// searchElement and elementK.
// iii. If same is true, return k.
if (k in O && predicate(O[k])) {
| k++;
}
return -1;
};
if (!Array.prototype.some) {
Array.prototype.some = function (fun/*, thisArg*/) {
'use strict';
if (this == null) {
throw new TypeError('Array.prototype.some called on null or undefined');
}
if (typeof fun !== 'function') {
throw new TypeError();
}
var t = Object(this);
var len = t.length >>> 0;
var thisArg = arguments.length >= 2 ? arguments[1] : void 0;
for (var i = 0; i < len; i++) {
if (i in t && fun.call(thisArg, t[i], i, t)) {
return true;
}
}
return false;
};
}
if (!Array.prototype.last) {
Array.prototype.last = function () {
return this[this.length - 1];
};
};
if (!Array.prototype.find) {
Array.prototype.find = function (predicate) {
if (this == null) {
throw new TypeError('Array.prototype.find called on null or undefined');
}
if (typeof predicate !== 'function') {
throw new TypeError('predicate must be a function');
}
var list = Object(this);
var length = list.length >>> 0;
var thisArg = arguments[1];
var value;
for (var i = 0; i < length; i++) {
value = list[i];
if (predicate.call(thisArg, value, i, list)) {
return value;
}
}
return undefined;
};
}
| return k;
}
| conditional_block |
polyfill.ts | interface String {
startsWith(str: string): boolean;
endsWith(str: string): boolean;
compare(target: string, ignoreCase?: boolean): number;
contains(str: string, ignoreCase?: boolean): boolean;
}
interface Array<T> {
includes(obj: T): boolean;
remove(from: number, to: number): Array<T>;
getUnique(getValue: (val: T) => any): Array<T>;
indexOfDelegate(predicate: (val: T) => boolean, fromIndex?: number): number;
last(): T;
find(predicate: (val: T) => boolean): T;
some(predicate: (val: T) => boolean): boolean;
} |
if (!String.prototype.compare) {
String.prototype.compare = function (target: string, ignoreCase?: boolean) {
var selfValue: string = this;
var targetValue: string = target || "";
if (ignoreCase) {
selfValue = selfValue.toLowerCase();
targetValue = targetValue.toLowerCase();
}
if (selfValue > targetValue) {
return 1;
} else if (selfValue < targetValue) {
return -1;
} else {
return 0;
}
}
}
if (!String.prototype.startsWith) {
String.prototype.startsWith = function (str) {
return this.slice(0, str.length) === str;
};
}
if (!String.prototype.endsWith) {
String.prototype.endsWith = function (str) {
return this.indexOf(str, this.length - str.length) !== -1;
};
}
if (!String.prototype.contains) {
String.prototype.contains = function (str: string, ignoreCase?: boolean) {
var selfValue: string = this;
var searchValue: string = str || "";
if (ignoreCase) {
selfValue = selfValue.toLowerCase();
searchValue = searchValue.toLowerCase();
}
return selfValue.indexOf(searchValue) !== -1;
}
}
if (!Array.prototype.includes) {
Array.prototype.includes = function (searchElement) {
if (this === undefined || this === null) {
throw new TypeError('Cannot convert this value to object');
}
var O = Object(this);
var len = parseInt(O.length) || 0;
if (len === 0) {
return false;
}
var k = 0;
while (k < len) {
var currentElement = O[k];
if (searchElement === currentElement ||
(searchElement !== searchElement && currentElement !== currentElement)) {
return true;
}
k++;
}
return false;
}
}
Array.prototype.remove = function (from, to) {
var rest = this.slice((to || from) + 1 || this.length);
this.length = from < 0 ? this.length + from : from;
return this.push.apply(this, rest);
};
Array.prototype.getUnique = function (getValue) {
var u = {}, a = [];
for (var i = 0, l = this.length; i < l; ++i) {
var value = getValue(this[i]);
if (u.hasOwnProperty(value)) {
continue;
}
a.push(this[i]);
u[value] = 1;
}
return a;
};
//http://devdocs.io/javascript/global_objects/array/indexof
Array.prototype.indexOfDelegate = function (predicate, fromIndex) {
var k;
// 1. Let O be the result of calling ToObject passing
// the this value as the argument.
if (this == null) {
throw new TypeError('"this" is null or not defined');
}
var O = Object(this);
// 2. Let lenValue be the result of calling the Get
// internal method of O with the argument "length".
// 3. Let len be ToUint32(lenValue).
var len = O.length >>> 0;
// 4. If len is 0, return -1.
if (len === 0) {
return -1;
}
// 5. If argument fromIndex was passed let n be
// ToInteger(fromIndex); else let n be 0.
var n = +fromIndex || 0;
if (Math.abs(n) === Infinity) {
n = 0;
}
// 6. If n >= len, return -1.
if (n >= len) {
return -1;
}
// 7. If n >= 0, then Let k be n.
// 8. Else, n<0, Let k be len - abs(n).
// If k is less than 0, then let k be 0.
k = Math.max(n >= 0 ? n : len - Math.abs(n), 0);
// 9. Repeat, while k < len
while (k < len) {
var kValue;
// a. Let Pk be ToString(k).
// This is implicit for LHS operands of the in operator
// b. Let kPresent be the result of calling the
// HasProperty internal method of O with argument Pk.
// This step can be combined with c
// c. If kPresent is true, then
// i. Let elementK be the result of calling the Get
// internal method of O with the argument ToString(k).
// ii. Let same be the result of applying the
// Strict Equality Comparison Algorithm to
// searchElement and elementK.
// iii. If same is true, return k.
if (k in O && predicate(O[k])) {
return k;
}
k++;
}
return -1;
};
if (!Array.prototype.some) {
Array.prototype.some = function (fun/*, thisArg*/) {
'use strict';
if (this == null) {
throw new TypeError('Array.prototype.some called on null or undefined');
}
if (typeof fun !== 'function') {
throw new TypeError();
}
var t = Object(this);
var len = t.length >>> 0;
var thisArg = arguments.length >= 2 ? arguments[1] : void 0;
for (var i = 0; i < len; i++) {
if (i in t && fun.call(thisArg, t[i], i, t)) {
return true;
}
}
return false;
};
}
if (!Array.prototype.last) {
Array.prototype.last = function () {
return this[this.length - 1];
};
};
if (!Array.prototype.find) {
Array.prototype.find = function (predicate) {
if (this == null) {
throw new TypeError('Array.prototype.find called on null or undefined');
}
if (typeof predicate !== 'function') {
throw new TypeError('predicate must be a function');
}
var list = Object(this);
var length = list.length >>> 0;
var thisArg = arguments[1];
var value;
for (var i = 0; i < length; i++) {
value = list[i];
if (predicate.call(thisArg, value, i, list)) {
return value;
}
}
return undefined;
};
} | random_line_split | |
text-editor-registry.js | /** @babel */
import {Emitter, Disposable, CompositeDisposable} from 'event-kit'
import {Point, Range} from 'text-buffer'
import TextEditor from './text-editor'
import ScopeDescriptor from './scope-descriptor'
const EDITOR_PARAMS_BY_SETTING_KEY = [
['core.fileEncoding', 'encoding'],
['editor.atomicSoftTabs', 'atomicSoftTabs'],
['editor.showInvisibles', 'showInvisibles'],
['editor.tabLength', 'tabLength'],
['editor.invisibles', 'invisibles'],
['editor.showCursorOnSelection', 'showCursorOnSelection'],
['editor.showIndentGuide', 'showIndentGuide'],
['editor.showLineNumbers', 'showLineNumbers'],
['editor.softWrap', 'softWrapped'],
['editor.softWrapHangingIndent', 'softWrapHangingIndentLength'],
['editor.softWrapAtPreferredLineLength', 'softWrapAtPreferredLineLength'],
['editor.preferredLineLength', 'preferredLineLength'],
['editor.maxScreenLineLength', 'maxScreenLineLength'],
['editor.autoIndent', 'autoIndent'],
['editor.autoIndentOnPaste', 'autoIndentOnPaste'],
['editor.scrollPastEnd', 'scrollPastEnd'],
['editor.undoGroupingInterval', 'undoGroupingInterval'],
['editor.nonWordCharacters', 'nonWordCharacters'],
['editor.scrollSensitivity', 'scrollSensitivity']
]
const GRAMMAR_SELECTION_RANGE = Range(Point.ZERO, Point(10, 0)).freeze()
// Experimental: This global registry tracks registered `TextEditors`.
//
// If you want to add functionality to a wider set of text editors than just
// those appearing within workspace panes, use `atom.textEditors.observe` to
// invoke a callback for all current and future registered text editors.
//
// If you want packages to be able to add functionality to your non-pane text
// editors (such as a search field in a custom user interface element), register
// them for observation via `atom.textEditors.add`. **Important:** When you're
// done using your editor, be sure to call `dispose` on the returned disposable
// to avoid leaking editors.
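// A minimal usage sketch (illustrative only; `miniEditor` stands in for a
// hypothetical non-pane TextEditor owned by a package):
//
//   const observation = atom.textEditors.observe((editor) => {
//     // called for every registered editor, current and future
//   })
//   const registration = atom.textEditors.add(miniEditor)
//   // later, when the custom editor is destroyed:
//   registration.dispose()
//   observation.dispose()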
export default class TextEditorRegistry {
constructor ({config, grammarRegistry, assert, packageManager}) {
this.assert = assert
this.config = config
this.grammarRegistry = grammarRegistry
this.scopedSettingsDelegate = new ScopedSettingsDelegate(config)
this.grammarAddedOrUpdated = this.grammarAddedOrUpdated.bind(this)
this.clear()
this.initialPackageActivationPromise = new Promise((resolve) => {
// TODO: Remove this usage of a private property of PackageManager.
// Should PackageManager just expose a promise-based API like this?
if (packageManager.deferredActivationHooks) {
packageManager.onDidActivateInitialPackages(resolve)
} else {
resolve()
}
})
}
deserialize (state) {
this.editorGrammarOverrides = state.editorGrammarOverrides
}
serialize () {
return {
editorGrammarOverrides: Object.assign({}, this.editorGrammarOverrides)
}
}
clear () {
if (this.subscriptions) {
this.subscriptions.dispose()
}
this.subscriptions = new CompositeDisposable()
this.editors = new Set()
this.emitter = new Emitter()
this.scopesWithConfigSubscriptions = new Set()
this.editorsWithMaintainedConfig = new Set()
this.editorsWithMaintainedGrammar = new Set()
this.editorGrammarOverrides = {}
this.editorGrammarScores = new WeakMap()
this.subscriptions.add(
this.grammarRegistry.onDidAddGrammar(this.grammarAddedOrUpdated),
this.grammarRegistry.onDidUpdateGrammar(this.grammarAddedOrUpdated)
)
}
destroy () {
this.subscriptions.dispose()
this.editorsWithMaintainedConfig = null
}
// Register a `TextEditor`.
//
// * `editor` The editor to register.
//
// Returns a {Disposable} on which `.dispose()` can be called to remove the
// added editor. To avoid any memory leaks this should be called when the
// editor is destroyed.
add (editor) {
this.editors.add(editor)
editor.registered = true
this.emitter.emit('did-add-editor', editor)
return new Disposable(() => this.remove(editor))
}
build (params) {
params = Object.assign({assert: this.assert}, params)
let scope = null
if (params.buffer) {
const filePath = params.buffer.getPath()
const headContent = params.buffer.getTextInRange(GRAMMAR_SELECTION_RANGE)
params.grammar = this.grammarRegistry.selectGrammar(filePath, headContent)
scope = new ScopeDescriptor({scopes: [params.grammar.scopeName]})
}
Object.assign(params, this.textEditorParamsForScope(scope))
return new TextEditor(params)
}
// Remove a `TextEditor`.
//
// * `editor` The editor to remove.
//
// Returns a {Boolean} indicating whether the editor was successfully removed.
remove (editor) {
var removed = this.editors.delete(editor)
editor.registered = false
return removed
}
// Invoke the given callback with all the current and future registered
// `TextEditors`.
//
// * `callback` {Function} to be called with current and future text editors.
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
observe (callback) {
this.editors.forEach(callback)
return this.emitter.on('did-add-editor', callback)
}
// Keep a {TextEditor}'s configuration in sync with Atom's settings.
//
// * `editor` The editor whose configuration will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// configuration.
maintainConfig (editor) {
if (this.editorsWithMaintainedConfig.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedConfig.add(editor)
editor.setScopedSettingsDelegate(this.scopedSettingsDelegate)
this.subscribeToSettingsForEditorScope(editor)
const grammarChangeSubscription = editor.onDidChangeGrammar(() => {
this.subscribeToSettingsForEditorScope(editor)
})
this.subscriptions.add(grammarChangeSubscription)
const updateTabTypes = () => {
const configOptions = {scope: editor.getRootScopeDescriptor()}
editor.setSoftTabs(shouldEditorUseSoftTabs(
editor,
this.config.get('editor.tabType', configOptions),
this.config.get('editor.softTabs', configOptions)
))
}
updateTabTypes()
const tokenizeSubscription = editor.onDidTokenize(updateTabTypes)
this.subscriptions.add(tokenizeSubscription)
return new Disposable(() => {
this.editorsWithMaintainedConfig.delete(editor)
editor.setScopedSettingsDelegate(null)
tokenizeSubscription.dispose()
grammarChangeSubscription.dispose()
this.subscriptions.remove(grammarChangeSubscription)
this.subscriptions.remove(tokenizeSubscription)
})
}
// Set a {TextEditor}'s grammar based on its path and content, and continue
// to update its grammar as grammars are added or updated, or the editor's
// file path changes.
//
// * `editor` The editor whose grammar will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// grammar.
maintainGrammar (editor) {
if (this.editorsWithMaintainedGrammar.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedGrammar.add(editor)
const buffer = editor.getBuffer()
for (let existingEditor of this.editorsWithMaintainedGrammar) {
if (existingEditor.getBuffer() === buffer) {
const existingOverride = this.editorGrammarOverrides[existingEditor.id]
if (existingOverride) {
this.editorGrammarOverrides[editor.id] = existingOverride
}
break
}
}
this.selectGrammarForEditor(editor)
const pathChangeSubscription = editor.onDidChangePath(() => {
this.editorGrammarScores.delete(editor)
this.selectGrammarForEditor(editor)
})
this.subscriptions.add(pathChangeSubscription)
return new Disposable(() => {
delete this.editorGrammarOverrides[editor.id]
this.editorsWithMaintainedGrammar.delete(editor)
this.subscriptions.remove(pathChangeSubscription)
pathChangeSubscription.dispose()
})
}
// Force a {TextEditor} to use a different grammar than the one that would
// otherwise be selected for it.
//
// * `editor` The editor whose grammar will be set.
// * `scopeName` The {String} root scope name for the desired {Grammar}.
setGrammarOverride (editor, scopeName) {
this.editorGrammarOverrides[editor.id] = scopeName
this.editorGrammarScores.delete(editor)
editor.setGrammar(this.grammarRegistry.grammarForScopeName(scopeName))
}
// Retrieve the grammar scope name that has been set as a grammar override
// for the given {TextEditor}.
//
// * `editor` The editor.
//
// Returns a {String} scope name, or `null` if no override has been set
// for the given editor.
getGrammarOverride (editor) {
return this.editorGrammarOverrides[editor.id]
}
// Remove any grammar override that has been set for the given {TextEditor}.
//
// * `editor` The editor.
clearGrammarOverride (editor) {
delete this.editorGrammarOverrides[editor.id]
this.selectGrammarForEditor(editor)
}
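// Usage sketch for the three override helpers above (illustrative; `editor` is
// any registered TextEditor and 'source.ruby' is just an example scope name):
//
//   atom.textEditors.setGrammarOverride(editor, 'source.ruby')
//   atom.textEditors.getGrammarOverride(editor)   // => 'source.ruby'
//   atom.textEditors.clearGrammarOverride(editor) // back to automatic detection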
// Private
grammarAddedOrUpdated (grammar) {
this.editorsWithMaintainedGrammar.forEach((editor) => {
if (grammar.injectionSelector) {
if (editor.tokenizedBuffer.hasTokenForSelector(grammar.injectionSelector)) {
editor.tokenizedBuffer.retokenizeLines()
}
return
}
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
if (grammar.scopeName === grammarOverride) {
editor.setGrammar(grammar)
}
} else |
})
}
selectGrammarForEditor (editor) {
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
const grammar = this.grammarRegistry.grammarForScopeName(grammarOverride)
editor.setGrammar(grammar)
return
}
const {grammar, score} = this.grammarRegistry.selectGrammarWithScore(
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
if (!grammar) {
throw new Error(`No grammar found for path: ${editor.getPath()}`)
}
const currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
async subscribeToSettingsForEditorScope (editor) {
await this.initialPackageActivationPromise
const scopeDescriptor = editor.getRootScopeDescriptor()
const scopeChain = scopeDescriptor.getScopeChain()
editor.update(this.textEditorParamsForScope(scopeDescriptor))
if (!this.scopesWithConfigSubscriptions.has(scopeChain)) {
this.scopesWithConfigSubscriptions.add(scopeChain)
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
this.subscriptions.add(
this.config.onDidChange(settingKey, configOptions, ({newValue}) => {
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.update({[paramName]: newValue})
}
})
})
)
}
const updateTabTypes = () => {
const tabType = this.config.get('editor.tabType', configOptions)
const softTabs = this.config.get('editor.softTabs', configOptions)
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.setSoftTabs(shouldEditorUseSoftTabs(editor, tabType, softTabs))
}
})
}
this.subscriptions.add(
this.config.onDidChange('editor.tabType', configOptions, updateTabTypes),
this.config.onDidChange('editor.softTabs', configOptions, updateTabTypes)
)
}
}
textEditorParamsForScope (scopeDescriptor) {
const result = {}
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
result[paramName] = this.config.get(settingKey, configOptions)
}
return result
}
}
function shouldEditorUseSoftTabs (editor, tabType, softTabs) {
switch (tabType) {
case 'hard':
return false
case 'soft':
return true
case 'auto':
switch (editor.usesSoftTabs()) {
case true:
return true
case false:
return false
default:
return softTabs
}
}
}
function noop () {}
class ScopedSettingsDelegate {
constructor (config) {
this.config = config
}
getNonWordCharacters (scope) {
return this.config.get('editor.nonWordCharacters', {scope: scope})
}
getIncreaseIndentPattern (scope) {
return this.config.get('editor.increaseIndentPattern', {scope: scope})
}
getDecreaseIndentPattern (scope) {
return this.config.get('editor.decreaseIndentPattern', {scope: scope})
}
getDecreaseNextIndentPattern (scope) {
return this.config.get('editor.decreaseNextIndentPattern', {scope: scope})
}
getFoldEndPattern (scope) {
return this.config.get('editor.foldEndPattern', {scope: scope})
}
getCommentStrings (scope) {
const commentStartEntries = this.config.getAll('editor.commentStart', {scope})
const commentEndEntries = this.config.getAll('editor.commentEnd', {scope})
const commentStartEntry = commentStartEntries[0]
const commentEndEntry = commentEndEntries.find((entry) => {
return entry.scopeSelector === commentStartEntry.scopeSelector
})
return {
commentStartString: commentStartEntry && commentStartEntry.value,
commentEndString: commentEndEntry && commentEndEntry.value
}
}
}
TextEditorRegistry.ScopedSettingsDelegate = ScopedSettingsDelegate
| {
const score = this.grammarRegistry.getGrammarScore(
grammar,
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
let currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
} | conditional_block |
text-editor-registry.js | /** @babel */
import {Emitter, Disposable, CompositeDisposable} from 'event-kit'
import {Point, Range} from 'text-buffer'
import TextEditor from './text-editor'
import ScopeDescriptor from './scope-descriptor'
const EDITOR_PARAMS_BY_SETTING_KEY = [
['core.fileEncoding', 'encoding'],
['editor.atomicSoftTabs', 'atomicSoftTabs'],
['editor.showInvisibles', 'showInvisibles'],
['editor.tabLength', 'tabLength'],
['editor.invisibles', 'invisibles'],
['editor.showCursorOnSelection', 'showCursorOnSelection'],
['editor.showIndentGuide', 'showIndentGuide'],
['editor.showLineNumbers', 'showLineNumbers'],
['editor.softWrap', 'softWrapped'],
['editor.softWrapHangingIndent', 'softWrapHangingIndentLength'],
['editor.softWrapAtPreferredLineLength', 'softWrapAtPreferredLineLength'],
['editor.preferredLineLength', 'preferredLineLength'],
['editor.maxScreenLineLength', 'maxScreenLineLength'],
['editor.autoIndent', 'autoIndent'],
['editor.autoIndentOnPaste', 'autoIndentOnPaste'],
['editor.scrollPastEnd', 'scrollPastEnd'],
['editor.undoGroupingInterval', 'undoGroupingInterval'],
['editor.nonWordCharacters', 'nonWordCharacters'],
['editor.scrollSensitivity', 'scrollSensitivity']
]
const GRAMMAR_SELECTION_RANGE = Range(Point.ZERO, Point(10, 0)).freeze()
// Experimental: This global registry tracks registered `TextEditors`.
//
// If you want to add functionality to a wider set of text editors than just
// those appearing within workspace panes, use `atom.textEditors.observe` to
// invoke a callback for all current and future registered text editors.
//
// If you want packages to be able to add functionality to your non-pane text
// editors (such as a search field in a custom user interface element), register
// them for observation via `atom.textEditors.add`. **Important:** When you're
// done using your editor, be sure to call `dispose` on the returned disposable
// to avoid leaking editors.
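// Illustrative sketch (added for this document, not part of the original
// source): a package reacting to every registered editor via the API described
// above. The callback body is a made-up example.
//
//   const subscription = atom.textEditors.observe((editor) => {
//     console.log(`tracking editor ${editor.id}`)
//   })
//   // When the package is deactivated:
//   subscription.dispose()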
export default class | {
constructor ({config, grammarRegistry, assert, packageManager}) {
this.assert = assert
this.config = config
this.grammarRegistry = grammarRegistry
this.scopedSettingsDelegate = new ScopedSettingsDelegate(config)
this.grammarAddedOrUpdated = this.grammarAddedOrUpdated.bind(this)
this.clear()
this.initialPackageActivationPromise = new Promise((resolve) => {
// TODO: Remove this usage of a private property of PackageManager.
// Should PackageManager just expose a promise-based API like this?
if (packageManager.deferredActivationHooks) {
packageManager.onDidActivateInitialPackages(resolve)
} else {
resolve()
}
})
}
deserialize (state) {
this.editorGrammarOverrides = state.editorGrammarOverrides
}
serialize () {
return {
editorGrammarOverrides: Object.assign({}, this.editorGrammarOverrides)
}
}
clear () {
if (this.subscriptions) {
this.subscriptions.dispose()
}
this.subscriptions = new CompositeDisposable()
this.editors = new Set()
this.emitter = new Emitter()
this.scopesWithConfigSubscriptions = new Set()
this.editorsWithMaintainedConfig = new Set()
this.editorsWithMaintainedGrammar = new Set()
this.editorGrammarOverrides = {}
this.editorGrammarScores = new WeakMap()
this.subscriptions.add(
this.grammarRegistry.onDidAddGrammar(this.grammarAddedOrUpdated),
this.grammarRegistry.onDidUpdateGrammar(this.grammarAddedOrUpdated)
)
}
destroy () {
this.subscriptions.dispose()
this.editorsWithMaintainedConfig = null
}
// Register a `TextEditor`.
//
// * `editor` The editor to register.
//
// Returns a {Disposable} on which `.dispose()` can be called to remove the
// added editor. To avoid any memory leaks this should be called when the
// editor is destroyed.
add (editor) {
this.editors.add(editor)
editor.registered = true
this.emitter.emit('did-add-editor', editor)
return new Disposable(() => this.remove(editor))
}
build (params) {
params = Object.assign({assert: this.assert}, params)
let scope = null
if (params.buffer) {
const filePath = params.buffer.getPath()
const headContent = params.buffer.getTextInRange(GRAMMAR_SELECTION_RANGE)
params.grammar = this.grammarRegistry.selectGrammar(filePath, headContent)
scope = new ScopeDescriptor({scopes: [params.grammar.scopeName]})
}
Object.assign(params, this.textEditorParamsForScope(scope))
return new TextEditor(params)
}
// Remove a `TextEditor`.
//
// * `editor` The editor to remove.
//
// Returns a {Boolean} indicating whether the editor was successfully removed.
remove (editor) {
var removed = this.editors.delete(editor)
editor.registered = false
return removed
}
// Invoke the given callback with all the current and future registered
// `TextEditors`.
//
// * `callback` {Function} to be called with current and future text editors.
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
observe (callback) {
this.editors.forEach(callback)
return this.emitter.on('did-add-editor', callback)
}
// Keep a {TextEditor}'s configuration in sync with Atom's settings.
//
// * `editor` The editor whose configuration will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// configuration.
maintainConfig (editor) {
if (this.editorsWithMaintainedConfig.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedConfig.add(editor)
editor.setScopedSettingsDelegate(this.scopedSettingsDelegate)
this.subscribeToSettingsForEditorScope(editor)
const grammarChangeSubscription = editor.onDidChangeGrammar(() => {
this.subscribeToSettingsForEditorScope(editor)
})
this.subscriptions.add(grammarChangeSubscription)
const updateTabTypes = () => {
const configOptions = {scope: editor.getRootScopeDescriptor()}
editor.setSoftTabs(shouldEditorUseSoftTabs(
editor,
this.config.get('editor.tabType', configOptions),
this.config.get('editor.softTabs', configOptions)
))
}
updateTabTypes()
const tokenizeSubscription = editor.onDidTokenize(updateTabTypes)
this.subscriptions.add(tokenizeSubscription)
return new Disposable(() => {
this.editorsWithMaintainedConfig.delete(editor)
editor.setScopedSettingsDelegate(null)
tokenizeSubscription.dispose()
grammarChangeSubscription.dispose()
this.subscriptions.remove(grammarChangeSubscription)
this.subscriptions.remove(tokenizeSubscription)
})
}
// Set a {TextEditor}'s grammar based on its path and content, and continue
// to update its grammar as grammars are added or updated, or the editor's
// file path changes.
//
// * `editor` The editor whose grammar will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// grammar.
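// Illustrative sketch (added for this document, not part of the original
// source): keeping a hypothetical non-pane editor's grammar up to date and
// tearing the subscription down later.
//
//   const grammarSubscription = atom.textEditors.maintainGrammar(myEditor)
//   // ...
//   grammarSubscription.dispose() // stop updating the editor's grammar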
maintainGrammar (editor) {
if (this.editorsWithMaintainedGrammar.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedGrammar.add(editor)
const buffer = editor.getBuffer()
for (let existingEditor of this.editorsWithMaintainedGrammar) {
if (existingEditor.getBuffer() === buffer) {
const existingOverride = this.editorGrammarOverrides[existingEditor.id]
if (existingOverride) {
this.editorGrammarOverrides[editor.id] = existingOverride
}
break
}
}
this.selectGrammarForEditor(editor)
const pathChangeSubscription = editor.onDidChangePath(() => {
this.editorGrammarScores.delete(editor)
this.selectGrammarForEditor(editor)
})
this.subscriptions.add(pathChangeSubscription)
return new Disposable(() => {
delete this.editorGrammarOverrides[editor.id]
this.editorsWithMaintainedGrammar.delete(editor)
this.subscriptions.remove(pathChangeSubscription)
pathChangeSubscription.dispose()
})
}
// Force a {TextEditor} to use a different grammar than the one that would
// otherwise be selected for it.
//
// * `editor` The editor whose grammar will be set.
// * `scopeName` The {String} root scope name for the desired {Grammar}.
setGrammarOverride (editor, scopeName) {
this.editorGrammarOverrides[editor.id] = scopeName
this.editorGrammarScores.delete(editor)
editor.setGrammar(this.grammarRegistry.grammarForScopeName(scopeName))
}
// Retrieve the grammar scope name that has been set as a grammar override
// for the given {TextEditor}.
//
// * `editor` The editor.
//
// Returns a {String} scope name, or `null` if no override has been set
// for the given editor.
getGrammarOverride (editor) {
return this.editorGrammarOverrides[editor.id]
}
// Remove any grammar override that has been set for the given {TextEditor}.
//
// * `editor` The editor.
clearGrammarOverride (editor) {
delete this.editorGrammarOverrides[editor.id]
this.selectGrammarForEditor(editor)
}
// Private
grammarAddedOrUpdated (grammar) {
this.editorsWithMaintainedGrammar.forEach((editor) => {
if (grammar.injectionSelector) {
if (editor.tokenizedBuffer.hasTokenForSelector(grammar.injectionSelector)) {
editor.tokenizedBuffer.retokenizeLines()
}
return
}
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
if (grammar.scopeName === grammarOverride) {
editor.setGrammar(grammar)
}
} else {
const score = this.grammarRegistry.getGrammarScore(
grammar,
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
let currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
})
}
selectGrammarForEditor (editor) {
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
const grammar = this.grammarRegistry.grammarForScopeName(grammarOverride)
editor.setGrammar(grammar)
return
}
const {grammar, score} = this.grammarRegistry.selectGrammarWithScore(
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
if (!grammar) {
throw new Error(`No grammar found for path: ${editor.getPath()}`)
}
const currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
async subscribeToSettingsForEditorScope (editor) {
await this.initialPackageActivationPromise
const scopeDescriptor = editor.getRootScopeDescriptor()
const scopeChain = scopeDescriptor.getScopeChain()
editor.update(this.textEditorParamsForScope(scopeDescriptor))
if (!this.scopesWithConfigSubscriptions.has(scopeChain)) {
this.scopesWithConfigSubscriptions.add(scopeChain)
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
this.subscriptions.add(
this.config.onDidChange(settingKey, configOptions, ({newValue}) => {
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.update({[paramName]: newValue})
}
})
})
)
}
const updateTabTypes = () => {
const tabType = this.config.get('editor.tabType', configOptions)
const softTabs = this.config.get('editor.softTabs', configOptions)
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.setSoftTabs(shouldEditorUseSoftTabs(editor, tabType, softTabs))
}
})
}
this.subscriptions.add(
this.config.onDidChange('editor.tabType', configOptions, updateTabTypes),
this.config.onDidChange('editor.softTabs', configOptions, updateTabTypes)
)
}
}
textEditorParamsForScope (scopeDescriptor) {
const result = {}
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
result[paramName] = this.config.get(settingKey, configOptions)
}
return result
}
}
function shouldEditorUseSoftTabs (editor, tabType, softTabs) {
switch (tabType) {
case 'hard':
return false
case 'soft':
return true
case 'auto':
switch (editor.usesSoftTabs()) {
case true:
return true
case false:
return false
default:
return softTabs
}
}
}
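// Worked example (added for this document, not part of the original source)
// of how shouldEditorUseSoftTabs above resolves the settings:
//
//   shouldEditorUseSoftTabs(editor, 'hard', true)  // => false
//   shouldEditorUseSoftTabs(editor, 'soft', false) // => true
//   shouldEditorUseSoftTabs(editor, 'auto', true)  // => editor.usesSoftTabs()
//                                                  //    when that is a boolean,
//                                                  //    otherwise true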
function noop () {}
class ScopedSettingsDelegate {
constructor (config) {
this.config = config
}
getNonWordCharacters (scope) {
return this.config.get('editor.nonWordCharacters', {scope: scope})
}
getIncreaseIndentPattern (scope) {
return this.config.get('editor.increaseIndentPattern', {scope: scope})
}
getDecreaseIndentPattern (scope) {
return this.config.get('editor.decreaseIndentPattern', {scope: scope})
}
getDecreaseNextIndentPattern (scope) {
return this.config.get('editor.decreaseNextIndentPattern', {scope: scope})
}
getFoldEndPattern (scope) {
return this.config.get('editor.foldEndPattern', {scope: scope})
}
getCommentStrings (scope) {
const commentStartEntries = this.config.getAll('editor.commentStart', {scope})
const commentEndEntries = this.config.getAll('editor.commentEnd', {scope})
const commentStartEntry = commentStartEntries[0]
const commentEndEntry = commentEndEntries.find((entry) => {
return entry.scopeSelector === commentStartEntry.scopeSelector
})
return {
commentStartString: commentStartEntry && commentStartEntry.value,
commentEndString: commentEndEntry && commentEndEntry.value
}
}
}
TextEditorRegistry.ScopedSettingsDelegate = ScopedSettingsDelegate
| TextEditorRegistry | identifier_name |
text-editor-registry.js | /** @babel */
import {Emitter, Disposable, CompositeDisposable} from 'event-kit'
import {Point, Range} from 'text-buffer'
import TextEditor from './text-editor'
import ScopeDescriptor from './scope-descriptor'
const EDITOR_PARAMS_BY_SETTING_KEY = [
['core.fileEncoding', 'encoding'],
['editor.atomicSoftTabs', 'atomicSoftTabs'],
['editor.showInvisibles', 'showInvisibles'],
['editor.tabLength', 'tabLength'],
['editor.invisibles', 'invisibles'],
['editor.showCursorOnSelection', 'showCursorOnSelection'],
['editor.showIndentGuide', 'showIndentGuide'],
['editor.showLineNumbers', 'showLineNumbers'],
['editor.softWrap', 'softWrapped'],
['editor.softWrapHangingIndent', 'softWrapHangingIndentLength'],
['editor.softWrapAtPreferredLineLength', 'softWrapAtPreferredLineLength'],
['editor.preferredLineLength', 'preferredLineLength'],
['editor.maxScreenLineLength', 'maxScreenLineLength'],
['editor.autoIndent', 'autoIndent'],
['editor.autoIndentOnPaste', 'autoIndentOnPaste'],
['editor.scrollPastEnd', 'scrollPastEnd'],
['editor.undoGroupingInterval', 'undoGroupingInterval'],
['editor.nonWordCharacters', 'nonWordCharacters'],
['editor.scrollSensitivity', 'scrollSensitivity']
]
const GRAMMAR_SELECTION_RANGE = Range(Point.ZERO, Point(10, 0)).freeze()
// Experimental: This global registry tracks registered `TextEditors`.
//
// If you want to add functionality to a wider set of text editors than just
// those appearing within workspace panes, use `atom.textEditors.observe` to
// invoke a callback for all current and future registered text editors.
//
// If you want packages to be able to add functionality to your non-pane text
// editors (such as a search field in a custom user interface element), register
// them for observation via `atom.textEditors.add`. **Important:** When you're
// done using your editor, be sure to call `dispose` on the returned disposable
// to avoid leaking editors.
export default class TextEditorRegistry {
constructor ({config, grammarRegistry, assert, packageManager}) {
this.assert = assert
this.config = config
this.grammarRegistry = grammarRegistry
this.scopedSettingsDelegate = new ScopedSettingsDelegate(config)
this.grammarAddedOrUpdated = this.grammarAddedOrUpdated.bind(this)
this.clear()
this.initialPackageActivationPromise = new Promise((resolve) => {
// TODO: Remove this usage of a private property of PackageManager.
// Should PackageManager just expose a promise-based API like this?
if (packageManager.deferredActivationHooks) {
packageManager.onDidActivateInitialPackages(resolve)
} else {
resolve()
}
})
}
deserialize (state) {
this.editorGrammarOverrides = state.editorGrammarOverrides
}
serialize () {
return {
editorGrammarOverrides: Object.assign({}, this.editorGrammarOverrides)
}
}
clear () {
if (this.subscriptions) {
this.subscriptions.dispose()
}
this.subscriptions = new CompositeDisposable()
this.editors = new Set()
this.emitter = new Emitter()
this.scopesWithConfigSubscriptions = new Set()
this.editorsWithMaintainedConfig = new Set()
this.editorsWithMaintainedGrammar = new Set()
this.editorGrammarOverrides = {}
this.editorGrammarScores = new WeakMap()
this.subscriptions.add(
this.grammarRegistry.onDidAddGrammar(this.grammarAddedOrUpdated),
this.grammarRegistry.onDidUpdateGrammar(this.grammarAddedOrUpdated)
)
}
destroy () {
this.subscriptions.dispose()
this.editorsWithMaintainedConfig = null
}
// Register a `TextEditor`.
//
// * `editor` The editor to register.
//
// Returns a {Disposable} on which `.dispose()` can be called to remove the
// added editor. To avoid any memory leaks this should be called when the
// editor is destroyed.
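// Illustrative sketch (added for this document, not part of the original
// source): registering a hypothetical search-field editor so packages can
// observe it, then disposing the registration when the field is destroyed.
//
//   const registration = atom.textEditors.add(searchFieldEditor)
//   // ...when the custom UI element is torn down:
//   registration.dispose()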
add (editor) {
this.editors.add(editor)
editor.registered = true
this.emitter.emit('did-add-editor', editor)
return new Disposable(() => this.remove(editor))
}
build (params) {
params = Object.assign({assert: this.assert}, params)
let scope = null
if (params.buffer) {
const filePath = params.buffer.getPath()
const headContent = params.buffer.getTextInRange(GRAMMAR_SELECTION_RANGE)
params.grammar = this.grammarRegistry.selectGrammar(filePath, headContent)
scope = new ScopeDescriptor({scopes: [params.grammar.scopeName]})
}
Object.assign(params, this.textEditorParamsForScope(scope))
return new TextEditor(params)
}
// Remove a `TextEditor`.
//
// * `editor` The editor to remove.
//
// Returns a {Boolean} indicating whether the editor was successfully removed.
remove (editor) {
var removed = this.editors.delete(editor)
editor.registered = false
return removed
}
// Invoke the given callback with all the current and future registered
// `TextEditors`.
//
// * `callback` {Function} to be called with current and future text editors.
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
observe (callback) {
this.editors.forEach(callback)
return this.emitter.on('did-add-editor', callback)
}
// Keep a {TextEditor}'s configuration in sync with Atom's settings.
//
// * `editor` The editor whose configuration will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// configuration.
maintainConfig (editor) {
if (this.editorsWithMaintainedConfig.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedConfig.add(editor)
editor.setScopedSettingsDelegate(this.scopedSettingsDelegate)
this.subscribeToSettingsForEditorScope(editor)
const grammarChangeSubscription = editor.onDidChangeGrammar(() => {
this.subscribeToSettingsForEditorScope(editor)
})
this.subscriptions.add(grammarChangeSubscription)
const updateTabTypes = () => {
const configOptions = {scope: editor.getRootScopeDescriptor()}
editor.setSoftTabs(shouldEditorUseSoftTabs(
editor,
this.config.get('editor.tabType', configOptions),
this.config.get('editor.softTabs', configOptions)
))
}
updateTabTypes()
const tokenizeSubscription = editor.onDidTokenize(updateTabTypes)
this.subscriptions.add(tokenizeSubscription)
return new Disposable(() => {
this.editorsWithMaintainedConfig.delete(editor)
editor.setScopedSettingsDelegate(null)
tokenizeSubscription.dispose()
grammarChangeSubscription.dispose()
this.subscriptions.remove(grammarChangeSubscription)
this.subscriptions.remove(tokenizeSubscription)
})
}
// Set a {TextEditor}'s grammar based on its path and content, and continue
// to update its grammar as grammars are added or updated, or the editor's
// file path changes.
//
// * `editor` The editor whose grammar will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// grammar.
maintainGrammar (editor) {
if (this.editorsWithMaintainedGrammar.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedGrammar.add(editor)
const buffer = editor.getBuffer()
for (let existingEditor of this.editorsWithMaintainedGrammar) {
if (existingEditor.getBuffer() === buffer) {
const existingOverride = this.editorGrammarOverrides[existingEditor.id]
if (existingOverride) {
this.editorGrammarOverrides[editor.id] = existingOverride
}
break
}
}
this.selectGrammarForEditor(editor)
const pathChangeSubscription = editor.onDidChangePath(() => {
this.editorGrammarScores.delete(editor)
this.selectGrammarForEditor(editor)
})
this.subscriptions.add(pathChangeSubscription)
return new Disposable(() => {
delete this.editorGrammarOverrides[editor.id]
this.editorsWithMaintainedGrammar.delete(editor)
this.subscriptions.remove(pathChangeSubscription)
pathChangeSubscription.dispose()
})
}
// Force a {TextEditor} to use a different grammar than the one that would
// otherwise be selected for it.
//
// * `editor` The editor whose grammar will be set.
// * `scopeName` The {String} root scope name for the desired {Grammar}.
setGrammarOverride (editor, scopeName) {
this.editorGrammarOverrides[editor.id] = scopeName
this.editorGrammarScores.delete(editor)
editor.setGrammar(this.grammarRegistry.grammarForScopeName(scopeName))
}
// Retrieve the grammar scope name that has been set as a grammar override
// for the given {TextEditor}.
//
// * `editor` The editor.
//
// Returns a {String} scope name, or `null` if no override has been set
// for the given editor.
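// Illustrative sketch (added for this document, not part of the original
// source): forcing the JavaScript grammar on an editor, reading the override
// back, then removing it. The scope name 'source.js' is only an example.
//
//   atom.textEditors.setGrammarOverride(editor, 'source.js')
//   atom.textEditors.getGrammarOverride(editor)   // => 'source.js'
//   atom.textEditors.clearGrammarOverride(editor) // re-runs grammar selection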
getGrammarOverride (editor) {
return this.editorGrammarOverrides[editor.id]
}
// Remove any grammar override that has been set for the given {TextEditor}.
//
// * `editor` The editor.
clearGrammarOverride (editor) {
delete this.editorGrammarOverrides[editor.id]
this.selectGrammarForEditor(editor)
}
// Private
grammarAddedOrUpdated (grammar) {
this.editorsWithMaintainedGrammar.forEach((editor) => {
if (grammar.injectionSelector) {
if (editor.tokenizedBuffer.hasTokenForSelector(grammar.injectionSelector)) {
editor.tokenizedBuffer.retokenizeLines()
}
return
}
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
if (grammar.scopeName === grammarOverride) {
editor.setGrammar(grammar)
}
} else {
const score = this.grammarRegistry.getGrammarScore(
grammar,
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
let currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
})
}
selectGrammarForEditor (editor) {
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
const grammar = this.grammarRegistry.grammarForScopeName(grammarOverride)
editor.setGrammar(grammar)
return
}
const {grammar, score} = this.grammarRegistry.selectGrammarWithScore(
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
if (!grammar) {
throw new Error(`No grammar found for path: ${editor.getPath()}`)
}
const currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
async subscribeToSettingsForEditorScope (editor) {
await this.initialPackageActivationPromise
const scopeDescriptor = editor.getRootScopeDescriptor()
const scopeChain = scopeDescriptor.getScopeChain()
editor.update(this.textEditorParamsForScope(scopeDescriptor))
if (!this.scopesWithConfigSubscriptions.has(scopeChain)) {
this.scopesWithConfigSubscriptions.add(scopeChain)
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
this.subscriptions.add(
this.config.onDidChange(settingKey, configOptions, ({newValue}) => {
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.update({[paramName]: newValue})
}
})
})
)
}
const updateTabTypes = () => {
const tabType = this.config.get('editor.tabType', configOptions)
const softTabs = this.config.get('editor.softTabs', configOptions)
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.setSoftTabs(shouldEditorUseSoftTabs(editor, tabType, softTabs))
}
})
}
this.subscriptions.add(
this.config.onDidChange('editor.tabType', configOptions, updateTabTypes),
this.config.onDidChange('editor.softTabs', configOptions, updateTabTypes)
)
}
}
textEditorParamsForScope (scopeDescriptor) {
const result = {}
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
result[paramName] = this.config.get(settingKey, configOptions)
}
return result
}
}
function shouldEditorUseSoftTabs (editor, tabType, softTabs) {
switch (tabType) {
case 'hard':
return false
case 'soft':
return true
case 'auto':
switch (editor.usesSoftTabs()) {
case true:
return true
case false:
return false
default:
return softTabs
}
}
}
function noop () {}
class ScopedSettingsDelegate {
constructor (config) {
this.config = config
}
getNonWordCharacters (scope) |
getIncreaseIndentPattern (scope) {
return this.config.get('editor.increaseIndentPattern', {scope: scope})
}
getDecreaseIndentPattern (scope) {
return this.config.get('editor.decreaseIndentPattern', {scope: scope})
}
getDecreaseNextIndentPattern (scope) {
return this.config.get('editor.decreaseNextIndentPattern', {scope: scope})
}
getFoldEndPattern (scope) {
return this.config.get('editor.foldEndPattern', {scope: scope})
}
getCommentStrings (scope) {
const commentStartEntries = this.config.getAll('editor.commentStart', {scope})
const commentEndEntries = this.config.getAll('editor.commentEnd', {scope})
const commentStartEntry = commentStartEntries[0]
const commentEndEntry = commentEndEntries.find((entry) => {
return entry.scopeSelector === commentStartEntry.scopeSelector
})
return {
commentStartString: commentStartEntry && commentStartEntry.value,
commentEndString: commentEndEntry && commentEndEntry.value
}
}
}
TextEditorRegistry.ScopedSettingsDelegate = ScopedSettingsDelegate
| {
return this.config.get('editor.nonWordCharacters', {scope: scope})
} | identifier_body |
text-editor-registry.js | /** @babel */
import {Emitter, Disposable, CompositeDisposable} from 'event-kit'
import {Point, Range} from 'text-buffer'
import TextEditor from './text-editor'
import ScopeDescriptor from './scope-descriptor'
const EDITOR_PARAMS_BY_SETTING_KEY = [
['core.fileEncoding', 'encoding'],
['editor.atomicSoftTabs', 'atomicSoftTabs'],
['editor.showInvisibles', 'showInvisibles'],
['editor.tabLength', 'tabLength'],
['editor.invisibles', 'invisibles'],
['editor.showCursorOnSelection', 'showCursorOnSelection'],
['editor.showIndentGuide', 'showIndentGuide'],
['editor.showLineNumbers', 'showLineNumbers'],
['editor.softWrap', 'softWrapped'],
['editor.softWrapHangingIndent', 'softWrapHangingIndentLength'],
['editor.softWrapAtPreferredLineLength', 'softWrapAtPreferredLineLength'],
['editor.preferredLineLength', 'preferredLineLength'],
['editor.maxScreenLineLength', 'maxScreenLineLength'],
['editor.autoIndent', 'autoIndent'],
['editor.autoIndentOnPaste', 'autoIndentOnPaste'],
['editor.scrollPastEnd', 'scrollPastEnd'],
['editor.undoGroupingInterval', 'undoGroupingInterval'],
['editor.nonWordCharacters', 'nonWordCharacters'],
['editor.scrollSensitivity', 'scrollSensitivity']
]
const GRAMMAR_SELECTION_RANGE = Range(Point.ZERO, Point(10, 0)).freeze()
// Experimental: This global registry tracks registered `TextEditors`.
//
// If you want to add functionality to a wider set of text editors than just
// those appearing within workspace panes, use `atom.textEditors.observe` to
// invoke a callback for all current and future registered text editors.
//
// If you want packages to be able to add functionality to your non-pane text
// editors (such as a search field in a custom user interface element), register
// them for observation via `atom.textEditors.add`. **Important:** When you're
// done using your editor, be sure to call `dispose` on the returned disposable
// to avoid leaking editors.
export default class TextEditorRegistry {
constructor ({config, grammarRegistry, assert, packageManager}) {
this.assert = assert
this.config = config
this.grammarRegistry = grammarRegistry
this.scopedSettingsDelegate = new ScopedSettingsDelegate(config)
this.grammarAddedOrUpdated = this.grammarAddedOrUpdated.bind(this)
this.clear()
this.initialPackageActivationPromise = new Promise((resolve) => {
// TODO: Remove this usage of a private property of PackageManager.
// Should PackageManager just expose a promise-based API like this?
if (packageManager.deferredActivationHooks) {
packageManager.onDidActivateInitialPackages(resolve)
} else {
resolve()
}
})
}
deserialize (state) {
this.editorGrammarOverrides = state.editorGrammarOverrides
}
serialize () {
return {
editorGrammarOverrides: Object.assign({}, this.editorGrammarOverrides)
}
}
clear () {
if (this.subscriptions) {
this.subscriptions.dispose()
}
this.subscriptions = new CompositeDisposable()
this.editors = new Set()
this.emitter = new Emitter()
this.scopesWithConfigSubscriptions = new Set()
this.editorsWithMaintainedConfig = new Set()
this.editorsWithMaintainedGrammar = new Set()
this.editorGrammarOverrides = {}
this.editorGrammarScores = new WeakMap()
this.subscriptions.add(
this.grammarRegistry.onDidAddGrammar(this.grammarAddedOrUpdated),
this.grammarRegistry.onDidUpdateGrammar(this.grammarAddedOrUpdated)
)
}
destroy () {
this.subscriptions.dispose()
this.editorsWithMaintainedConfig = null
}
// Register a `TextEditor`.
//
// * `editor` The editor to register.
//
// Returns a {Disposable} on which `.dispose()` can be called to remove the
// added editor. To avoid any memory leaks this should be called when the
// editor is destroyed.
add (editor) {
this.editors.add(editor)
editor.registered = true
this.emitter.emit('did-add-editor', editor)
return new Disposable(() => this.remove(editor))
}
build (params) {
params = Object.assign({assert: this.assert}, params)
let scope = null
if (params.buffer) {
const filePath = params.buffer.getPath()
const headContent = params.buffer.getTextInRange(GRAMMAR_SELECTION_RANGE)
params.grammar = this.grammarRegistry.selectGrammar(filePath, headContent)
scope = new ScopeDescriptor({scopes: [params.grammar.scopeName]})
}
Object.assign(params, this.textEditorParamsForScope(scope))
return new TextEditor(params)
}
// Remove a `TextEditor`.
//
// * `editor` The editor to remove.
//
// Returns a {Boolean} indicating whether the editor was successfully removed.
remove (editor) {
var removed = this.editors.delete(editor)
editor.registered = false
return removed
}
// Invoke the given callback with all the current and future registered
// `TextEditors`.
//
// * `callback` {Function} to be called with current and future text editors.
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
observe (callback) {
this.editors.forEach(callback)
return this.emitter.on('did-add-editor', callback)
}
// Keep a {TextEditor}'s configuration in sync with Atom's settings.
//
// * `editor` The editor whose configuration will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// configuration.
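// Illustrative sketch (added for this document, not part of the original
// source): syncing a hypothetical custom editor's settings with Atom's config
// and stopping the sync later.
//
//   const configSubscription = atom.textEditors.maintainConfig(myEditor)
//   // ...
//   configSubscription.dispose() // the editor keeps its last-applied settings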
maintainConfig (editor) {
if (this.editorsWithMaintainedConfig.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedConfig.add(editor)
editor.setScopedSettingsDelegate(this.scopedSettingsDelegate)
this.subscribeToSettingsForEditorScope(editor)
const grammarChangeSubscription = editor.onDidChangeGrammar(() => {
this.subscribeToSettingsForEditorScope(editor)
})
this.subscriptions.add(grammarChangeSubscription)
const updateTabTypes = () => {
const configOptions = {scope: editor.getRootScopeDescriptor()}
editor.setSoftTabs(shouldEditorUseSoftTabs(
editor,
this.config.get('editor.tabType', configOptions),
this.config.get('editor.softTabs', configOptions)
))
}
updateTabTypes()
const tokenizeSubscription = editor.onDidTokenize(updateTabTypes)
this.subscriptions.add(tokenizeSubscription)
return new Disposable(() => {
this.editorsWithMaintainedConfig.delete(editor)
editor.setScopedSettingsDelegate(null)
tokenizeSubscription.dispose()
grammarChangeSubscription.dispose()
this.subscriptions.remove(grammarChangeSubscription)
this.subscriptions.remove(tokenizeSubscription)
})
}
// Set a {TextEditor}'s grammar based on its path and content, and continue
// to update its grammar as grammars are added or updated, or the editor's
// file path changes.
//
// * `editor` The editor whose grammar will be maintained.
//
// Returns a {Disposable} that can be used to stop updating the editor's
// grammar.
maintainGrammar (editor) {
if (this.editorsWithMaintainedGrammar.has(editor)) {
return new Disposable(noop)
}
this.editorsWithMaintainedGrammar.add(editor)
const buffer = editor.getBuffer()
for (let existingEditor of this.editorsWithMaintainedGrammar) {
if (existingEditor.getBuffer() === buffer) {
const existingOverride = this.editorGrammarOverrides[existingEditor.id]
if (existingOverride) {
this.editorGrammarOverrides[editor.id] = existingOverride
}
break
}
}
this.selectGrammarForEditor(editor)
const pathChangeSubscription = editor.onDidChangePath(() => {
this.editorGrammarScores.delete(editor)
this.selectGrammarForEditor(editor)
})
this.subscriptions.add(pathChangeSubscription)
return new Disposable(() => {
delete this.editorGrammarOverrides[editor.id]
this.editorsWithMaintainedGrammar.delete(editor)
this.subscriptions.remove(pathChangeSubscription)
pathChangeSubscription.dispose()
})
}
// Force a {TextEditor} to use a different grammar than the one that would
// otherwise be selected for it.
// | setGrammarOverride (editor, scopeName) {
this.editorGrammarOverrides[editor.id] = scopeName
this.editorGrammarScores.delete(editor)
editor.setGrammar(this.grammarRegistry.grammarForScopeName(scopeName))
}
// Retrieve the grammar scope name that has been set as a grammar override
// for the given {TextEditor}.
//
// * `editor` The editor.
//
// Returns a {String} scope name, or `null` if no override has been set
// for the given editor.
getGrammarOverride (editor) {
return this.editorGrammarOverrides[editor.id]
}
// Remove any grammar override that has been set for the given {TextEditor}.
//
// * `editor` The editor.
clearGrammarOverride (editor) {
delete this.editorGrammarOverrides[editor.id]
this.selectGrammarForEditor(editor)
}
// Private
grammarAddedOrUpdated (grammar) {
this.editorsWithMaintainedGrammar.forEach((editor) => {
if (grammar.injectionSelector) {
if (editor.tokenizedBuffer.hasTokenForSelector(grammar.injectionSelector)) {
editor.tokenizedBuffer.retokenizeLines()
}
return
}
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
if (grammar.scopeName === grammarOverride) {
editor.setGrammar(grammar)
}
} else {
const score = this.grammarRegistry.getGrammarScore(
grammar,
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
let currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
})
}
selectGrammarForEditor (editor) {
const grammarOverride = this.editorGrammarOverrides[editor.id]
if (grammarOverride) {
const grammar = this.grammarRegistry.grammarForScopeName(grammarOverride)
editor.setGrammar(grammar)
return
}
const {grammar, score} = this.grammarRegistry.selectGrammarWithScore(
editor.getPath(),
editor.getTextInBufferRange(GRAMMAR_SELECTION_RANGE)
)
if (!grammar) {
throw new Error(`No grammar found for path: ${editor.getPath()}`)
}
const currentScore = this.editorGrammarScores.get(editor)
if (currentScore == null || score > currentScore) {
editor.setGrammar(grammar)
this.editorGrammarScores.set(editor, score)
}
}
async subscribeToSettingsForEditorScope (editor) {
await this.initialPackageActivationPromise
const scopeDescriptor = editor.getRootScopeDescriptor()
const scopeChain = scopeDescriptor.getScopeChain()
editor.update(this.textEditorParamsForScope(scopeDescriptor))
if (!this.scopesWithConfigSubscriptions.has(scopeChain)) {
this.scopesWithConfigSubscriptions.add(scopeChain)
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
this.subscriptions.add(
this.config.onDidChange(settingKey, configOptions, ({newValue}) => {
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.update({[paramName]: newValue})
}
})
})
)
}
const updateTabTypes = () => {
const tabType = this.config.get('editor.tabType', configOptions)
const softTabs = this.config.get('editor.softTabs', configOptions)
this.editorsWithMaintainedConfig.forEach((editor) => {
if (editor.getRootScopeDescriptor().isEqual(scopeDescriptor)) {
editor.setSoftTabs(shouldEditorUseSoftTabs(editor, tabType, softTabs))
}
})
}
this.subscriptions.add(
this.config.onDidChange('editor.tabType', configOptions, updateTabTypes),
this.config.onDidChange('editor.softTabs', configOptions, updateTabTypes)
)
}
}
textEditorParamsForScope (scopeDescriptor) {
const result = {}
const configOptions = {scope: scopeDescriptor}
for (const [settingKey, paramName] of EDITOR_PARAMS_BY_SETTING_KEY) {
result[paramName] = this.config.get(settingKey, configOptions)
}
return result
}
}
function shouldEditorUseSoftTabs (editor, tabType, softTabs) {
switch (tabType) {
case 'hard':
return false
case 'soft':
return true
case 'auto':
switch (editor.usesSoftTabs()) {
case true:
return true
case false:
return false
default:
return softTabs
}
}
}
function noop () {}
class ScopedSettingsDelegate {
constructor (config) {
this.config = config
}
getNonWordCharacters (scope) {
return this.config.get('editor.nonWordCharacters', {scope: scope})
}
getIncreaseIndentPattern (scope) {
return this.config.get('editor.increaseIndentPattern', {scope: scope})
}
getDecreaseIndentPattern (scope) {
return this.config.get('editor.decreaseIndentPattern', {scope: scope})
}
getDecreaseNextIndentPattern (scope) {
return this.config.get('editor.decreaseNextIndentPattern', {scope: scope})
}
getFoldEndPattern (scope) {
return this.config.get('editor.foldEndPattern', {scope: scope})
}
getCommentStrings (scope) {
const commentStartEntries = this.config.getAll('editor.commentStart', {scope})
const commentEndEntries = this.config.getAll('editor.commentEnd', {scope})
const commentStartEntry = commentStartEntries[0]
const commentEndEntry = commentEndEntries.find((entry) => {
return entry.scopeSelector === commentStartEntry.scopeSelector
})
return {
commentStartString: commentStartEntry && commentStartEntry.value,
commentEndString: commentEndEntry && commentEndEntry.value
}
}
}
TextEditorRegistry.ScopedSettingsDelegate = ScopedSettingsDelegate | // * `editor` The editor whose grammar will be set.
// * `scopeName` The {String} root scope name for the desired {Grammar}. | random_line_split |
SearchTextView.js | // -------------------------------------------------------------------------- \\
// File: SearchTextView.js \\
// Module: ControlViews \\
// Requires: TextView.js \\
// Author: Neil Jenkins \\
// License: © 2010-2015 FastMail Pty Ltd. MIT Licensed. \\
// -------------------------------------------------------------------------- \\
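// Illustrative sketch (added for this document, not part of the original
// source): a controller might embed the field roughly like this; the
// `resultsController` object and its `runSearch` method are hypothetical.
//
//   var searchField = new NS.SearchTextView({
//       target: resultsController,
//       method: 'runSearch'
//   });
//   // `runSearch` is invoked on the target whenever the value changes, and a
//   // 'search:activate' event is fired on the view.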
"use strict";
( function (NS) {
var SearchTextView = NS.Class({
Extends: NS.TextView,
type: 'v-SearchText',
placeholder: 'Search',
target: null, |
draw: function (layer, Element, el) {
var children =
SearchTextView.parent.draw.call(this, layer, Element, el);
children.push(
el('i.icon.v-icon-search'),
new NS.ButtonView({
type: NS.bind(this, 'value', function (value) {
return value ?
'v-SearchText-reset v-Button--iconOnly' : 'u-hidden';
}),
icon: 'v-icon-clear',
positioning: 'absolute',
label: NS.loc('Clear Search'),
target: this,
method: 'reset'
})
);
return children;
},
reset: function () {
this.set('value', '')
.blur();
this.fire('search:reset');
},
activate: function () {
if (!this.get('isDisabled')) {
var target = this.get('target') || this,
action;
if (action = this.get('action')) {
target.fire(action, {originView: this});
} else if (action = this.get('method')) {
target[action](this);
}
this.fire('search:activate');
}
}.observes('value')
});
NS.SearchTextView = SearchTextView;
}(O) ); |
method: null, | random_line_split |
SearchTextView.js | // -------------------------------------------------------------------------- \\
// File: SearchTextView.js \\
// Module: ControlViews \\
// Requires: TextView.js \\
// Author: Neil Jenkins \\
// License: © 2010-2015 FastMail Pty Ltd. MIT Licensed. \\
// -------------------------------------------------------------------------- \\
"use strict";
( function (NS) {
var SearchTextView = NS.Class({
Extends: NS.TextView,
type: 'v-SearchText',
placeholder: 'Search',
target: null,
method: null,
draw: function (layer, Element, el) {
var children =
SearchTextView.parent.draw.call(this, layer, Element, el);
children.push(
el('i.icon.v-icon-search'),
new NS.ButtonView({
type: NS.bind(this, 'value', function (value) {
return value ?
'v-SearchText-reset v-Button--iconOnly' : 'u-hidden';
}),
icon: 'v-icon-clear',
positioning: 'absolute',
label: NS.loc('Clear Search'),
target: this,
method: 'reset'
})
);
return children;
},
reset: function () {
this.set('value', '')
.blur();
this.fire('search:reset');
},
activate: function () {
if (!this.get('isDisabled')) {
var target = this.get('target') || this,
action;
if (action = this.get('action')) { | else if (action = this.get('method')) {
target[action](this);
}
this.fire('search:activate');
}
}.observes('value')
});
NS.SearchTextView = SearchTextView;
}(O) );
|
target.fire(action, {originView: this});
} | conditional_block |
html_builder.py | # -*- coding: utf-8 -*-
"""
Module for the generation of html format documents.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import jinja2 |
# -----------------------------------------------------------------------------
def build(_, section_list, filepath):
"""
Build and save the specified document.
"""
environment = jinja2.Environment(
loader = jinja2.PackageLoader(
'da.report', 'templates'),
trim_blocks = True,
lstrip_blocks = True)
template = environment.get_template('engineering_document.template.html')
# Filter out empty sections
filtered_list = []
for section in section_list:
if section['level'] != 1 and len(section['para']) == 0:
continue
filtered_list.append(section)
html = template.render( # pylint: disable=E1101
section_list = filtered_list)
with open(filepath, 'wt') as file:
file.write(html)
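# Illustration (added for this document, not part of the original source): the
# shape of section_list that the filtering above assumes -- each entry needs at
# least 'level' and 'para' keys; 'title' is shown only as a plausible field.
#
#   section_list = [
#       {'level': 1, 'title': 'Introduction', 'para': []},
#       {'level': 2, 'title': 'Scope', 'para': ['This document ...']}
#   ]
#   build(None, section_list, 'build/report.html')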
# _add_title_section(document, doc_data['_metadata'])
# _add_toc_section(document)
# for item in sorted(_generate_content_items(doc_data),
# key = _doc_data_sortkey):
# if item['section_level'] == 1:
# _add_content_section(document)
# if 0 < len(item['paragraph_list']):
# _add_content_para(document,
# level = item['section_level'],
# title = item['section_title'],
# type = item['section_type'],
# content = item['paragraph_list'])
# else:
# print('Skipping section: ' + item['section_title'])
# # Save the document.
# da.util.ensure_dir_exists(os.path.dirname(filepath))
# document.save(filepath) | random_line_split | |
html_builder.py | # -*- coding: utf-8 -*-
"""
Module for the generation of html format documents.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import jinja2
# -----------------------------------------------------------------------------
def | (_, section_list, filepath):
"""
Build and save the specified document.
"""
environment = jinja2.Environment(
loader = jinja2.PackageLoader(
'da.report', 'templates'),
trim_blocks = True,
lstrip_blocks = True)
template = environment.get_template('engineering_document.template.html')
# Filter out empty sections
filtered_list = []
for section in section_list:
if section['level'] != 1 and len(section['para']) == 0:
continue
filtered_list.append(section)
html = template.render( # pylint: disable=E1101
section_list = filtered_list)
with open(filepath, 'wt') as file:
file.write(html)
# _add_title_section(document, doc_data['_metadata'])
# _add_toc_section(document)
# for item in sorted(_generate_content_items(doc_data),
# key = _doc_data_sortkey):
# if item['section_level'] == 1:
# _add_content_section(document)
# if 0 < len(item['paragraph_list']):
# _add_content_para(document,
# level = item['section_level'],
# title = item['section_title'],
# type = item['section_type'],
# content = item['paragraph_list'])
# else:
# print('Skipping section: ' + item['section_title'])
# # Save the document.
# da.util.ensure_dir_exists(os.path.dirname(filepath))
# document.save(filepath)
| build | identifier_name |
html_builder.py | # -*- coding: utf-8 -*-
"""
Module for the generation of html format documents.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import jinja2
# -----------------------------------------------------------------------------
def build(_, section_list, filepath):
|
# _add_title_section(document, doc_data['_metadata'])
# _add_toc_section(document)
# for item in sorted(_generate_content_items(doc_data),
# key = _doc_data_sortkey):
# if item['section_level'] == 1:
# _add_content_section(document)
# if 0 < len(item['paragraph_list']):
# _add_content_para(document,
# level = item['section_level'],
# title = item['section_title'],
# type = item['section_type'],
# content = item['paragraph_list'])
# else:
# print('Skipping section: ' + item['section_title'])
# # Save the document.
# da.util.ensure_dir_exists(os.path.dirname(filepath))
# document.save(filepath)
| """
Build and save the specified document.
"""
environment = jinja2.Environment(
loader = jinja2.PackageLoader(
'da.report', 'templates'),
trim_blocks = True,
lstrip_blocks = True)
template = environment.get_template('engineering_document.template.html')
# Filter out empty sections
filtered_list = []
for section in section_list:
if section['level'] != 1 and len(section['para']) == 0:
continue
filtered_list.append(section)
html = template.render( # pylint: disable=E1101
section_list = filtered_list)
with open(filepath, 'wt') as file:
file.write(html) | identifier_body |
html_builder.py | # -*- coding: utf-8 -*-
"""
Module for the generation of html format documents.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import jinja2
# -----------------------------------------------------------------------------
def build(_, section_list, filepath):
"""
Build and save the specified document.
"""
environment = jinja2.Environment(
loader = jinja2.PackageLoader(
'da.report', 'templates'),
trim_blocks = True,
lstrip_blocks = True)
template = environment.get_template('engineering_document.template.html')
# Filter out empty sections
filtered_list = []
for section in section_list:
|
html = template.render( # pylint: disable=E1101
section_list = filtered_list)
with open(filepath, 'wt') as file:
file.write(html)
# _add_title_section(document, doc_data['_metadata'])
# _add_toc_section(document)
# for item in sorted(_generate_content_items(doc_data),
# key = _doc_data_sortkey):
# if item['section_level'] == 1:
# _add_content_section(document)
# if 0 < len(item['paragraph_list']):
# _add_content_para(document,
# level = item['section_level'],
# title = item['section_title'],
# type = item['section_type'],
# content = item['paragraph_list'])
# else:
# print('Skipping section: ' + item['section_title'])
# # Save the document.
# da.util.ensure_dir_exists(os.path.dirname(filepath))
# document.save(filepath)
| if section['level'] != 1 and len(section['para']) == 0:
continue
filtered_list.append(section) | conditional_block |
dellAnalyzer.ts | import fs from 'fs';
import cheerio from 'cheerio';
import { Analyzer } from './crowller';
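// Illustrative usage sketch (added for this document, not part of the original
// source); the constructor is private, so callers go through getInstance().
// The surrounding crawler is assumed to supply `html` and `filePath`.
//
//   const analyzer = DellAnalyzer.getInstance();
//   const json = analyzer.analyze(html, filePath);
//   fs.writeFileSync(filePath, json);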
interface Course {
title: string;
count: number;
}
interface CourseResult {
time: number;
data: Course[];
}
interface Content {
[propName: number]: Course[];
}
export default class DellAnalyzer implements Analyzer {
private static instance: DellAnalyzer;
static getInstance() {
if (!DellAnalyzer.instance) {
DellAnalyzer.instance = new DellAnalyzer();
}
return DellAnalyzer.instance;
}
private getCourseInfo(html: string) {
const $ = cheerio.load(html);
const courseItems = $('.course-item');
const courseInfos: Course[] = [];
courseItems.map((index, element) => {
const descs = $(element).find('.course-desc');
const title = descs.eq(0).text();
const count = parseInt(
descs
.eq(1)
.text()
.split(':')[1],
10
);
courseInfos.push({ title, count });
});
return {
time: new Date().getTime(),
data: courseInfos
};
}
private generateJsonContent(courseInfo: CourseResult, filePath: string) {
let fileContent: Content = {};
if (fs.existsSync(filePath)) {
fileContent = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
}
fileContent[courseInfo.time] = courseInfo.data;
return fileContent;
}
public analyze(html: string, filePath: string) {
const courseInfo = this.getCourseInfo(html);
const fileContent = this.generateJsonContent(courseInfo, filePath);
return JSON.stringify(fileContent);
}
private co | { }
}
| nstructor() | identifier_name |
dellAnalyzer.ts | import fs from 'fs';
import cheerio from 'cheerio'; | interface Course {
title: string;
count: number;
}
interface CourseResult {
time: number;
data: Course[];
}
interface Content {
[propName: number]: Course[];
}
export default class DellAnalyzer implements Analyzer {
private static instance: DellAnalyzer;
static getInstance() {
if (!DellAnalyzer.instance) {
DellAnalyzer.instance = new DellAnalyzer();
}
return DellAnalyzer.instance;
}
private getCourseInfo(html: string) {
const $ = cheerio.load(html);
const courseItems = $('.course-item');
const courseInfos: Course[] = [];
courseItems.map((index, element) => {
const descs = $(element).find('.course-desc');
const title = descs.eq(0).text();
const count = parseInt(
descs
.eq(1)
.text()
.split(':')[1],
10
);
courseInfos.push({ title, count });
});
return {
time: new Date().getTime(),
data: courseInfos
};
}
private generateJsonContent(courseInfo: CourseResult, filePath: string) {
let fileContent: Content = {};
if (fs.existsSync(filePath)) {
fileContent = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
}
fileContent[courseInfo.time] = courseInfo.data;
return fileContent;
}
public analyze(html: string, filePath: string) {
const courseInfo = this.getCourseInfo(html);
const fileContent = this.generateJsonContent(courseInfo, filePath);
return JSON.stringify(fileContent);
}
private constructor() { }
} | import { Analyzer } from './crowller';
| random_line_split |
dellAnalyzer.ts | import fs from 'fs';
import cheerio from 'cheerio';
import { Analyzer } from './crowller';
interface Course {
title: string;
count: number;
}
interface CourseResult {
time: number;
data: Course[];
}
interface Content {
[propName: number]: Course[];
}
export default class DellAnalyzer implements Analyzer {
private static instance: DellAnalyzer;
static getInstance() {
if (!DellAnalyzer.instance) {
DellAnalyzer.instance = new DellAnalyzer();
}
return DellAnalyzer.instance;
}
private getCourseInfo(html: string) | private generateJsonContent(courseInfo: CourseResult, filePath: string) {
let fileContent: Content = {};
if (fs.existsSync(filePath)) {
fileContent = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
}
fileContent[courseInfo.time] = courseInfo.data;
return fileContent;
}
public analyze(html: string, filePath: string) {
const courseInfo = this.getCourseInfo(html);
const fileContent = this.generateJsonContent(courseInfo, filePath);
return JSON.stringify(fileContent);
}
private constructor() { }
}
| {
const $ = cheerio.load(html);
const courseItems = $('.course-item');
const courseInfos: Course[] = [];
courseItems.map((index, element) => {
const descs = $(element).find('.course-desc');
const title = descs.eq(0).text();
const count = parseInt(
descs
.eq(1)
.text()
.split(':')[1],
10
);
courseInfos.push({ title, count });
});
return {
time: new Date().getTime(),
data: courseInfos
};
}
| identifier_body |
dellAnalyzer.ts | import fs from 'fs';
import cheerio from 'cheerio';
import { Analyzer } from './crowller';
interface Course {
title: string;
count: number;
}
interface CourseResult {
time: number;
data: Course[];
}
interface Content {
[propName: number]: Course[];
}
export default class DellAnalyzer implements Analyzer {
private static instance: DellAnalyzer;
static getInstance() {
if (!DellAnalyzer.instance) {
DellAnalyzer.instance = new DellAnalyzer();
}
return DellAnalyzer.instance;
}
private getCourseInfo(html: string) {
const $ = cheerio.load(html);
const courseItems = $('.course-item');
const courseInfos: Course[] = [];
courseItems.map((index, element) => {
const descs = $(element).find('.course-desc');
const title = descs.eq(0).text();
const count = parseInt(
descs
.eq(1)
.text()
.split(':')[1],
10
);
courseInfos.push({ title, count });
});
return {
time: new Date().getTime(),
data: courseInfos
};
}
private generateJsonContent(courseInfo: CourseResult, filePath: string) {
let fileContent: Content = {};
if (fs.existsSync(filePath)) {
| fileContent[courseInfo.time] = courseInfo.data;
return fileContent;
}
public analyze(html: string, filePath: string) {
const courseInfo = this.getCourseInfo(html);
const fileContent = this.generateJsonContent(courseInfo, filePath);
return JSON.stringify(fileContent);
}
private constructor() { }
}
| fileContent = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
}
| conditional_block |
labels.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {GithubConfig} from '../../../utils/config';
import {GithubClient} from '../../../utils/git/github';
import {TargetLabel} from '../config';
import {InvalidTargetBranchError, InvalidTargetLabelError} from '../target-label';
import {fetchActiveReleaseTrainBranches, getVersionOfBranch, GithubRepo, isReleaseTrainBranch, nextBranchName} from './branches';
import {assertActiveLtsBranch} from './lts-branch';
/**
* Gets a label configuration for the merge tooling that reflects the default Angular
* organization-wide labeling and branching semantics as outlined in the specification.
*
* https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU
*/
export async function getDefaultTargetLabelConfiguration(
api: GithubClient, github: GithubConfig, npmPackageName: string): Promise<TargetLabel[]> {
const repo: GithubRepo = {owner: github.owner, repo: github.name, api, npmPackageName};
const nextVersion = await getVersionOfBranch(repo, nextBranchName);
const hasNextMajorTrain = nextVersion.minor === 0;
const {latestVersionBranch, releaseCandidateBranch} =
await fetchActiveReleaseTrainBranches(repo, nextVersion);
return [
{
pattern: 'target: major',
branches: () => {
// If `next` is currently not designated to be a major version, we do not
// allow merging of PRs with `target: major`.
if (!hasNextMajorTrain) {
throw new InvalidTargetLabelError(
`Unable to merge pull request. The "${nextBranchName}" branch will be ` +
`released as a minor version.`);
}
return [nextBranchName];
},
},
{
pattern: 'target: minor',
// Changes labeled with `target: minor` are merged most commonly into the next branch
// (i.e. `master`). In rare cases of an exceptional minor version while being already
// on a major release train, this would need to be overridden manually.
// TODO: Consider handling this automatically by checking if the NPM version matches
// the last-minor. If not, then an exceptional minor might be in progress. See:
// https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU/edit#heading=h.h7o5pjq6yqd0
branches: [nextBranchName],
},
{
pattern: 'target: patch',
branches: githubTargetBranch => {
// If a PR is targeting the latest active version-branch through the Github UI,
// and is also labeled with `target: patch`, then we merge it directly into the
// branch without doing any cherry-picking. This is useful if a PR could not be
// applied cleanly, and a separate PR for the patch branch has been created.
if (githubTargetBranch === latestVersionBranch) {
return [latestVersionBranch];
}
// Otherwise, patch changes are always merged into the next and patch branch.
const branches = [nextBranchName, latestVersionBranch];
// Additionally, if there is a release-candidate/feature-freeze release-train
// currently active, also merge the PR into that version-branch.
if (releaseCandidateBranch !== null) |
return branches;
}
},
{
pattern: 'target: rc',
branches: githubTargetBranch => {
// The `target: rc` label cannot be applied if there is no active feature-freeze
// or release-candidate release train.
if (releaseCandidateBranch === null) {
throw new InvalidTargetLabelError(
`No active feature-freeze/release-candidate branch. ` +
`Unable to merge pull request using "target: rc" label.`);
}
// If the PR is targeting the active release-candidate/feature-freeze version branch
// directly through the Github UI and has the `target: rc` label applied, merge it
// only into the release candidate branch. This is useful if a PR did not apply cleanly
// into the release-candidate/feature-freeze branch, and a separate PR has been created.
if (githubTargetBranch === releaseCandidateBranch) {
return [releaseCandidateBranch];
}
// Otherwise, merge into the next and active release-candidate/feature-freeze branch.
return [nextBranchName, releaseCandidateBranch];
},
},
{
// LTS changes are rare enough that we won't worry about cherry-picking changes into all
// active LTS branches for PRs created against any other branch. Instead, PR authors need
// to manually create separate PRs for desired LTS branches. Additionally, active LTS branches
// commonly diverge quickly. This makes cherry-picking not an option for LTS changes.
pattern: 'target: lts',
branches: async githubTargetBranch => {
if (!isReleaseTrainBranch(githubTargetBranch)) {
throw new InvalidTargetBranchError(
`PR cannot be merged as it does not target a long-term support ` +
`branch: "${githubTargetBranch}"`);
}
if (githubTargetBranch === latestVersionBranch) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into patch branch. ` +
`Consider changing the label to "target: patch" if this is intentional.`);
}
if (githubTargetBranch === releaseCandidateBranch && releaseCandidateBranch !== null) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into feature-freeze/release-candidate ` +
`branch. Consider changing the label to "target: rc" if this is intentional.`);
}
// Assert that the selected branch is an active LTS branch.
await assertActiveLtsBranch(repo, githubTargetBranch);
return [githubTargetBranch];
},
},
];
}
| {
branches.push(releaseCandidateBranch);
} | conditional_block |
labels.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {GithubConfig} from '../../../utils/config';
import {GithubClient} from '../../../utils/git/github';
import {TargetLabel} from '../config';
import {InvalidTargetBranchError, InvalidTargetLabelError} from '../target-label';
import {fetchActiveReleaseTrainBranches, getVersionOfBranch, GithubRepo, isReleaseTrainBranch, nextBranchName} from './branches';
import {assertActiveLtsBranch} from './lts-branch';
/**
* Gets a label configuration for the merge tooling that reflects the default Angular
* organization-wide labeling and branching semantics as outlined in the specification.
*
* https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU
*/
export async function getDefaultTargetLabelConfiguration(
api: GithubClient, github: GithubConfig, npmPackageName: string): Promise<TargetLabel[]> | {
const repo: GithubRepo = {owner: github.owner, repo: github.name, api, npmPackageName};
const nextVersion = await getVersionOfBranch(repo, nextBranchName);
const hasNextMajorTrain = nextVersion.minor === 0;
const {latestVersionBranch, releaseCandidateBranch} =
await fetchActiveReleaseTrainBranches(repo, nextVersion);
return [
{
pattern: 'target: major',
branches: () => {
// If `next` is currently not designated to be a major version, we do not
// allow merging of PRs with `target: major`.
if (!hasNextMajorTrain) {
throw new InvalidTargetLabelError(
`Unable to merge pull request. The "${nextBranchName}" branch will be ` +
`released as a minor version.`);
}
return [nextBranchName];
},
},
{
pattern: 'target: minor',
// Changes labeled with `target: minor` are merged most commonly into the next branch
// (i.e. `master`). In rare cases of an exceptional minor version while being already
// on a major release train, this would need to be overridden manually.
// TODO: Consider handling this automatically by checking if the NPM version matches
// the last-minor. If not, then an exceptional minor might be in progress. See:
// https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU/edit#heading=h.h7o5pjq6yqd0
branches: [nextBranchName],
},
{
pattern: 'target: patch',
branches: githubTargetBranch => {
// If a PR is targeting the latest active version-branch through the Github UI,
// and is also labeled with `target: patch`, then we merge it directly into the
// branch without doing any cherry-picking. This is useful if a PR could not be
// applied cleanly, and a separate PR for the patch branch has been created.
if (githubTargetBranch === latestVersionBranch) {
return [latestVersionBranch];
}
// Otherwise, patch changes are always merged into the next and patch branch.
const branches = [nextBranchName, latestVersionBranch];
// Additionally, if there is a release-candidate/feature-freeze release-train
// currently active, also merge the PR into that version-branch.
if (releaseCandidateBranch !== null) {
branches.push(releaseCandidateBranch);
}
return branches;
}
},
{
pattern: 'target: rc',
branches: githubTargetBranch => {
// The `target: rc` label cannot be applied if there is no active feature-freeze
// or release-candidate release train.
if (releaseCandidateBranch === null) {
throw new InvalidTargetLabelError(
`No active feature-freeze/release-candidate branch. ` +
`Unable to merge pull request using "target: rc" label.`);
}
// If the PR is targeting the active release-candidate/feature-freeze version branch
// directly through the Github UI and has the `target: rc` label applied, merge it
// only into the release candidate branch. This is useful if a PR did not apply cleanly
// into the release-candidate/feature-freeze branch, and a separate PR has been created.
if (githubTargetBranch === releaseCandidateBranch) {
return [releaseCandidateBranch];
}
// Otherwise, merge into the next and active release-candidate/feature-freeze branch.
return [nextBranchName, releaseCandidateBranch];
},
},
{
// LTS changes are rare enough that we won't worry about cherry-picking changes into all
// active LTS branches for PRs created against any other branch. Instead, PR authors need
// to manually create separate PRs for desired LTS branches. Additionally, active LTS branches
// commonly diverge quickly. This makes cherry-picking not an option for LTS changes.
pattern: 'target: lts',
branches: async githubTargetBranch => {
if (!isReleaseTrainBranch(githubTargetBranch)) {
throw new InvalidTargetBranchError(
`PR cannot be merged as it does not target a long-term support ` +
`branch: "${githubTargetBranch}"`);
}
if (githubTargetBranch === latestVersionBranch) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into patch branch. ` +
`Consider changing the label to "target: patch" if this is intentional.`);
}
if (githubTargetBranch === releaseCandidateBranch && releaseCandidateBranch !== null) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into feature-freeze/release-candidate ` +
`branch. Consider changing the label to "target: rc" if this is intentional.`);
}
// Assert that the selected branch is an active LTS branch.
await assertActiveLtsBranch(repo, githubTargetBranch);
return [githubTargetBranch];
},
},
];
} | identifier_body | |
labels.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {GithubConfig} from '../../../utils/config';
import {GithubClient} from '../../../utils/git/github';
import {TargetLabel} from '../config';
import {InvalidTargetBranchError, InvalidTargetLabelError} from '../target-label';
import {fetchActiveReleaseTrainBranches, getVersionOfBranch, GithubRepo, isReleaseTrainBranch, nextBranchName} from './branches';
import {assertActiveLtsBranch} from './lts-branch';
/**
* Gets a label configuration for the merge tooling that reflects the default Angular
* organization-wide labeling and branching semantics as outlined in the specification.
*
* https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU
*/
export async function | (
api: GithubClient, github: GithubConfig, npmPackageName: string): Promise<TargetLabel[]> {
const repo: GithubRepo = {owner: github.owner, repo: github.name, api, npmPackageName};
const nextVersion = await getVersionOfBranch(repo, nextBranchName);
const hasNextMajorTrain = nextVersion.minor === 0;
const {latestVersionBranch, releaseCandidateBranch} =
await fetchActiveReleaseTrainBranches(repo, nextVersion);
return [
{
pattern: 'target: major',
branches: () => {
// If `next` is currently not designated to be a major version, we do not
// allow merging of PRs with `target: major`.
if (!hasNextMajorTrain) {
throw new InvalidTargetLabelError(
`Unable to merge pull request. The "${nextBranchName}" branch will be ` +
`released as a minor version.`);
}
return [nextBranchName];
},
},
{
pattern: 'target: minor',
// Changes labeled with `target: minor` are merged most commonly into the next branch
// (i.e. `master`). In rare cases of an exceptional minor version while being already
// on a major release train, this would need to be overridden manually.
// TODO: Consider handling this automatically by checking if the NPM version matches
// the last-minor. If not, then an exceptional minor might be in progress. See:
// https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU/edit#heading=h.h7o5pjq6yqd0
branches: [nextBranchName],
},
{
pattern: 'target: patch',
branches: githubTargetBranch => {
// If a PR is targeting the latest active version-branch through the Github UI,
// and is also labeled with `target: patch`, then we merge it directly into the
// branch without doing any cherry-picking. This is useful if a PR could not be
// applied cleanly, and a separate PR for the patch branch has been created.
if (githubTargetBranch === latestVersionBranch) {
return [latestVersionBranch];
}
// Otherwise, patch changes are always merged into the next and patch branch.
const branches = [nextBranchName, latestVersionBranch];
// Additionally, if there is a release-candidate/feature-freeze release-train
// currently active, also merge the PR into that version-branch.
if (releaseCandidateBranch !== null) {
branches.push(releaseCandidateBranch);
}
return branches;
}
},
{
pattern: 'target: rc',
branches: githubTargetBranch => {
// The `target: rc` label cannot be applied if there is no active feature-freeze
// or release-candidate release train.
if (releaseCandidateBranch === null) {
throw new InvalidTargetLabelError(
`No active feature-freeze/release-candidate branch. ` +
`Unable to merge pull request using "target: rc" label.`);
}
// If the PR is targeting the active release-candidate/feature-freeze version branch
// directly through the Github UI and has the `target: rc` label applied, merge it
// only into the release candidate branch. This is useful if a PR did not apply cleanly
// into the release-candidate/feature-freeze branch, and a separate PR has been created.
if (githubTargetBranch === releaseCandidateBranch) {
return [releaseCandidateBranch];
}
// Otherwise, merge into the next and active release-candidate/feature-freeze branch.
return [nextBranchName, releaseCandidateBranch];
},
},
{
// LTS changes are rare enough that we won't worry about cherry-picking changes into all
// active LTS branches for PRs created against any other branch. Instead, PR authors need
// to manually create separate PRs for desired LTS branches. Additionally, active LTS branches
// commonly diverge quickly. This makes cherry-picking not an option for LTS changes.
pattern: 'target: lts',
branches: async githubTargetBranch => {
if (!isReleaseTrainBranch(githubTargetBranch)) {
throw new InvalidTargetBranchError(
`PR cannot be merged as it does not target a long-term support ` +
`branch: "${githubTargetBranch}"`);
}
if (githubTargetBranch === latestVersionBranch) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into patch branch. ` +
`Consider changing the label to "target: patch" if this is intentional.`);
}
if (githubTargetBranch === releaseCandidateBranch && releaseCandidateBranch !== null) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into feature-freeze/release-candidate ` +
`branch. Consider changing the label to "target: rc" if this is intentional.`);
}
// Assert that the selected branch is an active LTS branch.
await assertActiveLtsBranch(repo, githubTargetBranch);
return [githubTargetBranch];
},
},
];
}
| getDefaultTargetLabelConfiguration | identifier_name |
labels.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {GithubConfig} from '../../../utils/config';
import {GithubClient} from '../../../utils/git/github';
import {TargetLabel} from '../config';
import {InvalidTargetBranchError, InvalidTargetLabelError} from '../target-label';
import {fetchActiveReleaseTrainBranches, getVersionOfBranch, GithubRepo, isReleaseTrainBranch, nextBranchName} from './branches';
import {assertActiveLtsBranch} from './lts-branch';
/**
* Gets a label configuration for the merge tooling that reflects the default Angular
* organization-wide labeling and branching semantics as outlined in the specification.
*
* https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU
*/
export async function getDefaultTargetLabelConfiguration(
api: GithubClient, github: GithubConfig, npmPackageName: string): Promise<TargetLabel[]> {
const repo: GithubRepo = {owner: github.owner, repo: github.name, api, npmPackageName};
const nextVersion = await getVersionOfBranch(repo, nextBranchName);
const hasNextMajorTrain = nextVersion.minor === 0;
const {latestVersionBranch, releaseCandidateBranch} =
await fetchActiveReleaseTrainBranches(repo, nextVersion);
return [
{
pattern: 'target: major',
branches: () => {
// If `next` is currently not designated to be a major version, we do not
// allow merging of PRs with `target: major`.
if (!hasNextMajorTrain) {
throw new InvalidTargetLabelError(
`Unable to merge pull request. The "${nextBranchName}" branch will be ` +
`released as a minor version.`);
}
return [nextBranchName];
},
},
{
pattern: 'target: minor',
// Changes labeled with `target: minor` are merged most commonly into the next branch
// (i.e. `master`). In rare cases of an exceptional minor version while being already
// on a major release train, this would need to be overridden manually.
// TODO: Consider handling this automatically by checking if the NPM version matches
// the last-minor. If not, then an exceptional minor might be in progress. See:
// https://docs.google.com/document/d/197kVillDwx-RZtSVOBtPb4BBIAw0E9RT3q3v6DZkykU/edit#heading=h.h7o5pjq6yqd0
branches: [nextBranchName],
},
{
pattern: 'target: patch',
branches: githubTargetBranch => {
// If a PR is targeting the latest active version-branch through the Github UI,
// and is also labeled with `target: patch`, then we merge it directly into the
// branch without doing any cherry-picking. This is useful if a PR could not be
// applied cleanly, and a separate PR for the patch branch has been created.
if (githubTargetBranch === latestVersionBranch) {
return [latestVersionBranch];
}
// Otherwise, patch changes are always merged into the next and patch branch.
const branches = [nextBranchName, latestVersionBranch];
// Additionally, if there is a release-candidate/feature-freeze release-train
// currently active, also merge the PR into that version-branch.
if (releaseCandidateBranch !== null) {
branches.push(releaseCandidateBranch);
}
return branches;
}
},
{
pattern: 'target: rc',
branches: githubTargetBranch => {
// The `target: rc` label cannot be applied if there is no active feature-freeze
// or release-candidate release train.
if (releaseCandidateBranch === null) {
throw new InvalidTargetLabelError(
`No active feature-freeze/release-candidate branch. ` +
`Unable to merge pull request using "target: rc" label.`);
}
// If the PR is targeting the active release-candidate/feature-freeze version branch
// directly through the Github UI and has the `target: rc` label applied, merge it
// only into the release candidate branch. This is useful if a PR did not apply cleanly
// into the release-candidate/feature-freeze branch, and a separate PR has been created.
if (githubTargetBranch === releaseCandidateBranch) {
return [releaseCandidateBranch];
}
// Otherwise, merge into the next and active release-candidate/feature-freeze branch.
return [nextBranchName, releaseCandidateBranch];
},
},
{
// LTS changes are rare enough that we won't worry about cherry-picking changes into all
// active LTS branches for PRs created against any other branch. Instead, PR authors need
// to manually create separate PRs for desired LTS branches. Additionally, active LTS branches
// commonly diverge quickly. This makes cherry-picking not an option for LTS changes.
pattern: 'target: lts',
branches: async githubTargetBranch => {
if (!isReleaseTrainBranch(githubTargetBranch)) {
throw new InvalidTargetBranchError(
`PR cannot be merged as it does not target a long-term support ` +
`branch: "${githubTargetBranch}"`);
}
if (githubTargetBranch === latestVersionBranch) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into patch branch. ` +
`Consider changing the label to "target: patch" if this is intentional.`);
}
if (githubTargetBranch === releaseCandidateBranch && releaseCandidateBranch !== null) {
throw new InvalidTargetBranchError(
`PR cannot be merged with "target: lts" into feature-freeze/release-candidate ` + | // Assert that the selected branch is an active LTS branch.
await assertActiveLtsBranch(repo, githubTargetBranch);
return [githubTargetBranch];
},
},
];
} | `branch. Consider changing the label to "target: rc" if this is intentional.`);
} | random_line_split |
logs-spec.js | "use strict";
let logs = require("../logs");
let assert = require("assert");
describe("logs DB collection", function () {
let userID = 1;
let log = {
title: "Food Journal"
};
let logID = "";
it("should create a log", function () {
return logs.createLog(global.db,userID,log).then(function (savedLog) {
assert.equal(savedLog.title, "Food Journal");
assert.equal(savedLog.userID, userID);
assert(savedLog._id);
logID = savedLog._id.toString();
});
});
it("should update a log", function () {
let log = {
title: "Updated Food Journal"
};
return logs.updateLog(global.db, userID, logID, log);
});
it("should read a log", function () {
return logs.readLog(global.db, userID, logID).then(function (log) {
assert.equal(log._id, logID);
assert.equal(log.title, "Updated Food Journal");
});
});
it("should read many logs", function () {
return logs.readLogList(global.db, userID).then(function (list) {
assert(list.length);
assert(list[0]._id);
}); | return logs.deleteLog(global.db, userID, logID).then(function () {
return logs.readLogList(global.db, userID).then(function (list) {
list = list.filter(function (log) {
return log._id == logID;
});
assert.equal(list.length, 0);
});
});
});
it("should delete many logs", function () {
return logs.createLog(global.db, userID, log).then(function (savedLog) {
return logs.deleteLogList(global.db, userID).then(function () {
return logs.readLogList(global.db, userID).then(function (list) {
assert.equal(list.length, 0);
});
});
});
});
}); | });
it("should delete a log", function () { | random_line_split |
zpm.py | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import gzip
import json
import os
import shlex
import sys
import tarfile
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import jinja2
import prettytable
import six
import swiftclient
import yaml
import zpmlib
from zpmlib import util
from zpmlib import zappbundler
from zpmlib import zapptemplate
_DEFAULT_UI_TEMPLATES = ['index.html.tmpl', 'style.css', 'zerocloud.js']
_ZAPP_YAML = 'python-zapp.yaml'
_ZAPP_WITH_UI_YAML = 'python-zapp-with-ui.yaml'
LOG = zpmlib.get_logger(__name__)
BUFFER_SIZE = 65536
#: path/filename of the system.map (job description) in every zapp
SYSTEM_MAP_ZAPP_PATH = 'boot/system.map'
#: Message displayed if insufficient auth settings are specified, either on the
#: command line or in environment variables. Shamelessly copied from
#: ``python-swiftclient``.
NO_AUTH_MSG = """\
Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables
to be set or overridden with -A, -U, or -K.
Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and
OS_TENANT_NAME OS_TENANT_ID to be set or overridden with --os-auth-url,
--os-username, --os-password, --os-tenant-name or os-tenant-id. Note:
adding "-V 2" is necessary for this."""
#: Column labels for the execution summary table
EXEC_TABLE_HEADER = [
'Node',
'Status',
'Retcode',
'NodeT',
'SysT',
'UserT',
'DiskReads',
'DiskBytesR',
'DiskWrites',
'DiskBytesW',
'NetworkReads',
'NetworkBytesR',
'NetworkWrites',
'NetworkBytesW',
]
def create_project(location, with_ui=False, template=None):
"""
Create a ZeroVM application project by writing a default `zapp.yaml` in the
specified directory `location`.
:param location:
Directory location to place project files.
:param with_ui:
Defaults to `False`. If `True`, add basic UI template files as well to
``location``.
:param template:
Default: ``None``. If no template is specified, use the default project
template. (See `zpmlib.zapptemplate`.)
:returns: List of created project files.
"""
if os.path.exists(location):
if not os.path.isdir(location):
# target must be an empty directory
raise RuntimeError("Target `location` must be a directory")
else:
os.makedirs(location)
# Run the template builder and create any additional files required by the
# chosen template type. If ``template`` is None, this is essentially a no-op.
# TODO: just use the afc._created_files
created_files = []
with util.AtomicFileCreator() as afc:
for file_type, path, contents in zapptemplate.template(
location, template, with_ui=with_ui):
afc.create_file(file_type, path, contents)
created_files.append(path)
return created_files
def find_project_root():
"""
Starting from the `cwd`, search up the file system hierarchy until a
``zapp.yaml`` file is found. Once the file is found, return the directory
containing it. If no file is found, raise a `RuntimeError`.
"""
root = os.getcwd()
while not os.path.isfile(os.path.join(root, 'zapp.yaml')):
oldroot, root = root, os.path.dirname(root)
if root == oldroot:
raise RuntimeError("no zapp.yaml file found")
return root
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`list` of job group `dict`s (the job description)
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job
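# Illustrative sketch added for documentation only (not used by zpm): shows how
# _generate_job_desc maps one zapp.yaml execution group to a job group. The
# sample group below is an assumption made up for this example.
def _example_generate_job_desc():
    """
    >>> zapp = {'execution': {'groups': [{
    ...     'name': 'hello',
    ...     'path': 'file://python2.7:python',
    ...     'args': 'hello.py',
    ...     'devices': [{'name': 'python2.7'}, {'name': 'stdout'}]}]}}
    >>> job = _generate_job_desc(zapp)
    >>> job[0]['exec']['name'], job[0]['exec']['args']
    ('python', 'hello.py')
    >>> sorted(job[0].keys())
    ['devices', 'exec', 'name']
    """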
def _get_swift_zapp_url(swift_service_url, zapp_path):
"""
:param str swift_service_url:
The Swift service URL returned from a Keystone service catalog.
Example: http://localhost:8080/v1/AUTH_469a9cd20b5a4fc5be9438f66bb5ee04
:param str zapp_path:
<container>/<zapp-file-name>. Example:
test_container/myapp.zapp
Here's a typical usage example, with typical input and output:
>>> swift_service_url = ('http://localhost:8080/v1/'
... 'AUTH_469a9cd20b5a4fc5be9438f66bb5ee04')
>>> zapp_path = 'test_container/myapp.zapp'
>>> _get_swift_zapp_url(swift_service_url, zapp_path)
'swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/test_container/myapp.zapp'
"""
swift_path = urlparse.urlparse(swift_service_url).path
# TODO(larsbutler): Why do we need to check if the path starts with '/v1/'?
# This is here due to legacy reasons, but it's not clear to me why this is
# needed.
if swift_path.startswith('/v1/'):
swift_path = swift_path[4:]
return 'swift://%s/%s' % (swift_path, zapp_path)
def _prepare_job(tar, zapp, zapp_swift_url):
"""
:param tar:
The application .zapp file, as a :class:`tarfile.TarFile` object.
:param dict zapp:
Parsed contents of the application `zapp.yaml` specification, as a
`dict`.
:param str zapp_swift_url:
Path of the .zapp in Swift, which looks like this::
'swift://AUTH_abcdef123/test_container/hello.zapp'
See :func:`_get_swift_zapp_url`.
:returns:
Extracted contents of the boot/system.map with the swift
path to the .zapp added to the `devices` for each `group`.
So if the job looks like this::
[{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'hello'}]
the output will look like something like this::
[{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'image',
'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
],
'name': 'hello'}]
"""
fp = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
# NOTE(larsbutler): the `decode` is needed for python3
# compatibility
job = json.loads(fp.read().decode('utf-8'))
device = {'name': 'image', 'path': zapp_swift_url}
for group in job:
group['devices'].append(device)
return job
def bundle_project(root, refresh_deps=False):
"""
Bundle the project under root.
"""
zapp_yaml = os.path.join(root, 'zapp.yaml')
zapp = yaml.safe_load(open(zapp_yaml))
zapp_name = zapp['meta']['name'] + '.zapp'
zapp_tar_path = os.path.join(root, zapp_name)
tar = tarfile.open(zapp_tar_path, 'w:gz')
job = _generate_job_desc(zapp)
job_json = json.dumps(job)
info = tarfile.TarInfo(name='boot/system.map')
# This size is only correct because json.dumps uses
# ensure_ascii=True by default and we thus have a 1-1
# correspondence between Unicode characters and bytes.
info.size = len(job_json)
LOG.info('adding %s' % info.name)
# In Python 3, we cannot use a str or bytes object with addfile,
# we need a BytesIO object. In Python 2, BytesIO is just StringIO.
# Since json.dumps produces an ASCII-only Unicode string in Python
# 3, it is safe to encode it to ASCII.
tar.addfile(info, BytesIO(job_json.encode('ascii')))
_add_file_to_tar(root, 'zapp.yaml', tar)
sections = ('bundling', 'ui')
# Keep track of the files we add, given the configuration in the zapp.yaml.
file_add_count = 0
for section in sections:
for pattern in zapp.get(section, []):
paths = glob.glob(os.path.join(root, pattern))
if len(paths) == 0:
LOG.warning(
"pattern '%(pat)s' in section '%(sec)s' matched no files",
dict(pat=pattern, sec=section)
)
else:
for path in paths:
_add_file_to_tar(root, path, tar)
file_add_count += len(paths)
if file_add_count == 0:
# None of the files specified in the "bundling" or "ui" sections were
# found. Something is wrong.
raise zpmlib.ZPMException(
"None of the files specified in the 'bundling' or 'ui' sections of"
" the zapp.yaml matched anything."
)
# Do template-specific bundling
zappbundler.bundle(root, zapp, tar, refresh_deps=refresh_deps)
tar.close()
print('created %s' % zapp_name)
def _add_file_to_tar(root, path, tar, arcname=None):
"""
:param root:
Root working directory.
:param path:
File path.
:param tar:
Open :class:`tarfile.TarFile` object to add the ``files`` to.
"""
# TODO(larsbutler): document ``arcname``
LOG.info('adding %s' % path)
path = os.path.join(root, path)
relpath = os.path.relpath(path, root)
if arcname is None:
# In the archive, give the file the same name and path.
arcname = relpath
tar.add(path, arcname=arcname)
def _find_ui_uploads(zapp, tar):
matches = set()
names = tar.getnames()
for pattern in zapp.get('ui', []):
matches.update(fnmatch.filter(names, pattern))
return sorted(matches)
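# Illustrative sketch added for documentation only: _find_ui_uploads selects
# tar members matching the 'ui' patterns from zapp.yaml. _FakeTar is a
# stand-in assumption for a tarfile.TarFile opened on a .zapp.
def _example_find_ui_uploads():
    """
    >>> class _FakeTar(object):
    ...     def getnames(self):
    ...         return ['boot/system.map', 'index.html.tmpl', 'style.css']
    >>> _find_ui_uploads({'ui': ['*.tmpl', 'style.css']}, _FakeTar())
    ['index.html.tmpl', 'style.css']
    """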
def _post_job(url, token, data, http_conn=None, response_dict=None,
content_type='application/json', content_length=None,
response_body_buffer=None):
# Modelled after swiftclient.client.post_account.
headers = {'X-Auth-Token': token,
'X-Zerovm-Execute': '1.0',
'Content-Type': content_type}
if content_length:
headers['Content-Length'] = str(content_length)
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = swiftclient.http_connection(url)
conn.request('POST', parsed.path, data, headers)
resp = conn.getresponse()
body = resp.read()
swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
swiftclient.store_response(resp, response_dict)
if response_body_buffer is not None:
response_body_buffer.write(body)
class ZeroCloudConnection(swiftclient.Connection):
"""
An extension of the `swiftclient.Connection` which has the capability of
posting ZeroVM jobs to an instance of ZeroCloud (running on Swift).
"""
def authenticate(self):
"""
Authenticate with the provided credentials and cache the storage URL
and auth token as `self.url` and `self.token`, respectively.
"""
self.url, self.token = self.get_auth()
def post_job(self, job, response_dict=None, response_body_buffer=None):
"""Start a ZeroVM job, using a pre-uploaded zapp
:param object job:
Job description. This will be encoded as JSON and sent to
ZeroCloud.
"""
json_data = json.dumps(job)
LOG.debug('JOB: %s' % json_data)
return self._retry(None, _post_job, json_data,
response_dict=response_dict,
response_body_buffer=response_body_buffer)
def post_zapp(self, data, response_dict=None, content_length=None,
response_body_buffer=None):
return self._retry(None, _post_job, data,
response_dict=response_dict,
content_type='application/x-gzip',
content_length=content_length,
response_body_buffer=response_body_buffer)
def _get_zerocloud_conn(args):
version = args.auth_version
# no version was explicitly requested; try to guess it:
if version is None:
version = _guess_auth_version(args)
if version == '1.0':
if any([arg is None for arg in (args.auth, args.user, args.key)]):
raise zpmlib.ZPMException(
"Version 1 auth requires `--auth`, `--user`, and `--key`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.auth, args.user, args.key)
elif version == '2.0':
if any([arg is None for arg in
(args.os_auth_url, args.os_username, args.os_tenant_name,
args.os_password)]):
raise zpmlib.ZPMException(
"Version 2 auth requires `--os-auth-url`, `--os-username`, "
"`--os-password`, and `--os-tenant-name`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.os_auth_url, args.os_username,
args.os_password,
tenant_name=args.os_tenant_name,
auth_version='2.0')
else:
raise zpmlib.ZPMException(NO_AUTH_MSG)
return conn
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
"""Upload all of the necessary files for a zapp.
Returns the name of an uploaded index file, or the target if no
index.html file was uploaded.
:param bool force:
Force deployment, even if the target container is not empty. This means
that files could be overwritten and could cause consistency problems
with these objects in Swift.
"""
base_container = target.split('/')[0]
try:
_, objects = conn.get_container(base_container)
if not len(objects) == 0:
if not force:
raise zpmlib.ZPMException(
"Target container ('%s') is not empty.\nDeploying to a "
"non-empty container can cause consistency problems with "
"overwritten objects.\nSpecify the flag `--force/-f` to "
"overwrite anyway."
% base_container
)
except swiftclient.exceptions.ClientException:
# container doesn't exist; create it
LOG.info("Container '%s' not found. Creating it...", base_container)
conn.put_container(base_container)
# If we get here, everything with the container is fine.
index = target + '/'
uploads = _generate_uploads(conn, target, zapp_path, auth_opts)
for path, data, content_type in uploads:
if path.endswith('/index.html'):
index = path
container, obj = path.split('/', 1)
conn.put_object(container, obj, data, content_type=content_type)
return index
def _generate_uploads(conn, target, zapp_path, auth_opts):
"""Generate sequence of (container-and-file-path, data, content-type)
tuples.
"""
tar = tarfile.open(zapp_path, 'r:gz')
zapp_config = yaml.safe_load(tar.extractfile('zapp.yaml'))
remote_zapp_path = '%s/%s' % (target, os.path.basename(zapp_path))
swift_url = _get_swift_zapp_url(conn.url, remote_zapp_path)
job = _prepare_job(tar, zapp_config, swift_url)
yield (remote_zapp_path, gzip.open(zapp_path).read(), 'application/x-tar')
yield ('%s/%s' % (target, SYSTEM_MAP_ZAPP_PATH), json.dumps(job),
'application/json')
for path in _find_ui_uploads(zapp_config, tar):
output = tar.extractfile(path).read()
if path.endswith('.tmpl'):
tmpl = jinja2.Template(output.decode('utf-8'))
output = tmpl.render(auth_opts=auth_opts, zapp=zapp_config)
# drop the .tmpl extension
path = os.path.splitext(path)[0]
ui_path = '%s/%s' % (target, path)
yield (ui_path, output, None)
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth
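# Illustrative sketch added for documentation only: the "no auth" (0.0) case
# only records the Swift storage URL. _FakeConn is a stand-in assumption for
# an already-authenticated ZeroCloudConnection.
def _example_prepare_auth():
    """
    >>> class _FakeConn(object):
    ...     url = 'http://localhost:8080/v1/AUTH_abc'
    >>> auth = _prepare_auth('0.0', None, _FakeConn())
    >>> auth['version'], auth['swiftUrl']
    ('0.0', 'http://localhost:8080/v1/AUTH_abc')
    """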
def _guess_auth_version(args):
"""Guess the auth version from first the command line args and/or envvars.
Command line arguments override environment variables, so we check those
first.
Auth v1 arguments:
* ``--auth``
* ``--user``
* ``--key``
Auth v2 arguments:
* ``--os-auth-url``
* ``--os-username``
* ``--os-password``
* ``--os-tenant-name``
If all of the v1 and v2 arguments are specified, default to 1.0 (this is
how ``python-swiftclient`` behaves).
If no auth version can be determined from the command line args, we check
environment variables.
Auth v1 vars:
* ``ST_AUTH``
* ``ST_USER``
* ``ST_KEY``
Auth v2 vars:
* ``OS_AUTH_URL``
* ``OS_USERNAME``
* ``OS_PASSWORD``
* ``OS_TENANT_NAME``
The same rule above applies; if both sets of variables are specified,
default to 1.0.
If no auth version can be determined, return `None`.
:param args:
:class:`argparse.Namespace`, representing the args specified on the
command line.
:returns: '1.0', '2.0', or ``None``
"""
v1 = (args.auth, args.user, args.key)
v2 = (args.os_auth_url, args.os_username, args.os_password,
args.os_tenant_name)
if all(v1) and not all(v2):
return '1.0'
elif all(v2) and not all(v1):
return '2.0'
elif all(v1) and all(v2):
# All vars for v1 and v2 auth are set, so we follow the
# `python-swiftclient` behavior and default to 1.0.
return '1.0'
else:
# deduce from envvars
env = os.environ
v1_env = (env.get('ST_AUTH'), env.get('ST_USER'), env.get('ST_KEY'))
v2_env = (env.get('OS_AUTH_URL'), env.get('OS_USERNAME'),
env.get('OS_PASSWORD'), env.get('OS_TENANT_NAME'))
if all(v1_env) and not all(v2_env):
return '1.0'
if all(v2_env) and not all(v1_env):
return '2.0'
elif all(v1_env) and all(v2_env):
# Same as above, if all v1 and v2 vars are set, default to 1.0.
return '1.0'
else:
# Insufficient auth details have been specified.
return None
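# Illustrative sketch added for documentation only: when only the v1 flags are
# present, version 1.0 is chosen before environment variables are consulted.
# The credential values are made-up assumptions.
def _example_guess_auth_version():
    """
    >>> import argparse
    >>> args = argparse.Namespace(
    ...     auth='http://localhost:8080/auth/v1.0', user='tester', key='s3cr3t',
    ...     os_auth_url=None, os_username=None, os_password=None,
    ...     os_tenant_name=None)
    >>> _guess_auth_version(args)
    '1.0'
    """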
def deploy_project(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
ui_auth_version = conn.auth_version
# We can now reset the auth for the web UI, if needed
if args.no_ui_auth:
ui_auth_version = '0.0'
auth = _prepare_auth(ui_auth_version, args, conn)
auth_opts = jinja2.Markup(json.dumps(auth))
deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
force=args.force)
print('app deployed to\n %s/%s' % (conn.url, deploy_index))
if args.execute:
# for compatibility with the option name in 'zpm execute'
args.container = args.target
resp_body_buffer = BytesIO()
resp = execute(args, response_body_buffer=resp_body_buffer)
resp_body_buffer.seek(0)
if resp['status'] < 200 or resp['status'] >= 300:
raise zpmlib.ZPMException(resp_body_buffer.read())
if args.summary:
total_time, exec_table = _get_exec_table(resp)
print('Execution summary:')
print(exec_table)
print('Total time: %s' % total_time)
sys.stdout.write(resp_body_buffer.read())
def _get_exec_table(resp):
"""Build an execution summary table from a job execution response.
:param dict resp:
Response dictionary from job execution. Must contain a ``headers`` key
at least (and will typically contain ``status`` and ``reason`` as
well).
:returns:
Tuple of total execution time (`str`),
``prettytable.PrettyTable`` containing the summary of all node
executions in the job.
"""
headers = resp['headers']
total_time, table_data = _get_exec_table_data(headers)
table = prettytable.PrettyTable(EXEC_TABLE_HEADER)
for row in table_data:
table.add_row(row)
return total_time, table
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data
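# Illustrative sketch added for documentation only: parses one node's stats
# from made-up execution headers in the documented X-Nexe-Cdr-Line format
# (total time, then a per-node time plus ten I/O counters).
def _example_get_exec_table_data():
    """
    >>> headers = {
    ...     'x-nexe-system': 'hello',
    ...     'x-nexe-status': 'ok',
    ...     'x-nexe-retcode': '0',
    ...     'x-nexe-cdr-line': '1.000, 0.2, 0.1 0.1 1 4096 1 4096 0 0 0 0',
    ... }
    >>> total_time, rows = _get_exec_table_data(headers)
    >>> total_time
    '1.000'
    >>> rows[0][:4]
    ['hello', 'ok', '0', '0.2']
    >>> len(rows[0]) == len(EXEC_TABLE_HEADER)
    True
    """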
def execute(args, response_body_buffer=None):
"""Execute a zapp remotely on a ZeroCloud deployment.
:returns:
A `dict` with response data, including the keys 'status', 'reason', and
'headers'.
""" | job_filename = SYSTEM_MAP_ZAPP_PATH
try:
headers, content = conn.get_object(args.container, job_filename)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise zpmlib.ZPMException("Could not find %s" % exc.http_path)
else:
raise zpmlib.ZPMException(str(exc))
job = json.loads(content)
conn.post_job(job, response_dict=resp,
response_body_buffer=response_body_buffer)
LOG.debug('RESP STATUS: %s %s', resp['status'], resp['reason'])
LOG.debug('RESP HEADERS: %s', resp['headers'])
else:
size = os.path.getsize(args.zapp)
zapp_file = open(args.zapp, 'rb')
data_reader = iter(lambda: zapp_file.read(BUFFER_SIZE), b'')
conn.post_zapp(data_reader, response_dict=resp, content_length=size,
response_body_buffer=response_body_buffer)
zapp_file.close()
return resp
def auth(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
print('Auth token: %s' % conn.token)
print('Storage URL: %s' % conn.url) | conn = _get_zerocloud_conn(args)
resp = dict()
if args.container: | random_line_split |
zpm.py | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import gzip
import json
import os
import shlex
import sys
import tarfile
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import jinja2
import prettytable
import six
import swiftclient
import yaml
import zpmlib
from zpmlib import util
from zpmlib import zappbundler
from zpmlib import zapptemplate
_DEFAULT_UI_TEMPLATES = ['index.html.tmpl', 'style.css', 'zerocloud.js']
_ZAPP_YAML = 'python-zapp.yaml'
_ZAPP_WITH_UI_YAML = 'python-zapp-with-ui.yaml'
LOG = zpmlib.get_logger(__name__)
BUFFER_SIZE = 65536
#: path/filename of the system.map (job description) in every zapp
SYSTEM_MAP_ZAPP_PATH = 'boot/system.map'
#: Message displayed if insufficient auth settings are specified, either on the
#: command line or in environment variables. Shamelessly copied from
#: ``python-swiftclient``.
NO_AUTH_MSG = """\
Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables
to be set or overridden with -A, -U, or -K.
Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and
OS_TENANT_NAME OS_TENANT_ID to be set or overridden with --os-auth-url,
--os-username, --os-password, --os-tenant-name or os-tenant-id. Note:
adding "-V 2" is necessary for this."""
#: Column labels for the execution summary table
EXEC_TABLE_HEADER = [
'Node',
'Status',
'Retcode',
'NodeT',
'SysT',
'UserT',
'DiskReads',
'DiskBytesR',
'DiskWrites',
'DiskBytesW',
'NetworkReads',
'NetworkBytesR',
'NetworkWrites',
'NetworkBytesW',
]
def create_project(location, with_ui=False, template=None):
"""
Create a ZeroVM application project by writing a default `zapp.yaml` in the
specified directory `location`.
:param location:
Directory location to place project files.
:param with_ui:
Defaults to `False`. If `True`, add basic UI template files as well to
``location``.
:param template:
Default: ``None``. If no template is specified, use the default project
template. (See `zpmlib.zapptemplate`.)
:returns: List of created project files.
"""
if os.path.exists(location):
if not os.path.isdir(location):
# target must be an empty directory
raise RuntimeError("Target `location` must be a directory")
else:
os.makedirs(location)
# Run the template builder and create any additional files required by the
# chosen template type. If ``template`` is None, this is essentially a no-op.
# TODO: just use the afc._created_files
created_files = []
with util.AtomicFileCreator() as afc:
for file_type, path, contents in zapptemplate.template(
location, template, with_ui=with_ui):
afc.create_file(file_type, path, contents)
created_files.append(path)
return created_files
def find_project_root():
"""
Starting from the `cwd`, search up the file system hierarchy until a
``zapp.yaml`` file is found. Once the file is found, return the directory
containing it. If no file is found, raise a `RuntimeError`.
"""
root = os.getcwd()
while not os.path.isfile(os.path.join(root, 'zapp.yaml')):
oldroot, root = root, os.path.dirname(root)
if root == oldroot:
raise RuntimeError("no zapp.yaml file found")
return root
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`list` of job group `dict`s (the job description)
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job
def _get_swift_zapp_url(swift_service_url, zapp_path):
"""
:param str swift_service_url:
The Swift service URL returned from a Keystone service catalog.
Example: http://localhost:8080/v1/AUTH_469a9cd20b5a4fc5be9438f66bb5ee04
:param str zapp_path:
<container>/<zapp-file-name>. Example:
test_container/myapp.zapp
Here's a typical usage example, with typical input and output:
>>> swift_service_url = ('http://localhost:8080/v1/'
... 'AUTH_469a9cd20b5a4fc5be9438f66bb5ee04')
>>> zapp_path = 'test_container/myapp.zapp'
>>> _get_swift_zapp_url(swift_service_url, zapp_path)
'swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/test_container/myapp.zapp'
"""
swift_path = urlparse.urlparse(swift_service_url).path
# TODO(larsbutler): Why do we need to check if the path starts with '/v1/'?
# This is here due to legacy reasons, but it's not clear to me why this is
# needed.
if swift_path.startswith('/v1/'):
swift_path = swift_path[4:]
return 'swift://%s/%s' % (swift_path, zapp_path)
def _prepare_job(tar, zapp, zapp_swift_url):
"""
:param tar:
The application .zapp file, as a :class:`tarfile.TarFile` object.
:param dict zapp:
Parsed contents of the application `zapp.yaml` specification, as a
`dict`.
:param str zapp_swift_url:
Path of the .zapp in Swift, which looks like this::
'swift://AUTH_abcdef123/test_container/hello.zapp'
See :func:`_get_swift_zapp_url`.
:returns:
Extracted contents of the boot/system.map with the swift
path to the .zapp added to the `devices` for each `group`.
So if the job looks like this::
[{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'hello'}]
the output will look like something like this::
[{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'image',
'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
],
'name': 'hello'}]
"""
fp = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
# NOTE(larsbutler): the `decode` is needed for python3
# compatibility
job = json.loads(fp.read().decode('utf-8'))
device = {'name': 'image', 'path': zapp_swift_url}
for group in job:
group['devices'].append(device)
return job
def bundle_project(root, refresh_deps=False):
"""
Bundle the project under root.
"""
zapp_yaml = os.path.join(root, 'zapp.yaml')
zapp = yaml.safe_load(open(zapp_yaml))
zapp_name = zapp['meta']['name'] + '.zapp'
zapp_tar_path = os.path.join(root, zapp_name)
tar = tarfile.open(zapp_tar_path, 'w:gz')
job = _generate_job_desc(zapp)
job_json = json.dumps(job)
info = tarfile.TarInfo(name='boot/system.map')
# This size is only correct because json.dumps uses
# ensure_ascii=True by default and we thus have a 1-1
# correspondence between Unicode characters and bytes.
info.size = len(job_json)
LOG.info('adding %s' % info.name)
# In Python 3, we cannot use a str or bytes object with addfile,
# we need a BytesIO object. In Python 2, BytesIO is just StringIO.
# Since json.dumps produces an ASCII-only Unicode string in Python
# 3, it is safe to encode it to ASCII.
tar.addfile(info, BytesIO(job_json.encode('ascii')))
_add_file_to_tar(root, 'zapp.yaml', tar)
sections = ('bundling', 'ui')
# Keep track of the files we add, given the configuration in the zapp.yaml.
file_add_count = 0
for section in sections:
for pattern in zapp.get(section, []):
paths = glob.glob(os.path.join(root, pattern))
if len(paths) == 0:
LOG.warning(
"pattern '%(pat)s' in section '%(sec)s' matched no files",
dict(pat=pattern, sec=section)
)
else:
for path in paths:
_add_file_to_tar(root, path, tar)
file_add_count += len(paths)
if file_add_count == 0:
# None of the files specified in the "bundling" or "ui" sections were
# found. Something is wrong.
raise zpmlib.ZPMException(
"None of the files specified in the 'bundling' or 'ui' sections of"
" the zapp.yaml matched anything."
)
# Do template-specific bundling
zappbundler.bundle(root, zapp, tar, refresh_deps=refresh_deps)
tar.close()
print('created %s' % zapp_name)
def _add_file_to_tar(root, path, tar, arcname=None):
"""
:param root:
Root working directory.
:param path:
File path.
:param tar:
Open :class:`tarfile.TarFile` object to add the ``files`` to.
"""
# TODO(larsbutler): document ``arcname``
LOG.info('adding %s' % path)
path = os.path.join(root, path)
relpath = os.path.relpath(path, root)
if arcname is None:
# In the archive, give the file the same name and path.
arcname = relpath
tar.add(path, arcname=arcname)
def _find_ui_uploads(zapp, tar):
matches = set()
names = tar.getnames()
for pattern in zapp.get('ui', []):
matches.update(fnmatch.filter(names, pattern))
return sorted(matches)
def _post_job(url, token, data, http_conn=None, response_dict=None,
content_type='application/json', content_length=None,
response_body_buffer=None):
# Modelled after swiftclient.client.post_account.
headers = {'X-Auth-Token': token,
'X-Zerovm-Execute': '1.0',
'Content-Type': content_type}
if content_length:
headers['Content-Length'] = str(content_length)
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = swiftclient.http_connection(url)
conn.request('POST', parsed.path, data, headers)
resp = conn.getresponse()
body = resp.read()
swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
swiftclient.store_response(resp, response_dict)
if response_body_buffer is not None:
response_body_buffer.write(body)
class ZeroCloudConnection(swiftclient.Connection):
"""
An extension of the `swiftclient.Connection` which has the capability of
posting ZeroVM jobs to an instance of ZeroCloud (running on Swift).
"""
def authenticate(self):
"""
Authenticate with the provided credentials and cache the storage URL
and auth token as `self.url` and `self.token`, respectively.
"""
self.url, self.token = self.get_auth()
def post_job(self, job, response_dict=None, response_body_buffer=None):
"""Start a ZeroVM job, using a pre-uploaded zapp
:param object job:
Job description. This will be encoded as JSON and sent to
ZeroCloud.
"""
json_data = json.dumps(job)
LOG.debug('JOB: %s' % json_data)
return self._retry(None, _post_job, json_data,
response_dict=response_dict,
response_body_buffer=response_body_buffer)
def post_zapp(self, data, response_dict=None, content_length=None,
response_body_buffer=None):
return self._retry(None, _post_job, data,
response_dict=response_dict,
content_type='application/x-gzip',
content_length=content_length,
response_body_buffer=response_body_buffer)
def _get_zerocloud_conn(args):
version = args.auth_version
# no version was explicitly requested; try to guess it:
if version is None:
version = _guess_auth_version(args)
if version == '1.0':
if any([arg is None for arg in (args.auth, args.user, args.key)]):
raise zpmlib.ZPMException(
"Version 1 auth requires `--auth`, `--user`, and `--key`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.auth, args.user, args.key)
elif version == '2.0':
if any([arg is None for arg in
(args.os_auth_url, args.os_username, args.os_tenant_name,
args.os_password)]):
raise zpmlib.ZPMException(
"Version 2 auth requires `--os-auth-url`, `--os-username`, "
"`--os-password`, and `--os-tenant-name`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.os_auth_url, args.os_username,
args.os_password,
tenant_name=args.os_tenant_name,
auth_version='2.0')
else:
raise zpmlib.ZPMException(NO_AUTH_MSG)
return conn
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
"""Upload all of the necessary files for a zapp.
Returns the name of an uploaded index file, or the target if no
index.html file was uploaded.
:param bool force:
Force deployment, even if the target container is not empty. This means
that files could be overwritten and could cause consistency problems
with these objects in Swift.
"""
base_container = target.split('/')[0]
try:
_, objects = conn.get_container(base_container)
if not len(objects) == 0:
if not force:
raise zpmlib.ZPMException(
"Target container ('%s') is not empty.\nDeploying to a "
"non-empty container can cause consistency problems with "
"overwritten objects.\nSpecify the flag `--force/-f` to "
"overwrite anyway."
% base_container
)
except swiftclient.exceptions.ClientException:
# container doesn't exist; create it
LOG.info("Container '%s' not found. Creating it...", base_container)
conn.put_container(base_container)
# If we get here, everything with the container is fine.
index = target + '/'
uploads = _generate_uploads(conn, target, zapp_path, auth_opts)
for path, data, content_type in uploads:
if path.endswith('/index.html'):
index = path
container, obj = path.split('/', 1)
conn.put_object(container, obj, data, content_type=content_type)
return index
def | (conn, target, zapp_path, auth_opts):
"""Generate sequence of (container-and-file-path, data, content-type)
tuples.
"""
tar = tarfile.open(zapp_path, 'r:gz')
zapp_config = yaml.safe_load(tar.extractfile('zapp.yaml'))
remote_zapp_path = '%s/%s' % (target, os.path.basename(zapp_path))
swift_url = _get_swift_zapp_url(conn.url, remote_zapp_path)
job = _prepare_job(tar, zapp_config, swift_url)
yield (remote_zapp_path, gzip.open(zapp_path).read(), 'application/x-tar')
yield ('%s/%s' % (target, SYSTEM_MAP_ZAPP_PATH), json.dumps(job),
'application/json')
for path in _find_ui_uploads(zapp_config, tar):
output = tar.extractfile(path).read()
if path.endswith('.tmpl'):
tmpl = jinja2.Template(output.decode('utf-8'))
output = tmpl.render(auth_opts=auth_opts, zapp=zapp_config)
# drop the .tmpl extension
path = os.path.splitext(path)[0]
ui_path = '%s/%s' % (target, path)
yield (ui_path, output, None)
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth
def _guess_auth_version(args):
"""Guess the auth version from first the command line args and/or envvars.
Command line arguments override environment variables, so we check those
first.
Auth v1 arguments:
* ``--auth``
* ``--user``
* ``--key``
Auth v2 arguments:
* ``--os-auth-url``
* ``--os-username``
* ``--os-password``
* ``--os-tenant-name``
If all of the v1 and v2 arguments are specified, default to 1.0 (this is
how ``python-swiftclient`` behaves).
If no auth version can be determined from the command line args, we check
environment variables.
Auth v1 vars:
* ``ST_AUTH``
* ``ST_USER``
* ``ST_KEY``
Auth v2 vars:
* ``OS_AUTH_URL``
* ``OS_USERNAME``
* ``OS_PASSWORD``
* ``OS_TENANT_NAME``
The same rule above applies; if both sets of variables are specified,
default to 1.0.
If no auth version can be determined, return `None`.
:param args:
:class:`argparse.Namespace`, representing the args specified on the
command line.
:returns: '1.0', '2.0', or ``None``
"""
v1 = (args.auth, args.user, args.key)
v2 = (args.os_auth_url, args.os_username, args.os_password,
args.os_tenant_name)
if all(v1) and not all(v2):
return '1.0'
elif all(v2) and not all(v1):
return '2.0'
elif all(v1) and all(v2):
# All vars for v1 and v2 auth are set, so we follow the
# `python-swiftclient` behavior and default to 1.0.
return '1.0'
else:
# deduce from envvars
env = os.environ
v1_env = (env.get('ST_AUTH'), env.get('ST_USER'), env.get('ST_KEY'))
v2_env = (env.get('OS_AUTH_URL'), env.get('OS_USERNAME'),
env.get('OS_PASSWORD'), env.get('OS_TENANT_NAME'))
if all(v1_env) and not all(v2_env):
return '1.0'
if all(v2_env) and not all(v1_env):
return '2.0'
elif all(v1_env) and all(v2_env):
# Same as above, if all v1 and v2 vars are set, default to 1.0.
return '1.0'
else:
# Insufficient auth details have been specified.
return None
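# Illustrative sketch (not part of the original module): how the rules above
# resolve for a hypothetical ``argparse.Namespace``. The credential values
# are invented for the example.
#
# >>> ns = argparse.Namespace(
# ...     auth='http://localhost:8080/auth/v1.0', user='tester', key='testing',
# ...     os_auth_url=None, os_username=None, os_password=None,
# ...     os_tenant_name=None)
# >>> _guess_auth_version(ns)
# '1.0'
#
# With every attribute set to None and no ST_*/OS_* environment variables
# exported, the function would instead fall through and return None.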
def deploy_project(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
ui_auth_version = conn.auth_version
# We can now reset the auth for the web UI, if needed
if args.no_ui_auth:
ui_auth_version = '0.0'
auth = _prepare_auth(ui_auth_version, args, conn)
auth_opts = jinja2.Markup(json.dumps(auth))
deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
force=args.force)
print('app deployed to\n %s/%s' % (conn.url, deploy_index))
if args.execute:
# for compatibility with the option name in 'zpm execute'
args.container = args.target
resp_body_buffer = BytesIO()
resp = execute(args, response_body_buffer=resp_body_buffer)
resp_body_buffer.seek(0)
if resp['status'] < 200 or resp['status'] >= 300:
raise zpmlib.ZPMException(resp_body_buffer.read())
if args.summary:
total_time, exec_table = _get_exec_table(resp)
print('Execution summary:')
print(exec_table)
print('Total time: %s' % total_time)
sys.stdout.write(resp_body_buffer.read())
def _get_exec_table(resp):
"""Build an execution summary table from a job execution response.
:param dict resp:
Response dictionary from job execution. Must contain a ``headers`` key
at least (and will typically contain ``status`` and ``reason`` as
well).
:returns:
Tuple of total execution time (`str`),
``prettytable.PrettyTable`` containing the summary of all node
executions in the job.
"""
headers = resp['headers']
total_time, table_data = _get_exec_table_data(headers)
table = prettytable.PrettyTable(EXEC_TABLE_HEADER)
for row in table_data:
table.add_row(row)
return total_time, table
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data
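# Illustrative sketch (hypothetical header values): a single-node job named
# 'hello' would be parsed into one 14-column row matching EXEC_TABLE_HEADER.
#
# >>> headers = {
# ...     'x-nexe-system': 'hello',
# ...     'x-nexe-status': 'ok',
# ...     'x-nexe-retcode': '0',
# ...     'x-nexe-cdr-line': ('1.05, 1.01, '
# ...                         '0.25 0.50 1 2048 2 4096 3 1024 4 2048'),
# ... }
# >>> _get_exec_table_data(headers)
# ('1.05', [['hello', 'ok', '0', '1.01', '0.25', '0.50', '1', '2048',
#            '2', '4096', '3', '1024', '4', '2048']])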
def execute(args, response_body_buffer=None):
"""Execute a zapp remotely on a ZeroCloud deployment.
:returns:
A `dict` with response data, including the keys 'status', 'reason', and
'headers'.
"""
conn = _get_zerocloud_conn(args)
resp = dict()
if args.container:
job_filename = SYSTEM_MAP_ZAPP_PATH
try:
headers, content = conn.get_object(args.container, job_filename)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise zpmlib.ZPMException("Could not find %s" % exc.http_path)
else:
raise zpmlib.ZPMException(str(exc))
job = json.loads(content)
conn.post_job(job, response_dict=resp,
response_body_buffer=response_body_buffer)
LOG.debug('RESP STATUS: %s %s', resp['status'], resp['reason'])
LOG.debug('RESP HEADERS: %s', resp['headers'])
else:
size = os.path.getsize(args.zapp)
zapp_file = open(args.zapp, 'rb')
data_reader = iter(lambda: zapp_file.read(BUFFER_SIZE), b'')
conn.post_zapp(data_reader, response_dict=resp, content_length=size,
response_body_buffer=response_body_buffer)
zapp_file.close()
return resp
def auth(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
print('Auth token: %s' % conn.token)
print('Storage URL: %s' % conn.url)
| _generate_uploads | identifier_name |
zpm.py | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import gzip
import json
import os
import shlex
import sys
import tarfile
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import jinja2
import prettytable
import six
import swiftclient
import yaml
import zpmlib
from zpmlib import util
from zpmlib import zappbundler
from zpmlib import zapptemplate
_DEFAULT_UI_TEMPLATES = ['index.html.tmpl', 'style.css', 'zerocloud.js']
_ZAPP_YAML = 'python-zapp.yaml'
_ZAPP_WITH_UI_YAML = 'python-zapp-with-ui.yaml'
LOG = zpmlib.get_logger(__name__)
BUFFER_SIZE = 65536
#: path/filename of the system.map (job description) in every zapp
SYSTEM_MAP_ZAPP_PATH = 'boot/system.map'
#: Message displayed if insufficient auth settings are specified, either on the
#: command line or in environment variables. Shamelessly copied from
#: ``python-swiftclient``.
NO_AUTH_MSG = """\
Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables
to be set or overridden with -A, -U, or -K.
Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and
OS_TENANT_NAME or OS_TENANT_ID to be set or overridden with --os-auth-url,
--os-username, --os-password, --os-tenant-name or --os-tenant-id. Note:
adding "-V 2" is necessary for this."""
#: Column labels for the execution summary table
EXEC_TABLE_HEADER = [
'Node',
'Status',
'Retcode',
'NodeT',
'SysT',
'UserT',
'DiskReads',
'DiskBytesR',
'DiskWrites',
'DiskBytesW',
'NetworkReads',
'NetworkBytesR',
'NetworkWrites',
'NetworkBytesW',
]
def create_project(location, with_ui=False, template=None):
"""
Create a ZeroVM application project by writing a default `zapp.yaml` in the
specified directory `location`.
:param location:
Directory location to place project files.
:param with_ui:
Defaults to `False`. If `True`, add basic UI template files as well to
``location``.
:param template:
Default: ``None``. If no template is specified, use the default project
template. (See `zpmlib.zapptemplate`.)
:returns: List of created project files.
"""
if os.path.exists(location):
if not os.path.isdir(location):
# target must be a directory
raise RuntimeError("Target `location` must be a directory")
else:
os.makedirs(location)
# Run the template builder and create additional files for the project
# based on the template type. If ``template`` is None, this is essentially a NOP.
# TODO: just use the afc._created_files
created_files = []
with util.AtomicFileCreator() as afc:
for file_type, path, contents in zapptemplate.template(
location, template, with_ui=with_ui):
afc.create_file(file_type, path, contents)
created_files.append(path)
return created_files
def find_project_root():
"""
Starting from the `cwd`, search up the file system hierarchy until a
``zapp.yaml`` file is found. Once the file is found, return the directory
containing it. If no file is found, raise a `RuntimeError`.
"""
root = os.getcwd()
while not os.path.isfile(os.path.join(root, 'zapp.yaml')):
oldroot, root = root, os.path.dirname(root)
if root == oldroot:
raise RuntimeError("no zapp.yaml file found")
return root
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`dict` of the job description
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job
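# Illustrative sketch (hypothetical zapp.yaml contents): a single 'hello'
# group using the file:// scheme. Key order in the result is indicative only.
#
# >>> zapp = {'execution': {'groups': [
# ...     {'name': 'hello',
# ...      'path': 'file://python2.7:python',
# ...      'args': 'hello.py',
# ...      'devices': [{'name': 'python2.7'}, {'name': 'stdout'}]}]}}
# >>> _generate_job_desc(zapp)
# [{'name': 'hello',
#   'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
#   'exec': {'path': 'file://python2.7:python',
#            'args': 'hello.py',
#            'name': 'python'}}]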
def _get_swift_zapp_url(swift_service_url, zapp_path):
|
def _prepare_job(tar, zapp, zapp_swift_url):
"""
:param tar:
The application .zapp file, as a :class:`tarfile.TarFile` object.
:param dict zapp:
Parsed contents of the application `zapp.yaml` specification, as a
`dict`.
:param str zapp_swift_url:
Path of the .zapp in Swift, which looks like this::
'swift://AUTH_abcdef123/test_container/hello.zapp'
See :func:`_get_swift_zapp_url`.
:returns:
Extracted contents of the boot/system.map with the swift
path to the .zapp added to the `devices` for each `group`.
So if the job looks like this::
[{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'hello'}]
the output will look something like this::
[{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'image',
'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
],
'name': 'hello'}]
"""
fp = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
# NOTE(larsbutler): the `decode` is needed for python3
# compatibility
job = json.loads(fp.read().decode('utf-8'))
device = {'name': 'image', 'path': zapp_swift_url}
for group in job:
group['devices'].append(device)
return job
def bundle_project(root, refresh_deps=False):
"""
Bundle the project under root.
"""
zapp_yaml = os.path.join(root, 'zapp.yaml')
zapp = yaml.safe_load(open(zapp_yaml))
zapp_name = zapp['meta']['name'] + '.zapp'
zapp_tar_path = os.path.join(root, zapp_name)
tar = tarfile.open(zapp_tar_path, 'w:gz')
job = _generate_job_desc(zapp)
job_json = json.dumps(job)
info = tarfile.TarInfo(name='boot/system.map')
# This size is only correct because json.dumps uses
# ensure_ascii=True by default and we thus have a 1-1
# correspondence between Unicode characters and bytes.
info.size = len(job_json)
LOG.info('adding %s' % info.name)
# In Python 3, we cannot use a str or bytes object with addfile,
# we need a BytesIO object. In Python 2, BytesIO is just StringIO.
# Since json.dumps produces an ASCII-only Unicode string in Python
# 3, it is safe to encode it to ASCII.
tar.addfile(info, BytesIO(job_json.encode('ascii')))
_add_file_to_tar(root, 'zapp.yaml', tar)
sections = ('bundling', 'ui')
# Keep track of the files we add, given the configuration in the zapp.yaml.
file_add_count = 0
for section in sections:
for pattern in zapp.get(section, []):
paths = glob.glob(os.path.join(root, pattern))
if len(paths) == 0:
LOG.warning(
"pattern '%(pat)s' in section '%(sec)s' matched no files",
dict(pat=pattern, sec=section)
)
else:
for path in paths:
_add_file_to_tar(root, path, tar)
file_add_count += len(paths)
if file_add_count == 0:
# None of the files specified in the "bundling" or "ui" sections were
# found. Something is wrong.
raise zpmlib.ZPMException(
"None of the files specified in the 'bundling' or 'ui' sections of"
" the zapp.yaml matched anything."
)
# Do template-specific bundling
zappbundler.bundle(root, zapp, tar, refresh_deps=refresh_deps)
tar.close()
print('created %s' % zapp_name)
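# Illustrative sketch (hypothetical zapp.yaml snippet): the 'bundling' and
# 'ui' sections below are what drive the glob matching in bundle_project().
#
#   meta:
#     name: hello
#   bundling:
#     - "hello.py"
#   ui:
#     - "index.html.tmpl"
#     - "style.css"
#
# With this configuration, bundle_project(root) would produce hello.zapp
# containing boot/system.map, zapp.yaml, hello.py, and the two UI files.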
def _add_file_to_tar(root, path, tar, arcname=None):
"""
:param root:
Root working directory.
:param path:
File path.
:param tar:
Open :class:`tarfile.TarFile` object to add the file to.
"""
# TODO(larsbutler): document ``arcname``
LOG.info('adding %s' % path)
path = os.path.join(root, path)
relpath = os.path.relpath(path, root)
if arcname is None:
# In the archive, give the file the same name and path.
arcname = relpath
tar.add(path, arcname=arcname)
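# Illustrative note (hypothetical paths): with the default ``arcname`` the
# archive member keeps its path relative to ``root``, so
# _add_file_to_tar('/work/hello', 'ui/style.css', tar) is stored as
# 'ui/style.css'; passing arcname='assets/style.css' would store it under
# that name instead.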
def _find_ui_uploads(zapp, tar):
matches = set()
names = tar.getnames()
for pattern in zapp.get('ui', []):
matches.update(fnmatch.filter(names, pattern))
return sorted(matches)
def _post_job(url, token, data, http_conn=None, response_dict=None,
content_type='application/json', content_length=None,
response_body_buffer=None):
# Modelled after swiftclient.client.post_account.
headers = {'X-Auth-Token': token,
'X-Zerovm-Execute': '1.0',
'Content-Type': content_type}
if content_length:
headers['Content-Length'] = str(content_length)
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = swiftclient.http_connection(url)
conn.request('POST', parsed.path, data, headers)
resp = conn.getresponse()
body = resp.read()
swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
swiftclient.store_response(resp, response_dict)
if response_body_buffer is not None:
response_body_buffer.write(body)
class ZeroCloudConnection(swiftclient.Connection):
"""
An extension of the `swiftclient.Connection` which has the capability of
posting ZeroVM jobs to an instance of ZeroCloud (running on Swift).
"""
def authenticate(self):
"""
Authenticate with the provided credentials and cache the storage URL
and auth token as `self.url` and `self.token`, respectively.
"""
self.url, self.token = self.get_auth()
def post_job(self, job, response_dict=None, response_body_buffer=None):
"""Start a ZeroVM job, using a pre-uploaded zapp
:param object job:
Job description. This will be encoded as JSON and sent to
ZeroCloud.
"""
json_data = json.dumps(job)
LOG.debug('JOB: %s' % json_data)
return self._retry(None, _post_job, json_data,
response_dict=response_dict,
response_body_buffer=response_body_buffer)
def post_zapp(self, data, response_dict=None, content_length=None,
response_body_buffer=None):
return self._retry(None, _post_job, data,
response_dict=response_dict,
content_type='application/x-gzip',
content_length=content_length,
response_body_buffer=response_body_buffer)
def _get_zerocloud_conn(args):
version = args.auth_version
# no version was explicitly requested; try to guess it:
if version is None:
version = _guess_auth_version(args)
if version == '1.0':
if any([arg is None for arg in (args.auth, args.user, args.key)]):
raise zpmlib.ZPMException(
"Version 1 auth requires `--auth`, `--user`, and `--key`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.auth, args.user, args.key)
elif version == '2.0':
if any([arg is None for arg in
(args.os_auth_url, args.os_username, args.os_tenant_name,
args.os_password)]):
raise zpmlib.ZPMException(
"Version 2 auth requires `--os-auth-url`, `--os-username`, "
"`--os-password`, and `--os-tenant-name`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.os_auth_url, args.os_username,
args.os_password,
tenant_name=args.os_tenant_name,
auth_version='2.0')
else:
raise zpmlib.ZPMException(NO_AUTH_MSG)
return conn
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
"""Upload all of the necessary files for a zapp.
Returns the name of an uploaded index file, or the target if no
index.html file was uploaded.
:param bool force:
Force deployment, even if the target container is not empty. This means
that files could be overwritten and could cause consistency problems
with these objects in Swift.
"""
base_container = target.split('/')[0]
try:
_, objects = conn.get_container(base_container)
if not len(objects) == 0:
if not force:
raise zpmlib.ZPMException(
"Target container ('%s') is not empty.\nDeploying to a "
"non-empty container can cause consistency problems with "
"overwritten objects.\nSpecify the flag `--force/-f` to "
"overwrite anyway."
% base_container
)
except swiftclient.exceptions.ClientException:
# container doesn't exist; create it
LOG.info("Container '%s' not found. Creating it...", base_container)
conn.put_container(base_container)
# If we get here, everything with the container is fine.
index = target + '/'
uploads = _generate_uploads(conn, target, zapp_path, auth_opts)
for path, data, content_type in uploads:
if path.endswith('/index.html'):
index = path
container, obj = path.split('/', 1)
conn.put_object(container, obj, data, content_type=content_type)
return index
def _generate_uploads(conn, target, zapp_path, auth_opts):
"""Generate sequence of (container-and-file-path, data, content-type)
tuples.
"""
tar = tarfile.open(zapp_path, 'r:gz')
zapp_config = yaml.safe_load(tar.extractfile('zapp.yaml'))
remote_zapp_path = '%s/%s' % (target, os.path.basename(zapp_path))
swift_url = _get_swift_zapp_url(conn.url, remote_zapp_path)
job = _prepare_job(tar, zapp_config, swift_url)
yield (remote_zapp_path, gzip.open(zapp_path).read(), 'application/x-tar')
yield ('%s/%s' % (target, SYSTEM_MAP_ZAPP_PATH), json.dumps(job),
'application/json')
for path in _find_ui_uploads(zapp_config, tar):
output = tar.extractfile(path).read()
if path.endswith('.tmpl'):
tmpl = jinja2.Template(output.decode('utf-8'))
output = tmpl.render(auth_opts=auth_opts, zapp=zapp_config)
# drop the .tmpl extension
path = os.path.splitext(path)[0]
ui_path = '%s/%s' % (target, path)
yield (ui_path, output, None)
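# Illustrative sketch (hypothetical names): for target='test_container' and
# zapp_path='hello.zapp', the generator above yields roughly, in order:
#
#   ('test_container/hello.zapp', <uncompressed tar bytes>, 'application/x-tar')
#   ('test_container/boot/system.map', <job JSON string>, 'application/json')
#   ('test_container/index.html', <rendered template>, None)
#
# the last tuple assuming the zapp's 'ui' section lists an index.html.tmpl.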
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth
def _guess_auth_version(args):
"""Guess the auth version from first the command line args and/or envvars.
Command line arguments override environment variables, so we check those
first.
Auth v1 arguments:
* ``--auth``
* ``--user``
* ``--key``
Auth v2 arguments:
* ``--os-auth-url``
* ``--os-username``
* ``--os-password``
* ``--os-tenant-name``
If all of the v1 and v2 arguments are specified, default to 1.0 (this is
how ``python-swiftclient`` behaves).
If no auth version can be determined from the command line args, we check
environment variables.
Auth v1 vars:
* ``ST_AUTH``
* ``ST_USER``
* ``ST_KEY``
Auth v2 vars:
* ``OS_AUTH_URL``
* ``OS_USERNAME``
* ``OS_PASSWORD``
* ``OS_TENANT_NAME``
The same rule above applies; if both sets of variables are specified,
default to 1.0.
If no auth version can be determined, return `None`.
:param args:
:class:`argparse.Namespace`, representing the args specified on the
command line.
:returns: '1.0', '2.0', or ``None``
"""
v1 = (args.auth, args.user, args.key)
v2 = (args.os_auth_url, args.os_username, args.os_password,
args.os_tenant_name)
if all(v1) and not all(v2):
return '1.0'
elif all(v2) and not all(v1):
return '2.0'
elif all(v1) and all(v2):
# All vars for v1 and v2 auth are set, so we follow the
# `python-swiftclient` behavior and default to 1.0.
return '1.0'
else:
# deduce from envvars
env = os.environ
v1_env = (env.get('ST_AUTH'), env.get('ST_USER'), env.get('ST_KEY'))
v2_env = (env.get('OS_AUTH_URL'), env.get('OS_USERNAME'),
env.get('OS_PASSWORD'), env.get('OS_TENANT_NAME'))
if all(v1_env) and not all(v2_env):
return '1.0'
if all(v2_env) and not all(v1_env):
return '2.0'
elif all(v1_env) and all(v2_env):
# Same as above, if all v1 and v2 vars are set, default to 1.0.
return '1.0'
else:
# Insufficient auth details have been specified.
return None
def deploy_project(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
ui_auth_version = conn.auth_version
# We can now reset the auth for the web UI, if needed
if args.no_ui_auth:
ui_auth_version = '0.0'
auth = _prepare_auth(ui_auth_version, args, conn)
auth_opts = jinja2.Markup(json.dumps(auth))
deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
force=args.force)
print('app deployed to\n %s/%s' % (conn.url, deploy_index))
if args.execute:
# for compatibility with the option name in 'zpm execute'
args.container = args.target
resp_body_buffer = BytesIO()
resp = execute(args, response_body_buffer=resp_body_buffer)
resp_body_buffer.seek(0)
if resp['status'] < 200 or resp['status'] >= 300:
raise zpmlib.ZPMException(resp_body_buffer.read())
if args.summary:
total_time, exec_table = _get_exec_table(resp)
print('Execution summary:')
print(exec_table)
print('Total time: %s' % total_time)
sys.stdout.write(resp_body_buffer.read())
def _get_exec_table(resp):
"""Build an execution summary table from a job execution response.
:param dict resp:
Response dictionary from job execution. Must contain a ``headers`` key
at least (and will typically contain ``status`` and ``reason`` as
well).
:returns:
Tuple of total execution time (`str`),
``prettytable.PrettyTable`` containing the summary of all node
executions in the job.
"""
headers = resp['headers']
total_time, table_data = _get_exec_table_data(headers)
table = prettytable.PrettyTable(EXEC_TABLE_HEADER)
for row in table_data:
table.add_row(row)
return total_time, table
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data
def execute(args, response_body_buffer=None):
"""Execute a zapp remotely on a ZeroCloud deployment.
:returns:
A `dict` with response data, including the keys 'status', 'reason', and
'headers'.
"""
conn = _get_zerocloud_conn(args)
resp = dict()
if args.container:
job_filename = SYSTEM_MAP_ZAPP_PATH
try:
headers, content = conn.get_object(args.container, job_filename)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise zpmlib.ZPMException("Could not find %s" % exc.http_path)
else:
raise zpmlib.ZPMException(str(exc))
job = json.loads(content)
conn.post_job(job, response_dict=resp,
response_body_buffer=response_body_buffer)
LOG.debug('RESP STATUS: %s %s', resp['status'], resp['reason'])
LOG.debug('RESP HEADERS: %s', resp['headers'])
else:
size = os.path.getsize(args.zapp)
zapp_file = open(args.zapp, 'rb')
data_reader = iter(lambda: zapp_file.read(BUFFER_SIZE), b'')
conn.post_zapp(data_reader, response_dict=resp, content_length=size,
response_body_buffer=response_body_buffer)
zapp_file.close()
return resp
def auth(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
print('Auth token: %s' % conn.token)
print('Storage URL: %s' % conn.url)
| """
:param str swift_service_url:
The Swift service URL returned from a Keystone service catalog.
Example: http://localhost:8080/v1/AUTH_469a9cd20b5a4fc5be9438f66bb5ee04
:param str zapp_path:
<container>/<zapp-file-name>. Example:
test_container/myapp.zapp
Here's a typical usage example, with typical input and output:
>>> swift_service_url = ('http://localhost:8080/v1/'
... 'AUTH_469a9cd20b5a4fc5be9438f66bb5ee04')
>>> zapp_path = 'test_container/myapp.zapp'
>>> _get_swift_zapp_url(swift_service_url, zapp_path)
'swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/test_container/myapp.zapp'
"""
swift_path = urlparse.urlparse(swift_service_url).path
# TODO(larsbutler): Why do we need to check if the path contains '/v1/'?
# This is here due to legacy reasons, but it's not clear to me why this is
# needed.
if swift_path.startswith('/v1/'):
swift_path = swift_path[4:]
return 'swift://%s/%s' % (swift_path, zapp_path) | identifier_body |
zpm.py | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import gzip
import json
import os
import shlex
import sys
import tarfile
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import jinja2
import prettytable
import six
import swiftclient
import yaml
import zpmlib
from zpmlib import util
from zpmlib import zappbundler
from zpmlib import zapptemplate
_DEFAULT_UI_TEMPLATES = ['index.html.tmpl', 'style.css', 'zerocloud.js']
_ZAPP_YAML = 'python-zapp.yaml'
_ZAPP_WITH_UI_YAML = 'python-zapp-with-ui.yaml'
LOG = zpmlib.get_logger(__name__)
BUFFER_SIZE = 65536
#: path/filename of the system.map (job description) in every zapp
SYSTEM_MAP_ZAPP_PATH = 'boot/system.map'
#: Message displayed if insufficient auth settings are specified, either on the
#: command line or in environment variables. Shamelessly copied from
#: ``python-swiftclient``.
NO_AUTH_MSG = """\
Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables
to be set or overridden with -A, -U, or -K.
Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and
OS_TENANT_NAME or OS_TENANT_ID to be set or overridden with --os-auth-url,
--os-username, --os-password, --os-tenant-name or --os-tenant-id. Note:
adding "-V 2" is necessary for this."""
#: Column labels for the execution summary table
EXEC_TABLE_HEADER = [
'Node',
'Status',
'Retcode',
'NodeT',
'SysT',
'UserT',
'DiskReads',
'DiskBytesR',
'DiskWrites',
'DiskBytesW',
'NetworkReads',
'NetworkBytesR',
'NetworkWrites',
'NetworkBytesW',
]
def create_project(location, with_ui=False, template=None):
"""
Create a ZeroVM application project by writing a default `zapp.yaml` in the
specified directory `location`.
:param location:
Directory location to place project files.
:param with_ui:
Defaults to `False`. If `True`, add basic UI template files as well to
``location``.
:param template:
Default: ``None``. If no template is specified, use the default project
template. (See `zpmlib.zapptemplate`.)
:returns: List of created project files.
"""
if os.path.exists(location):
if not os.path.isdir(location):
# target must be a directory
raise RuntimeError("Target `location` must be a directory")
else:
os.makedirs(location)
# Run the template builder and create additional files for the project
# based on the template type. If ``template`` is None, this is essentially a NOP.
# TODO: just use the afc._created_files
created_files = []
with util.AtomicFileCreator() as afc:
for file_type, path, contents in zapptemplate.template(
location, template, with_ui=with_ui):
afc.create_file(file_type, path, contents)
created_files.append(path)
return created_files
def find_project_root():
"""
Starting from the `cwd`, search up the file system hierarchy until a
``zapp.yaml`` file is found. Once the file is found, return the directory
containing it. If no file is found, raise a `RuntimeError`.
"""
root = os.getcwd()
while not os.path.isfile(os.path.join(root, 'zapp.yaml')):
oldroot, root = root, os.path.dirname(root)
if root == oldroot:
raise RuntimeError("no zapp.yaml file found")
return root
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`dict` of the job description
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job
def _get_swift_zapp_url(swift_service_url, zapp_path):
"""
:param str swift_service_url:
The Swift service URL returned from a Keystone service catalog.
Example: http://localhost:8080/v1/AUTH_469a9cd20b5a4fc5be9438f66bb5ee04
:param str zapp_path:
<container>/<zapp-file-name>. Example:
test_container/myapp.zapp
Here's a typical usage example, with typical input and output:
>>> swift_service_url = ('http://localhost:8080/v1/'
... 'AUTH_469a9cd20b5a4fc5be9438f66bb5ee04')
>>> zapp_path = 'test_container/myapp.zapp'
>>> _get_swift_zapp_url(swift_service_url, zapp_path)
'swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/test_container/myapp.zapp'
"""
swift_path = urlparse.urlparse(swift_service_url).path
# TODO(larsbutler): Why do we need to check if the path contains '/v1/'?
# This is here due to legacy reasons, but it's not clear to me why this is
# needed.
if swift_path.startswith('/v1/'):
swift_path = swift_path[4:]
return 'swift://%s/%s' % (swift_path, zapp_path)
def _prepare_job(tar, zapp, zapp_swift_url):
"""
:param tar:
The application .zapp file, as a :class:`tarfile.TarFile` object.
:param dict zapp:
Parsed contents of the application `zapp.yaml` specification, as a
`dict`.
:param str zapp_swift_url:
Path of the .zapp in Swift, which looks like this::
'swift://AUTH_abcdef123/test_container/hello.zapp'
See :func:`_get_swift_zapp_url`.
:returns:
Extracted contents of the boot/system.map with the swift
path to the .zapp added to the `devices` for each `group`.
So if the job looks like this::
[{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'hello'}]
the output will look something like this::
[{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'image',
'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
],
'name': 'hello'}]
"""
fp = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
# NOTE(larsbutler): the `decode` is needed for python3
# compatibility
job = json.loads(fp.read().decode('utf-8'))
device = {'name': 'image', 'path': zapp_swift_url}
for group in job:
group['devices'].append(device)
return job
def bundle_project(root, refresh_deps=False):
"""
Bundle the project under root.
"""
zapp_yaml = os.path.join(root, 'zapp.yaml')
zapp = yaml.safe_load(open(zapp_yaml))
zapp_name = zapp['meta']['name'] + '.zapp'
zapp_tar_path = os.path.join(root, zapp_name)
tar = tarfile.open(zapp_tar_path, 'w:gz')
job = _generate_job_desc(zapp)
job_json = json.dumps(job)
info = tarfile.TarInfo(name='boot/system.map')
# This size is only correct because json.dumps uses
# ensure_ascii=True by default and we thus have a 1-1
# correspondence between Unicode characters and bytes.
info.size = len(job_json)
LOG.info('adding %s' % info.name)
# In Python 3, we cannot use a str or bytes object with addfile,
# we need a BytesIO object. In Python 2, BytesIO is just StringIO.
# Since json.dumps produces an ASCII-only Unicode string in Python
# 3, it is safe to encode it to ASCII.
tar.addfile(info, BytesIO(job_json.encode('ascii')))
_add_file_to_tar(root, 'zapp.yaml', tar)
sections = ('bundling', 'ui')
# Keep track of the files we add, given the configuration in the zapp.yaml.
file_add_count = 0
for section in sections:
for pattern in zapp.get(section, []):
paths = glob.glob(os.path.join(root, pattern))
if len(paths) == 0:
LOG.warning(
"pattern '%(pat)s' in section '%(sec)s' matched no files",
dict(pat=pattern, sec=section)
)
else:
for path in paths:
_add_file_to_tar(root, path, tar)
file_add_count += len(paths)
if file_add_count == 0:
# None of the files specified in the "bundling" or "ui" sections were
# found. Something is wrong.
raise zpmlib.ZPMException(
"None of the files specified in the 'bundling' or 'ui' sections of"
" the zapp.yaml matched anything."
)
# Do template-specific bundling
zappbundler.bundle(root, zapp, tar, refresh_deps=refresh_deps)
tar.close()
print('created %s' % zapp_name)
def _add_file_to_tar(root, path, tar, arcname=None):
"""
:param root:
Root working directory.
:param path:
File path.
:param tar:
Open :class:`tarfile.TarFile` object to add the file to.
"""
# TODO(larsbutler): document ``arcname``
LOG.info('adding %s' % path)
path = os.path.join(root, path)
relpath = os.path.relpath(path, root)
if arcname is None:
# In the archive, give the file the same name and path.
arcname = relpath
tar.add(path, arcname=arcname)
def _find_ui_uploads(zapp, tar):
matches = set()
names = tar.getnames()
for pattern in zapp.get('ui', []):
matches.update(fnmatch.filter(names, pattern))
return sorted(matches)
def _post_job(url, token, data, http_conn=None, response_dict=None,
content_type='application/json', content_length=None,
response_body_buffer=None):
# Modelled after swiftclient.client.post_account.
headers = {'X-Auth-Token': token,
'X-Zerovm-Execute': '1.0',
'Content-Type': content_type}
if content_length:
headers['Content-Length'] = str(content_length)
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = swiftclient.http_connection(url)
conn.request('POST', parsed.path, data, headers)
resp = conn.getresponse()
body = resp.read()
swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
swiftclient.store_response(resp, response_dict)
if response_body_buffer is not None:
response_body_buffer.write(body)
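# Illustrative note: _post_job is not called directly; ZeroCloudConnection
# (below) hands it to swiftclient's retry machinery, which supplies ``url``
# and ``token``. A hypothetical end-to-end call looks like:
#
# >>> conn = ZeroCloudConnection('http://localhost:8080/auth/v1.0',
# ...                            'tester', 'testing')
# >>> conn.authenticate()
# >>> conn.post_job(job)   # job as produced by _prepare_job()
#
# The 'X-Zerovm-Execute: 1.0' header is what makes a ZeroCloud-enabled Swift
# proxy execute the posted job rather than treat it as a plain POST.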
class ZeroCloudConnection(swiftclient.Connection):
"""
An extension of the `swiftclient.Connection` which has the capability of
posting ZeroVM jobs to an instance of ZeroCloud (running on Swift).
"""
def authenticate(self):
"""
Authenticate with the provided credentials and cache the storage URL
and auth token as `self.url` and `self.token`, respectively.
"""
self.url, self.token = self.get_auth()
def post_job(self, job, response_dict=None, response_body_buffer=None):
"""Start a ZeroVM job, using a pre-uploaded zapp
:param object job:
Job description. This will be encoded as JSON and sent to
ZeroCloud.
"""
json_data = json.dumps(job)
LOG.debug('JOB: %s' % json_data)
return self._retry(None, _post_job, json_data,
response_dict=response_dict,
response_body_buffer=response_body_buffer)
def post_zapp(self, data, response_dict=None, content_length=None,
response_body_buffer=None):
return self._retry(None, _post_job, data,
response_dict=response_dict,
content_type='application/x-gzip',
content_length=content_length,
response_body_buffer=response_body_buffer)
def _get_zerocloud_conn(args):
version = args.auth_version
# no version was explicitly requested; try to guess it:
if version is None:
version = _guess_auth_version(args)
if version == '1.0':
if any([arg is None for arg in (args.auth, args.user, args.key)]):
raise zpmlib.ZPMException(
"Version 1 auth requires `--auth`, `--user`, and `--key`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.auth, args.user, args.key)
elif version == '2.0':
if any([arg is None for arg in
(args.os_auth_url, args.os_username, args.os_tenant_name,
args.os_password)]):
raise zpmlib.ZPMException(
"Version 2 auth requires `--os-auth-url`, `--os-username`, "
"`--os-password`, and `--os-tenant-name`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.os_auth_url, args.os_username,
args.os_password,
tenant_name=args.os_tenant_name,
auth_version='2.0')
else:
raise zpmlib.ZPMException(NO_AUTH_MSG)
return conn
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
"""Upload all of the necessary files for a zapp.
Returns the name of an uploaded index file, or the target if no
index.html file was uploaded.
:param bool force:
Force deployment, even if the target container is not empty. This means
that files could be overwritten and could cause consistency problems
with these objects in Swift.
"""
base_container = target.split('/')[0]
try:
_, objects = conn.get_container(base_container)
if not len(objects) == 0:
|
except swiftclient.exceptions.ClientException:
# container doesn't exist; create it
LOG.info("Container '%s' not found. Creating it...", base_container)
conn.put_container(base_container)
# If we get here, everything with the container is fine.
index = target + '/'
uploads = _generate_uploads(conn, target, zapp_path, auth_opts)
for path, data, content_type in uploads:
if path.endswith('/index.html'):
index = path
container, obj = path.split('/', 1)
conn.put_object(container, obj, data, content_type=content_type)
return index
def _generate_uploads(conn, target, zapp_path, auth_opts):
"""Generate sequence of (container-and-file-path, data, content-type)
tuples.
"""
tar = tarfile.open(zapp_path, 'r:gz')
zapp_config = yaml.safe_load(tar.extractfile('zapp.yaml'))
remote_zapp_path = '%s/%s' % (target, os.path.basename(zapp_path))
swift_url = _get_swift_zapp_url(conn.url, remote_zapp_path)
job = _prepare_job(tar, zapp_config, swift_url)
yield (remote_zapp_path, gzip.open(zapp_path).read(), 'application/x-tar')
yield ('%s/%s' % (target, SYSTEM_MAP_ZAPP_PATH), json.dumps(job),
'application/json')
for path in _find_ui_uploads(zapp_config, tar):
output = tar.extractfile(path).read()
if path.endswith('.tmpl'):
tmpl = jinja2.Template(output.decode('utf-8'))
output = tmpl.render(auth_opts=auth_opts, zapp=zapp_config)
# drop the .tmpl extension
path = os.path.splitext(path)[0]
ui_path = '%s/%s' % (target, path)
yield (ui_path, output, None)
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth
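# Illustrative sketch (invented credentials): for v1 auth, the mapping that
# deploy_project() renders into the UI templates would look like:
#
# >>> _prepare_auth('1.0', ns, conn)   # ns has auth/user/key set, as for v1
# {'version': '1.0',
#  'authUrl': 'http://localhost:8080/auth/v1.0',
#  'username': 'tester',
#  'password': 'testing'}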
def _guess_auth_version(args):
"""Guess the auth version from first the command line args and/or envvars.
Command line arguments override environment variables, so we check those
first.
Auth v1 arguments:
* ``--auth``
* ``--user``
* ``--key``
Auth v2 arguments:
* ``--os-auth-url``
* ``--os-username``
* ``--os-password``
* ``--os-tenant-name``
If all of the v1 and v2 arguments are specified, default to 1.0 (this is
how ``python-swiftclient`` behaves).
If no auth version can be determined from the command line args, we check
environment variables.
Auth v1 vars:
* ``ST_AUTH``
* ``ST_USER``
* ``ST_KEY``
Auth v2 vars:
* ``OS_AUTH_URL``
* ``OS_USERNAME``
* ``OS_PASSWORD``
* ``OS_TENANT_NAME``
The same rule above applies; if both sets of variables are specified,
default to 1.0.
If no auth version can be determined, return `None`.
:param args:
:class:`argparse.Namespace`, representing the args specified on the
command line.
:returns: '1.0', '2.0', or ``None``
"""
v1 = (args.auth, args.user, args.key)
v2 = (args.os_auth_url, args.os_username, args.os_password,
args.os_tenant_name)
if all(v1) and not all(v2):
return '1.0'
elif all(v2) and not all(v1):
return '2.0'
elif all(v1) and all(v2):
# All vars for v1 and v2 auth are set, so we follow the
# `python-swiftclient` behavior and default to 1.0.
return '1.0'
else:
# deduce from envvars
env = os.environ
v1_env = (env.get('ST_AUTH'), env.get('ST_USER'), env.get('ST_KEY'))
v2_env = (env.get('OS_AUTH_URL'), env.get('OS_USERNAME'),
env.get('OS_PASSWORD'), env.get('OS_TENANT_NAME'))
if all(v1_env) and not all(v2_env):
return '1.0'
if all(v2_env) and not all(v1_env):
return '2.0'
elif all(v1_env) and all(v2_env):
# Same as above, if all v1 and v2 vars are set, default to 1.0.
return '1.0'
else:
# Insufficient auth details have been specified.
return None
def deploy_project(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
ui_auth_version = conn.auth_version
# We can now reset the auth for the web UI, if needed
if args.no_ui_auth:
ui_auth_version = '0.0'
auth = _prepare_auth(ui_auth_version, args, conn)
auth_opts = jinja2.Markup(json.dumps(auth))
deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
force=args.force)
print('app deployed to\n %s/%s' % (conn.url, deploy_index))
if args.execute:
# for compatibility with the option name in 'zpm execute'
args.container = args.target
resp_body_buffer = BytesIO()
resp = execute(args, response_body_buffer=resp_body_buffer)
resp_body_buffer.seek(0)
if resp['status'] < 200 or resp['status'] >= 300:
raise zpmlib.ZPMException(resp_body_buffer.read())
if args.summary:
total_time, exec_table = _get_exec_table(resp)
print('Execution summary:')
print(exec_table)
print('Total time: %s' % total_time)
sys.stdout.write(resp_body_buffer.read())
def _get_exec_table(resp):
"""Build an execution summary table from a job execution response.
:param dict resp:
Response dictionary from job execution. Must contain a ``headers`` key
at least (and will typically contain ``status`` and ``reason`` as
well).
:returns:
Tuple of total execution time (`str`),
``prettytable.PrettyTable`` containing the summary of all node
executions in the job.
"""
headers = resp['headers']
total_time, table_data = _get_exec_table_data(headers)
table = prettytable.PrettyTable(EXEC_TABLE_HEADER)
for row in table_data:
table.add_row(row)
return total_time, table
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data
def execute(args, response_body_buffer=None):
"""Execute a zapp remotely on a ZeroCloud deployment.
:returns:
A `dict` with response data, including the keys 'status', 'reason', and
'headers'.
"""
conn = _get_zerocloud_conn(args)
resp = dict()
if args.container:
job_filename = SYSTEM_MAP_ZAPP_PATH
try:
headers, content = conn.get_object(args.container, job_filename)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise zpmlib.ZPMException("Could not find %s" % exc.http_path)
else:
raise zpmlib.ZPMException(str(exc))
job = json.loads(content)
conn.post_job(job, response_dict=resp,
response_body_buffer=response_body_buffer)
LOG.debug('RESP STATUS: %s %s', resp['status'], resp['reason'])
LOG.debug('RESP HEADERS: %s', resp['headers'])
else:
size = os.path.getsize(args.zapp)
zapp_file = open(args.zapp, 'rb')
data_reader = iter(lambda: zapp_file.read(BUFFER_SIZE), b'')
conn.post_zapp(data_reader, response_dict=resp, content_length=size,
response_body_buffer=response_body_buffer)
zapp_file.close()
return resp
def auth(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
print('Auth token: %s' % conn.token)
print('Storage URL: %s' % conn.url)
| if not force:
raise zpmlib.ZPMException(
"Target container ('%s') is not empty.\nDeploying to a "
"non-empty container can cause consistency problems with "
"overwritten objects.\nSpecify the flag `--force/-f` to "
"overwrite anyway."
% base_container
) | conditional_block |
spinner.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use Buildable;
use Widget;
use ffi;
use glib;
use glib::StaticType;
use glib::Value;
use glib::object::Downcast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::boxed::Box as Box_;
use std::mem;
use std::mem::transmute;
use std::ptr;
glib_wrapper! {
pub struct Spinner(Object<ffi::GtkSpinner, ffi::GtkSpinnerClass>): Widget, Buildable;
match fn {
get_type => || ffi::gtk_spinner_get_type(),
}
}
impl Spinner {
pub fn new() -> Spinner {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_spinner_new()).downcast_unchecked()
}
}
}
impl Default for Spinner {
fn default() -> Self {
Self::new()
}
}
pub trait SpinnerExt {
fn start(&self);
fn stop(&self);
fn get_property_active(&self) -> bool;
fn set_property_active(&self, active: bool);
fn connect_property_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; | fn start(&self) {
unsafe {
ffi::gtk_spinner_start(self.to_glib_none().0);
}
}
fn stop(&self) {
unsafe {
ffi::gtk_spinner_stop(self.to_glib_none().0);
}
}
fn get_property_active(&self) -> bool {
unsafe {
let mut value = Value::from_type(<bool as StaticType>::static_type());
gobject_ffi::g_object_get_property(self.to_glib_none().0, "active".to_glib_none().0, value.to_glib_none_mut().0);
value.get().unwrap()
}
}
fn set_property_active(&self, active: bool) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "active".to_glib_none().0, Value::from(&active).to_glib_none().0);
}
}
fn connect_property_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
connect(self.to_glib_none().0, "notify::active",
transmute(notify_active_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
}
}
}
unsafe extern "C" fn notify_active_trampoline<P>(this: *mut ffi::GtkSpinner, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<Spinner> {
let f: &&(Fn(&P) + 'static) = transmute(f);
f(&Spinner::from_glib_borrow(this).downcast_unchecked())
} | }
impl<O: IsA<Spinner> + IsA<glib::object::Object>> SpinnerExt for O { | random_line_split |
spinner.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use Buildable;
use Widget;
use ffi;
use glib;
use glib::StaticType;
use glib::Value;
use glib::object::Downcast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::boxed::Box as Box_;
use std::mem;
use std::mem::transmute;
use std::ptr;
glib_wrapper! {
pub struct Spinner(Object<ffi::GtkSpinner, ffi::GtkSpinnerClass>): Widget, Buildable;
match fn {
get_type => || ffi::gtk_spinner_get_type(),
}
}
impl Spinner {
pub fn new() -> Spinner {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_spinner_new()).downcast_unchecked()
}
}
}
impl Default for Spinner {
fn default() -> Self {
Self::new()
}
}
pub trait SpinnerExt {
fn start(&self);
fn stop(&self);
fn get_property_active(&self) -> bool;
fn set_property_active(&self, active: bool);
fn connect_property_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<Spinner> + IsA<glib::object::Object>> SpinnerExt for O {
fn | (&self) {
unsafe {
ffi::gtk_spinner_start(self.to_glib_none().0);
}
}
fn stop(&self) {
unsafe {
ffi::gtk_spinner_stop(self.to_glib_none().0);
}
}
fn get_property_active(&self) -> bool {
unsafe {
let mut value = Value::from_type(<bool as StaticType>::static_type());
gobject_ffi::g_object_get_property(self.to_glib_none().0, "active".to_glib_none().0, value.to_glib_none_mut().0);
value.get().unwrap()
}
}
fn set_property_active(&self, active: bool) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "active".to_glib_none().0, Value::from(&active).to_glib_none().0);
}
}
fn connect_property_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
connect(self.to_glib_none().0, "notify::active",
transmute(notify_active_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
}
}
}
unsafe extern "C" fn notify_active_trampoline<P>(this: *mut ffi::GtkSpinner, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<Spinner> {
let f: &&(Fn(&P) + 'static) = transmute(f);
f(&Spinner::from_glib_borrow(this).downcast_unchecked())
}
| start | identifier_name |
spinner.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use Buildable;
use Widget;
use ffi;
use glib;
use glib::StaticType;
use glib::Value;
use glib::object::Downcast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::boxed::Box as Box_;
use std::mem;
use std::mem::transmute;
use std::ptr;
glib_wrapper! {
pub struct Spinner(Object<ffi::GtkSpinner, ffi::GtkSpinnerClass>): Widget, Buildable;
match fn {
get_type => || ffi::gtk_spinner_get_type(),
}
}
impl Spinner {
pub fn new() -> Spinner {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_spinner_new()).downcast_unchecked()
}
}
}
impl Default for Spinner {
fn default() -> Self {
Self::new()
}
}
pub trait SpinnerExt {
fn start(&self);
fn stop(&self);
fn get_property_active(&self) -> bool;
fn set_property_active(&self, active: bool);
fn connect_property_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<Spinner> + IsA<glib::object::Object>> SpinnerExt for O {
fn start(&self) {
unsafe {
ffi::gtk_spinner_start(self.to_glib_none().0);
}
}
fn stop(&self) {
unsafe {
ffi::gtk_spinner_stop(self.to_glib_none().0);
}
}
fn get_property_active(&self) -> bool {
unsafe {
let mut value = Value::from_type(<bool as StaticType>::static_type());
gobject_ffi::g_object_get_property(self.to_glib_none().0, "active".to_glib_none().0, value.to_glib_none_mut().0);
value.get().unwrap()
}
}
fn set_property_active(&self, active: bool) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "active".to_glib_none().0, Value::from(&active).to_glib_none().0);
}
}
fn connect_property_active_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId |
}
unsafe extern "C" fn notify_active_trampoline<P>(this: *mut ffi::GtkSpinner, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<Spinner> {
let f: &&(Fn(&P) + 'static) = transmute(f);
f(&Spinner::from_glib_borrow(this).downcast_unchecked())
}
| {
unsafe {
let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
connect(self.to_glib_none().0, "notify::active",
transmute(notify_active_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
}
} | identifier_body |
media_rule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An [`@media`][media] rule.
//!
//! [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
use cssparser::SourceLocation;
use media_queries::MediaList;
use shared_lock::{DeepCloneWithLock, Locked, SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt;
use style_traits::ToCss;
use stylearc::Arc;
use stylesheets::CssRules;
/// An [`@media`][media] rule.
///
/// [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
#[derive(Debug)]
pub struct MediaRule {
/// The list of media queries used by this media rule.
pub media_queries: Arc<Locked<MediaList>>,
/// The nested rules to this media rule.
pub rules: Arc<Locked<CssRules>>,
/// The source position where this media rule was found.
pub source_location: SourceLocation,
}
impl ToCssWithGuard for MediaRule {
// Serialization of MediaRule is not specced.
// https://drafts.csswg.org/cssom/#serialize-a-css-rule CSSMediaRule
fn to_css<W>(&self, guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result
where W: fmt::Write {
dest.write_str("@media ")?;
self.media_queries.read_with(guard).to_css(dest)?;
dest.write_str(" {")?;
for rule in self.rules.read_with(guard).0.iter() {
dest.write_str(" ")?;
rule.to_css(guard, dest)?;
}
dest.write_str(" }")
}
}
impl DeepCloneWithLock for MediaRule {
fn | (
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard
) -> Self {
let media_queries = self.media_queries.read_with(guard);
let rules = self.rules.read_with(guard);
MediaRule {
media_queries: Arc::new(lock.wrap(media_queries.clone())),
rules: Arc::new(lock.wrap(rules.deep_clone_with_lock(lock, guard))),
source_location: self.source_location.clone(),
}
}
}
| deep_clone_with_lock | identifier_name |
media_rule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An [`@media`][media] rule.
//!
//! [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
use cssparser::SourceLocation;
use media_queries::MediaList;
use shared_lock::{DeepCloneWithLock, Locked, SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt;
use style_traits::ToCss;
use stylearc::Arc;
use stylesheets::CssRules;
/// An [`@media`][media] rule.
///
/// [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
#[derive(Debug)]
pub struct MediaRule {
/// The list of media queries used by this media rule.
pub media_queries: Arc<Locked<MediaList>>,
/// The nested rules to this media rule.
pub rules: Arc<Locked<CssRules>>,
/// The source position where this media rule was found.
pub source_location: SourceLocation,
}
impl ToCssWithGuard for MediaRule {
// Serialization of MediaRule is not specced.
// https://drafts.csswg.org/cssom/#serialize-a-css-rule CSSMediaRule
fn to_css<W>(&self, guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result
where W: fmt::Write {
dest.write_str("@media ")?;
self.media_queries.read_with(guard).to_css(dest)?;
dest.write_str(" {")?;
for rule in self.rules.read_with(guard).0.iter() {
dest.write_str(" ")?;
rule.to_css(guard, dest)?;
}
dest.write_str(" }")
}
}
impl DeepCloneWithLock for MediaRule {
fn deep_clone_with_lock(
&self, | MediaRule {
media_queries: Arc::new(lock.wrap(media_queries.clone())),
rules: Arc::new(lock.wrap(rules.deep_clone_with_lock(lock, guard))),
source_location: self.source_location.clone(),
}
}
} | lock: &SharedRwLock,
guard: &SharedRwLockReadGuard
) -> Self {
let media_queries = self.media_queries.read_with(guard);
let rules = self.rules.read_with(guard); | random_line_split |
media_rule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An [`@media`][media] rule.
//!
//! [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
use cssparser::SourceLocation;
use media_queries::MediaList;
use shared_lock::{DeepCloneWithLock, Locked, SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt;
use style_traits::ToCss;
use stylearc::Arc;
use stylesheets::CssRules;
/// An [`@media`][media] rule.
///
/// [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
#[derive(Debug)]
pub struct MediaRule {
/// The list of media queries used by this media rule.
pub media_queries: Arc<Locked<MediaList>>,
/// The nested rules to this media rule.
pub rules: Arc<Locked<CssRules>>,
/// The source position where this media rule was found.
pub source_location: SourceLocation,
}
impl ToCssWithGuard for MediaRule {
// Serialization of MediaRule is not specced.
// https://drafts.csswg.org/cssom/#serialize-a-css-rule CSSMediaRule
fn to_css<W>(&self, guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result
where W: fmt::Write {
dest.write_str("@media ")?;
self.media_queries.read_with(guard).to_css(dest)?;
dest.write_str(" {")?;
for rule in self.rules.read_with(guard).0.iter() {
dest.write_str(" ")?;
rule.to_css(guard, dest)?;
}
dest.write_str(" }")
}
}
impl DeepCloneWithLock for MediaRule {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard
) -> Self |
}
| {
let media_queries = self.media_queries.read_with(guard);
let rules = self.rules.read_with(guard);
MediaRule {
media_queries: Arc::new(lock.wrap(media_queries.clone())),
rules: Arc::new(lock.wrap(rules.deep_clone_with_lock(lock, guard))),
source_location: self.source_location.clone(),
}
} | identifier_body |
CSS.js | djsex.css = {
/*
* http://stackoverflow.com/questions/524696/how-to-create-a-style-tag-with-javascript
*/
create: function(stylesheet) {
var head = document.getElementsByTagName('head')[0],
style = document.createElement('style'),
rules = document.createTextNode(stylesheet);
style.type = 'text/css';
if(style.styleSheet)
style.styleSheet.cssText = rules.nodeValue;
else style.appendChild(rules);
head.appendChild(style);
},
appendClass: function (el, classname) {
if(el.className) {
var classes = el.className.split(" ");
var alreadyclassed = false;
classes.forEach(function(thisclassname) { | classes.push(classname);
el.className = classes.join(" ")
} else {
el.className=classname;
}
},
deleteClass: function (el, classname) {
if(el.className) {
var classes = el.className.split(" ");
for(var i=0; i<classes.length; i++) {
if((classes[i]) && (classes[i]==classname))
classes.splice(i,1);
}
el.className = classes.join(" ");
}
},
}; | if(classname == thisclassname)
alreadyclassed=true;
});
if(!alreadyclassed) | random_line_split |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::animated::color::RGBA as AnimatedRGBA;
use values::animated::ToAnimatedValue;
use values::generics::color::Color as GenericColor;
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
/// The computed value of the `color` property.
pub type ColorPropertyValue = RGBA;
/// A computed value for `<color>`.
pub type Color = GenericColor<RGBAColor>;
impl Color {
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA {
let (color, ratios) = match *self {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
GenericColor::Numeric(color) => return color,
GenericColor::Foreground => return fg_color,
GenericColor::Complex(color, ratios) => (color, ratios),
};
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * bg_ratio + fg_alpha * fg_ratio
// color = (self_color * self_alpha * bg_ratio +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = ratios.bg;
let a1 = color.alpha_f32();
let r1 = a1 * color.red_f32();
let g1 = a1 * color.green_f32();
let b1 = a1 * color.blue_f32();
let p2 = ratios.fg;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a <= 0. |
let a = f32::min(a, 1.);
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
{
match *self {
GenericColor::Numeric(color) => color.to_css(dest),
GenericColor::Foreground => CSSParserColor::CurrentColor.to_css(dest),
_ => Ok(()),
}
}
}
impl ToAnimatedValue for RGBA {
type AnimatedValue = AnimatedRGBA;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
AnimatedRGBA::new(
self.red_f32(),
self.green_f32(),
self.blue_f32(),
self.alpha_f32(),
)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
// RGBA::from_floats clamps each component values.
RGBA::from_floats(animated.red, animated.green, animated.blue, animated.alpha)
}
}
| {
return RGBA::transparent();
} | conditional_block |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::animated::color::RGBA as AnimatedRGBA;
use values::animated::ToAnimatedValue;
use values::generics::color::Color as GenericColor;
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
/// The computed value of the `color` property.
pub type ColorPropertyValue = RGBA;
/// A computed value for `<color>`.
pub type Color = GenericColor<RGBAColor>;
impl Color {
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA {
let (color, ratios) = match *self {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
GenericColor::Numeric(color) => return color,
GenericColor::Foreground => return fg_color,
GenericColor::Complex(color, ratios) => (color, ratios),
};
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * bg_ratio + fg_alpha * fg_ratio |
let p1 = ratios.bg;
let a1 = color.alpha_f32();
let r1 = a1 * color.red_f32();
let g1 = a1 * color.green_f32();
let b1 = a1 * color.blue_f32();
let p2 = ratios.fg;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a <= 0. {
return RGBA::transparent();
}
let a = f32::min(a, 1.);
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
{
match *self {
GenericColor::Numeric(color) => color.to_css(dest),
GenericColor::Foreground => CSSParserColor::CurrentColor.to_css(dest),
_ => Ok(()),
}
}
}
impl ToAnimatedValue for RGBA {
type AnimatedValue = AnimatedRGBA;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
AnimatedRGBA::new(
self.red_f32(),
self.green_f32(),
self.blue_f32(),
self.alpha_f32(),
)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
// RGBA::from_floats clamps each component values.
RGBA::from_floats(animated.red, animated.green, animated.blue, animated.alpha)
}
} | // color = (self_color * self_alpha * bg_ratio +
// fg_color * fg_alpha * fg_ratio) / alpha | random_line_split |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::animated::color::RGBA as AnimatedRGBA;
use values::animated::ToAnimatedValue;
use values::generics::color::Color as GenericColor;
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
/// The computed value of the `color` property.
pub type ColorPropertyValue = RGBA;
/// A computed value for `<color>`.
pub type Color = GenericColor<RGBAColor>;
impl Color {
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA |
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
{
match *self {
GenericColor::Numeric(color) => color.to_css(dest),
GenericColor::Foreground => CSSParserColor::CurrentColor.to_css(dest),
_ => Ok(()),
}
}
}
impl ToAnimatedValue for RGBA {
type AnimatedValue = AnimatedRGBA;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
AnimatedRGBA::new(
self.red_f32(),
self.green_f32(),
self.blue_f32(),
self.alpha_f32(),
)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
// RGBA::from_floats clamps each component values.
RGBA::from_floats(animated.red, animated.green, animated.blue, animated.alpha)
}
}
| {
let (color, ratios) = match *self {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
GenericColor::Numeric(color) => return color,
GenericColor::Foreground => return fg_color,
GenericColor::Complex(color, ratios) => (color, ratios),
};
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * bg_ratio + fg_alpha * fg_ratio
// color = (self_color * self_alpha * bg_ratio +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = ratios.bg;
let a1 = color.alpha_f32();
let r1 = a1 * color.red_f32();
let g1 = a1 * color.green_f32();
let b1 = a1 * color.blue_f32();
let p2 = ratios.fg;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a <= 0. {
return RGBA::transparent();
}
let a = f32::min(a, 1.);
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
} | identifier_body |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::animated::color::RGBA as AnimatedRGBA;
use values::animated::ToAnimatedValue;
use values::generics::color::Color as GenericColor;
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
/// The computed value of the `color` property.
pub type ColorPropertyValue = RGBA;
/// A computed value for `<color>`.
pub type Color = GenericColor<RGBAColor>;
impl Color {
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn | (&self, fg_color: RGBA) -> RGBA {
let (color, ratios) = match *self {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
GenericColor::Numeric(color) => return color,
GenericColor::Foreground => return fg_color,
GenericColor::Complex(color, ratios) => (color, ratios),
};
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * bg_ratio + fg_alpha * fg_ratio
// color = (self_color * self_alpha * bg_ratio +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = ratios.bg;
let a1 = color.alpha_f32();
let r1 = a1 * color.red_f32();
let g1 = a1 * color.green_f32();
let b1 = a1 * color.blue_f32();
let p2 = ratios.fg;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a <= 0. {
return RGBA::transparent();
}
let a = f32::min(a, 1.);
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
{
match *self {
GenericColor::Numeric(color) => color.to_css(dest),
GenericColor::Foreground => CSSParserColor::CurrentColor.to_css(dest),
_ => Ok(()),
}
}
}
impl ToAnimatedValue for RGBA {
type AnimatedValue = AnimatedRGBA;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
AnimatedRGBA::new(
self.red_f32(),
self.green_f32(),
self.blue_f32(),
self.alpha_f32(),
)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
// RGBA::from_floats clamps each component values.
RGBA::from_floats(animated.red, animated.green, animated.blue, animated.alpha)
}
}
| to_rgba | identifier_name |
main.rs | #![deny(rust_2018_idioms, deprecated)]
mod commands;
use std::collections::HashSet;
use commands::*;
use serde::Deserialize;
use serenity::async_trait;
use serenity::client::bridge::gateway::GatewayIntents;
use serenity::framework::standard::help_commands::with_embeds;
use serenity::framework::standard::macros::*;
use serenity::framework::standard::*;
use serenity::model::prelude::*;
use serenity::prelude::*;
use tracing::{error, info};
struct | ;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, ctx: Context, _: Ready) {
ctx.set_activity(Activity::playing("charades")).await;
}
}
#[group("General")]
#[commands(ping, avatar_url)]
struct General;
#[help]
async fn my_help(
ctx: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = with_embeds(ctx, msg, args, help_options, groups, owners).await;
Ok(())
}
#[hook]
async fn dispatch_error(_: &Context, _: &Message, error: DispatchError) {
error!("dispatch error: {:?}", error);
}
#[hook]
async fn after(ctx: &Context, msg: &Message, command: &str, error: CommandResult) {
info!("Command `{}` was used by {}", command, msg.author.name);
if let Err(err) = error {
let err = err.to_string();
if err.starts_with("user : ") {
let without_user = &err["user : ".len()..];
let _ = msg.channel_id.say(&ctx.http, without_user).await;
} else {
error!("`{:?}`", err);
}
}
}
#[derive(Deserialize)]
struct Config {
token: String,
prefix: String,
}
fn read_config() -> Result<Config, Box<dyn std::error::Error>> {
let path = std::env::var("KITTY_CONFIG").unwrap_or_else(|_| "config.json".to_string());
let content = std::fs::read_to_string(path)?;
Ok(serde_json::from_str(&content)?)
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", "info");
}
tracing_subscriber::fmt::init();
let config = read_config()?;
let framework = StandardFramework::new()
.configure(|c| c.prefix(&config.prefix))
.on_dispatch_error(dispatch_error)
.after(after)
.help(&MY_HELP)
.group(&GENERAL_GROUP);
let mut client = Client::builder(&config.token)
.intents(GatewayIntents::all())
.event_handler(Handler)
.framework(framework)
.await?;
let shard_manager = client.shard_manager.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
shard_manager.lock().await.shutdown_all().await;
});
client.start_autosharded().await?;
Ok(())
}
| Handler | identifier_name |
main.rs | #![deny(rust_2018_idioms, deprecated)]
mod commands;
use std::collections::HashSet;
use commands::*;
use serde::Deserialize;
use serenity::async_trait;
use serenity::client::bridge::gateway::GatewayIntents;
use serenity::framework::standard::help_commands::with_embeds;
use serenity::framework::standard::macros::*;
use serenity::framework::standard::*;
use serenity::model::prelude::*;
use serenity::prelude::*;
use tracing::{error, info};
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, ctx: Context, _: Ready) {
ctx.set_activity(Activity::playing("charades")).await;
}
}
#[group("General")]
#[commands(ping, avatar_url)]
struct General;
#[help]
async fn my_help(
ctx: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = with_embeds(ctx, msg, args, help_options, groups, owners).await;
Ok(())
}
#[hook]
async fn dispatch_error(_: &Context, _: &Message, error: DispatchError) {
error!("dispatch error: {:?}", error);
}
#[hook]
async fn after(ctx: &Context, msg: &Message, command: &str, error: CommandResult) {
info!("Command `{}` was used by {}", command, msg.author.name);
if let Err(err) = error {
let err = err.to_string();
if err.starts_with("user : ") {
let without_user = &err["user : ".len()..];
let _ = msg.channel_id.say(&ctx.http, without_user).await;
} else |
}
}
#[derive(Deserialize)]
struct Config {
token: String,
prefix: String,
}
fn read_config() -> Result<Config, Box<dyn std::error::Error>> {
let path = std::env::var("KITTY_CONFIG").unwrap_or_else(|_| "config.json".to_string());
let content = std::fs::read_to_string(path)?;
Ok(serde_json::from_str(&content)?)
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", "info");
}
tracing_subscriber::fmt::init();
let config = read_config()?;
let framework = StandardFramework::new()
.configure(|c| c.prefix(&config.prefix))
.on_dispatch_error(dispatch_error)
.after(after)
.help(&MY_HELP)
.group(&GENERAL_GROUP);
let mut client = Client::builder(&config.token)
.intents(GatewayIntents::all())
.event_handler(Handler)
.framework(framework)
.await?;
let shard_manager = client.shard_manager.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
shard_manager.lock().await.shutdown_all().await;
});
client.start_autosharded().await?;
Ok(())
}
| {
error!("`{:?}`", err);
} | conditional_block |
main.rs | #![deny(rust_2018_idioms, deprecated)]
mod commands;
use std::collections::HashSet;
use commands::*;
use serde::Deserialize;
use serenity::async_trait;
use serenity::client::bridge::gateway::GatewayIntents;
use serenity::framework::standard::help_commands::with_embeds;
use serenity::framework::standard::macros::*;
use serenity::framework::standard::*;
use serenity::model::prelude::*;
use serenity::prelude::*;
use tracing::{error, info};
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, ctx: Context, _: Ready) {
ctx.set_activity(Activity::playing("charades")).await;
}
}
#[group("General")]
#[commands(ping, avatar_url)]
struct General;
#[help]
async fn my_help(
ctx: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = with_embeds(ctx, msg, args, help_options, groups, owners).await;
Ok(())
}
#[hook]
async fn dispatch_error(_: &Context, _: &Message, error: DispatchError) {
error!("dispatch error: {:?}", error);
}
#[hook]
async fn after(ctx: &Context, msg: &Message, command: &str, error: CommandResult) {
info!("Command `{}` was used by {}", command, msg.author.name);
if let Err(err) = error {
let err = err.to_string();
if err.starts_with("user : ") {
let without_user = &err["user : ".len()..];
let _ = msg.channel_id.say(&ctx.http, without_user).await;
} else {
error!("`{:?}`", err);
}
}
}
#[derive(Deserialize)]
struct Config {
token: String,
prefix: String,
}
fn read_config() -> Result<Config, Box<dyn std::error::Error>> {
let path = std::env::var("KITTY_CONFIG").unwrap_or_else(|_| "config.json".to_string());
let content = std::fs::read_to_string(path)?;
Ok(serde_json::from_str(&content)?)
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", "info");
}
tracing_subscriber::fmt::init();
let config = read_config()?;
let framework = StandardFramework::new()
.configure(|c| c.prefix(&config.prefix))
.on_dispatch_error(dispatch_error)
.after(after)
.help(&MY_HELP)
.group(&GENERAL_GROUP);
let mut client = Client::builder(&config.token)
.intents(GatewayIntents::all())
.event_handler(Handler)
.framework(framework)
.await?;
| let shard_manager = client.shard_manager.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
shard_manager.lock().await.shutdown_all().await;
});
client.start_autosharded().await?;
Ok(())
} | random_line_split | |
p2p_feefilter.py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received by test connection for all txs at
# feerate of 20 sat/byte
node1.settxfee(Decimal("0.02000000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(1500000))
# Test that txs are still being received by test connection (paying 15 sat/byte)
node1.settxfee(Decimal("0.01500000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.01000000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.02000000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
| FeeFilterTest().main() | conditional_block | |
p2p_feefilter.py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn()) | # Test that invs are received by test connection for all txs at
# feerate of 20 sat/byte
node1.settxfee(Decimal("0.02000000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(1500000))
# Test that txs are still being received by test connection (paying 15 sat/byte)
node1.settxfee(Decimal("0.01500000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.01000000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.02000000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main() | random_line_split | |
p2p_feefilter.py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class | (P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received by test connection for all txs at
# feerate of 20 sat/byte
node1.settxfee(Decimal("0.02000000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(1500000))
# Test that txs are still being received by test connection (paying 15 sat/byte)
node1.settxfee(Decimal("0.01500000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.01000000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.02000000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| TestP2PConn | identifier_name |
p2p_feefilter.py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
def hashToHex(hash):
|
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received by test connection for all txs at
# feerate of 20 sat/byte
node1.settxfee(Decimal("0.02000000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(1500000))
# Test that txs are still being received by test connection (paying 15 sat/byte)
node1.settxfee(Decimal("0.01500000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.01000000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.02000000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| return format(hash, '064x') | identifier_body |
lights.rs | extern crate nalgebra as na;
use glium;
use glium::uniforms::UniformValue;
const MAX_SPHERICAL_LIGHTS: u32 = 32;
#[derive(Copy, Clone)]
pub struct SphericalLight
{
position: [f32; 3],
color: [f32; 3],
range: f32,
}
implement_uniform_block!(SphericalLight, position, color, range);
//Testing, remove if it doesn't work TODO
impl glium::uniforms::Uniforms for SphericalLight
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("position", UniformValue::Vec3(self.position.clone()));
f("position", UniformValue::Vec3(self.color.clone()));
f("color", UniformValue::Float(self.range.clone()));
}
}
impl SphericalLight
{
pub fn new() -> SphericalLight
{
SphericalLight
{
position: [0.0; 3],
color: [0.0; 3],
range: 0.0,
}
}
pub fn set_position(mut self, position: [f32; 3])
{
self.position = position;
}
}
#[derive(Copy, Clone)]
pub struct LightUniform
{
sphere_light_count: u32,
sphere_lights: [SphericalLight; MAX_SPHERICAL_LIGHTS as usize],
padding: [f32; 3],
}
implement_uniform_block!(LightUniform, sphere_light_count, sphere_lights, padding);
impl glium::uniforms::Uniforms for LightUniform
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("sphere_light_count", UniformValue::UnsignedInt(self.sphere_light_count));
for i in 0..MAX_SPHERICAL_LIGHTS
{
f(&format!(
"sphere_lights[{}].position", i),
UniformValue::Vec3(self.sphere_lights[i as usize].position.clone())
);
f(&format!(
"sphere_lights[{}].color", i),
UniformValue::Vec3(self.sphere_lights[i as usize].color.clone())
);
f(&format!(
"sphere_lights[{}].range", i),
UniformValue::Float(self.sphere_lights[i as usize].range)
);
}
}
}
impl LightUniform
{
pub fn | () -> LightUniform
{
LightUniform
{
sphere_light_count: 0,
sphere_lights: [SphericalLight::new(); MAX_SPHERICAL_LIGHTS as usize],
padding: [0.0; 3]
}
}
pub fn add_light(&mut self, light: SphericalLight)
{
if self.sphere_lights.len() < MAX_SPHERICAL_LIGHTS as usize
{
self.sphere_lights[self.sphere_light_count as usize] = light;
self.sphere_light_count += 1;
}
else
{
panic!("Too many lights have been added");
}
}
}
| new | identifier_name |
lights.rs | extern crate nalgebra as na;
use glium;
use glium::uniforms::UniformValue;
const MAX_SPHERICAL_LIGHTS: u32 = 32;
#[derive(Copy, Clone)]
pub struct SphericalLight
{
position: [f32; 3],
color: [f32; 3],
range: f32,
}
implement_uniform_block!(SphericalLight, position, color, range);
//Testing, remove if it doesn't work TODO
impl glium::uniforms::Uniforms for SphericalLight
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("position", UniformValue::Vec3(self.position.clone()));
f("position", UniformValue::Vec3(self.color.clone()));
f("color", UniformValue::Float(self.range.clone()));
}
}
impl SphericalLight
{
pub fn new() -> SphericalLight
{
SphericalLight
{
position: [0.0; 3],
color: [0.0; 3],
range: 0.0,
}
}
pub fn set_position(mut self, position: [f32; 3])
{
self.position = position;
}
}
#[derive(Copy, Clone)]
pub struct LightUniform
{
sphere_light_count: u32,
sphere_lights: [SphericalLight; MAX_SPHERICAL_LIGHTS as usize],
padding: [f32; 3],
}
implement_uniform_block!(LightUniform, sphere_light_count, sphere_lights, padding);
impl glium::uniforms::Uniforms for LightUniform
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("sphere_light_count", UniformValue::UnsignedInt(self.sphere_light_count));
for i in 0..MAX_SPHERICAL_LIGHTS
{
f(&format!(
"sphere_lights[{}].position", i),
UniformValue::Vec3(self.sphere_lights[i as usize].position.clone())
);
f(&format!(
"sphere_lights[{}].color", i),
UniformValue::Vec3(self.sphere_lights[i as usize].color.clone())
);
f(&format!(
"sphere_lights[{}].range", i),
UniformValue::Float(self.sphere_lights[i as usize].range)
);
}
}
}
impl LightUniform
{
pub fn new() -> LightUniform
{
LightUniform
{
sphere_light_count: 0,
sphere_lights: [SphericalLight::new(); MAX_SPHERICAL_LIGHTS as usize],
padding: [0.0; 3]
}
}
pub fn add_light(&mut self, light: SphericalLight)
|
}
| {
if self.sphere_lights.len() < MAX_SPHERICAL_LIGHTS as usize
{
self.sphere_lights[self.sphere_light_count as usize] = light;
self.sphere_light_count += 1;
}
else
{
panic!("Too many lights have been added");
}
} | identifier_body |
lights.rs | extern crate nalgebra as na;
use glium;
use glium::uniforms::UniformValue;
const MAX_SPHERICAL_LIGHTS: u32 = 32;
#[derive(Copy, Clone)]
pub struct SphericalLight
{
position: [f32; 3],
color: [f32; 3],
range: f32,
}
implement_uniform_block!(SphericalLight, position, color, range);
//Testing, remove if it doesn't work TODO
impl glium::uniforms::Uniforms for SphericalLight
{ | }
impl SphericalLight
{
pub fn new() -> SphericalLight
{
SphericalLight
{
position: [0.0; 3],
color: [0.0; 3],
range: 0.0,
}
}
pub fn set_position(mut self, position: [f32; 3])
{
self.position = position;
}
}
#[derive(Copy, Clone)]
pub struct LightUniform
{
sphere_light_count: u32,
sphere_lights: [SphericalLight; MAX_SPHERICAL_LIGHTS as usize],
padding: [f32; 3],
}
implement_uniform_block!(LightUniform, sphere_light_count, sphere_lights, padding);
impl glium::uniforms::Uniforms for LightUniform
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("sphere_light_count", UniformValue::UnsignedInt(self.sphere_light_count));
for i in 0..MAX_SPHERICAL_LIGHTS
{
f(&format!(
"sphere_lights[{}].position", i),
UniformValue::Vec3(self.sphere_lights[i as usize].position.clone())
);
f(&format!(
"sphere_lights[{}].color", i),
UniformValue::Vec3(self.sphere_lights[i as usize].color.clone())
);
f(&format!(
"sphere_lights[{}].range", i),
UniformValue::Float(self.sphere_lights[i as usize].range)
);
}
}
}
impl LightUniform
{
pub fn new() -> LightUniform
{
LightUniform
{
sphere_light_count: 0,
sphere_lights: [SphericalLight::new(); MAX_SPHERICAL_LIGHTS as usize],
padding: [0.0; 3]
}
}
pub fn add_light(&mut self, light: SphericalLight)
{
if self.sphere_lights.len() < MAX_SPHERICAL_LIGHTS as usize
{
self.sphere_lights[self.sphere_light_count as usize] = light;
self.sphere_light_count += 1;
}
else
{
panic!("Too many lights have been added");
}
}
} | fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("position", UniformValue::Vec3(self.position.clone()));
f("position", UniformValue::Vec3(self.color.clone()));
f("color", UniformValue::Float(self.range.clone()));
} | random_line_split |
lights.rs | extern crate nalgebra as na;
use glium;
use glium::uniforms::UniformValue;
const MAX_SPHERICAL_LIGHTS: u32 = 32;
#[derive(Copy, Clone)]
pub struct SphericalLight
{
position: [f32; 3],
color: [f32; 3],
range: f32,
}
implement_uniform_block!(SphericalLight, position, color, range);
//Testing, remove if it doesn't work TODO
impl glium::uniforms::Uniforms for SphericalLight
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("position", UniformValue::Vec3(self.position.clone()));
f("position", UniformValue::Vec3(self.color.clone()));
f("color", UniformValue::Float(self.range.clone()));
}
}
impl SphericalLight
{
pub fn new() -> SphericalLight
{
SphericalLight
{
position: [0.0; 3],
color: [0.0; 3],
range: 0.0,
}
}
pub fn set_position(mut self, position: [f32; 3])
{
self.position = position;
}
}
#[derive(Copy, Clone)]
pub struct LightUniform
{
sphere_light_count: u32,
sphere_lights: [SphericalLight; MAX_SPHERICAL_LIGHTS as usize],
padding: [f32; 3],
}
implement_uniform_block!(LightUniform, sphere_light_count, sphere_lights, padding);
impl glium::uniforms::Uniforms for LightUniform
{
fn visit_values<'a, F: FnMut(&str, UniformValue<'a>)>(&'a self, mut f: F) {
f("sphere_light_count", UniformValue::UnsignedInt(self.sphere_light_count));
for i in 0..MAX_SPHERICAL_LIGHTS
{
f(&format!(
"sphere_lights[{}].position", i),
UniformValue::Vec3(self.sphere_lights[i as usize].position.clone())
);
f(&format!(
"sphere_lights[{}].color", i),
UniformValue::Vec3(self.sphere_lights[i as usize].color.clone())
);
f(&format!(
"sphere_lights[{}].range", i),
UniformValue::Float(self.sphere_lights[i as usize].range)
);
}
}
}
impl LightUniform
{
pub fn new() -> LightUniform
{
LightUniform
{
sphere_light_count: 0,
sphere_lights: [SphericalLight::new(); MAX_SPHERICAL_LIGHTS as usize],
padding: [0.0; 3]
}
}
pub fn add_light(&mut self, light: SphericalLight)
{
if self.sphere_lights.len() < MAX_SPHERICAL_LIGHTS as usize
|
else
{
panic!("Too many lights have been added");
}
}
}
| {
self.sphere_lights[self.sphere_light_count as usize] = light;
self.sphere_light_count += 1;
} | conditional_block |
reverse-string.rs | //! Tests for reverse-string
//!
//! Generated by [script][script] using [canonical data][canonical-data]
//!
//! [script]: https://github.com/exercism/rust/blob/master/bin/init_exercise.py
//! [canonical-data]: https://raw.githubusercontent.com/exercism/problem-specifications/master/exercises/reverse-string/canonical_data.json
extern crate reverse_string;
use reverse_string::*;
/// Process a single test case for the property `reverse`
fn process_reverse_case(input: &str, expected: &str) {
assert_eq!(&reverse(input), expected)
}
#[test]
/// empty string
fn test_empty_string() {
process_reverse_case("", "");
}
#[test]
/// a word
fn test_a_word() {
process_reverse_case("robot", "tobor");
}
#[test]
/// a capitalized word
fn test_a_capitalized_word() |
#[test]
/// a sentence with punctuation
fn test_a_sentence_with_punctuation() {
process_reverse_case("I'm hungry!", "!yrgnuh m'I");
}
#[test]
/// a palindrome
fn test_a_palindrome() {
process_reverse_case("racecar", "racecar");
}
| {
process_reverse_case("Ramen", "nemaR");
} | identifier_body |
reverse-string.rs | //! Tests for reverse-string
//!
//! Generated by [script][script] using [canonical data][canonical-data]
//!
//! [script]: https://github.com/exercism/rust/blob/master/bin/init_exercise.py
//! [canonical-data]: https://raw.githubusercontent.com/exercism/problem-specifications/master/exercises/reverse-string/canonical_data.json
extern crate reverse_string;
use reverse_string::*;
/// Process a single test case for the property `reverse`
fn process_reverse_case(input: &str, expected: &str) {
assert_eq!(&reverse(input), expected)
}
#[test]
/// empty string
fn | () {
process_reverse_case("", "");
}
#[test]
/// a word
fn test_a_word() {
process_reverse_case("robot", "tobor");
}
#[test]
/// a capitalized word
fn test_a_capitalized_word() {
process_reverse_case("Ramen", "nemaR");
}
#[test]
/// a sentence with punctuation
fn test_a_sentence_with_punctuation() {
process_reverse_case("I'm hungry!", "!yrgnuh m'I");
}
#[test]
/// a palindrome
fn test_a_palindrome() {
process_reverse_case("racecar", "racecar");
}
| test_empty_string | identifier_name |
reverse-string.rs | //! Tests for reverse-string
//!
//! Generated by [script][script] using [canonical data][canonical-data]
//!
//! [script]: https://github.com/exercism/rust/blob/master/bin/init_exercise.py
//! [canonical-data]: https://raw.githubusercontent.com/exercism/problem-specifications/master/exercises/reverse-string/canonical_data.json
extern crate reverse_string;
use reverse_string::*;
/// Process a single test case for the property `reverse`
fn process_reverse_case(input: &str, expected: &str) {
assert_eq!(&reverse(input), expected)
}
#[test]
/// empty string
fn test_empty_string() {
process_reverse_case("", "");
}
| fn test_a_word() {
process_reverse_case("robot", "tobor");
}
#[test]
/// a capitalized word
fn test_a_capitalized_word() {
process_reverse_case("Ramen", "nemaR");
}
#[test]
/// a sentence with punctuation
fn test_a_sentence_with_punctuation() {
process_reverse_case("I'm hungry!", "!yrgnuh m'I");
}
#[test]
/// a palindrome
fn test_a_palindrome() {
process_reverse_case("racecar", "racecar");
} |
#[test]
/// a word | random_line_split |
simple_receiver.rs | // Copyright 2015 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License,
// version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which
// licence you accepted on initial access to the Software (the "Licences").
//
// By contributing code to the SAFE Network Software, or to this project generally, you agree to be
// bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the
// Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR.
//
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
//
// Please review the Licences for the specific language governing permissions and limitations
// relating to use of the SAFE Network Software.
#![feature(convert, negate_unsigned, rustc_private)]
#![forbid(warnings)]
#![deny(bad_style, deprecated, drop_with_repr_extern, improper_ctypes, non_shorthand_field_patterns,
overflowing_literals, plugin_as_library, private_no_mangle_fns, private_no_mangle_statics,
raw_pointer_derive, stable_features, unconditional_recursion, unknown_lints,
unsafe_code, unused_allocation, unused_attributes,
unused_comparisons, unused_features, unused_parens, while_true)]
#![warn(trivial_casts, trivial_numeric_casts, unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate crust;
use std::str::FromStr;
use std::sync::mpsc::channel;
use crust::{ConnectionManager, write_config_file, Port};
fn fibonacci_number(n: u64) -> u64 {
match n {
0 => 0,
1 => 1,
n => fibonacci_number(n - 1) + fibonacci_number(n - 2)
}
}
fn main() {
match env_logger::init() {
Ok(()) => {},
Err(e) => debug!("Error initialising logger; continuing without: {:?}", e)
}
let _ = write_config_file(None, None, Some(9999)).unwrap();
// We receive events (e.g. new connection, message received) from the ConnectionManager via an
// asynchronous channel.
let (channel_sender, channel_receiver) = channel();
let mut connection_manager = ConnectionManager::new(channel_sender);
// Start listening. Try to listen on port 8888 for TCP and for UDP broadcasts (beacon) on 9999.
let listening_endpoints = match connection_manager.start_accepting(vec![Port::Tcp(8888u16)]) {
Ok(endpoints) => endpoints,
Err(why) => {
println!("ConnectionManager failed to start listening on TCP port 8888: {}", why);
std::process::exit(1);
}
};
print!("Listening for new connections on ");
for endpoint in &listening_endpoints {
print!("{:?}, ", *endpoint);
}; | match event {
crust::Event::NewMessage(endpoint, bytes) => {
// For this example, we only expect to receive encoded `u8`s
let requested_value = match String::from_utf8(bytes) {
Ok(message) => {
match u8::from_str(message.as_str()) {
Ok(value) => value,
Err(why) => {
println!("Error parsing message: {}", why);
continue;
},
}
},
Err(why) => {
println!("Error receiving message: {}", why);
continue;
},
};
// Calculate the Fibonacci number for the requested value and respond with that
let fibonacci_result = fibonacci_number(requested_value as u64);
println!("Received \"{}\" from {:?} - replying with \"{}\"", requested_value,
endpoint, fibonacci_result);
let response =
format!("The Fibonacci number for {} is {}", requested_value, fibonacci_result);
if let Err(why) = connection_manager.send(endpoint.clone(), response.into_bytes()) {
println!("Failed to send reply to {:?}: {}", endpoint, why)
}
},
crust::Event::NewConnection(endpoint) => {
println!("New connection made to {:?}", endpoint);
},
crust::Event::LostConnection(endpoint) => {
println!("Lost connection to {:?}", endpoint);
},
crust::Event::NewBootstrapConnection(endpoint) => {
println!("New Bootstrap connection made to {:?}", endpoint);
}
}
}
println!("Stopped receiving.");
} | println!("Run the simple_sender example in another terminal to send messages to this node.");
// Receive the next event
while let Ok(event) = channel_receiver.recv() {
// Handle the event | random_line_split |
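The doubly recursive `fibonacci_number` above runs in exponential time, which matters because the requested value comes straight off the wire; an iterative variant (an alternative sketch, not part of the original example) computes the same values in linear time:

```rust
/// Iterative Fibonacci; agrees with fibonacci_number(n) for every n whose
/// result fits in a u64 (n <= 93), without the exponential recursion.
fn fibonacci_iter(n: u64) -> u64 {
    let (mut a, mut b) = (0u64, 1u64);
    for _ in 0..n {
        let next = a + b;
        a = b;
        b = next;
    }
    a
}
```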
simple_receiver.rs | // Copyright 2015 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License,
// version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which
// licence you accepted on initial access to the Software (the "Licences").
//
// By contributing code to the SAFE Network Software, or to this project generally, you agree to be
// bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the
// Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR.
//
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
//
// Please review the Licences for the specific language governing permissions and limitations
// relating to use of the SAFE Network Software.
#![feature(convert, negate_unsigned, rustc_private)]
#![forbid(warnings)]
#![deny(bad_style, deprecated, drop_with_repr_extern, improper_ctypes, non_shorthand_field_patterns,
overflowing_literals, plugin_as_library, private_no_mangle_fns, private_no_mangle_statics,
raw_pointer_derive, stable_features, unconditional_recursion, unknown_lints,
unsafe_code, unused_allocation, unused_attributes,
unused_comparisons, unused_features, unused_parens, while_true)]
#![warn(trivial_casts, trivial_numeric_casts, unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate crust;
use std::str::FromStr;
use std::sync::mpsc::channel;
use crust::{ConnectionManager, write_config_file, Port};
fn fibonacci_number(n: u64) -> u64 {
match n {
0 => 0,
1 => 1,
n => fibonacci_number(n - 1) + fibonacci_number(n - 2)
}
}
fn main() | {
match env_logger::init() {
Ok(()) => {},
Err(e) => debug!("Error initialising logger; continuing without: {:?}", e)
}
let _ = write_config_file(None, None, Some(9999)).unwrap();
// We receive events (e.g. new connection, message received) from the ConnectionManager via an
// asynchronous channel.
let (channel_sender, channel_receiver) = channel();
let mut connection_manager = ConnectionManager::new(channel_sender);
// Start listening. Try to listen on port 8888 for TCP and for UDP broadcasts (beacon) on 9999.
let listening_endpoints = match connection_manager.start_accepting(vec![Port::Tcp(8888u16)]) {
Ok(endpoints) => endpoints,
Err(why) => {
println!("ConnectionManager failed to start listening on TCP port 8888: {}", why);
std::process::exit(1);
}
};
print!("Listening for new connections on ");
for endpoint in &listening_endpoints {
print!("{:?}, ", *endpoint);
};
println!("Run the simple_sender example in another terminal to send messages to this node.");
// Receive the next event
while let Ok(event) = channel_receiver.recv() {
// Handle the event
match event {
crust::Event::NewMessage(endpoint, bytes) => {
// For this example, we only expect to receive encoded `u8`s
let requested_value = match String::from_utf8(bytes) {
Ok(message) => {
match u8::from_str(message.as_str()) {
Ok(value) => value,
Err(why) => {
println!("Error parsing message: {}", why);
continue;
},
}
},
Err(why) => {
println!("Error receiving message: {}", why);
continue;
},
};
// Calculate the Fibonacci number for the requested value and respond with that
let fibonacci_result = fibonacci_number(requested_value as u64);
println!("Received \"{}\" from {:?} - replying with \"{}\"", requested_value,
endpoint, fibonacci_result);
let response =
format!("The Fibonacci number for {} is {}", requested_value, fibonacci_result);
if let Err(why) = connection_manager.send(endpoint.clone(), response.into_bytes()) {
println!("Failed to send reply to {:?}: {}", endpoint, why)
}
},
crust::Event::NewConnection(endpoint) => {
println!("New connection made to {:?}", endpoint);
},
crust::Event::LostConnection(endpoint) => {
println!("Lost connection to {:?}", endpoint);
},
crust::Event::NewBootstrapConnection(endpoint) => {
println!("New Bootstrap connection made to {:?}", endpoint);
}
}
}
println!("Stopped receiving.");
} | identifier_body | |
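A few spot checks pin down `fibonacci_number`'s behaviour; the values below are ordinary Fibonacci numbers, and the `#[cfg(test)]` module is an addition for illustration rather than part of the example:

```rust
#[cfg(test)]
mod tests {
    use super::fibonacci_number;

    #[test]
    fn small_fibonacci_values() {
        assert_eq!(fibonacci_number(0), 0);
        assert_eq!(fibonacci_number(1), 1);
        assert_eq!(fibonacci_number(10), 55);
        assert_eq!(fibonacci_number(20), 6765);
    }
}
```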
simple_receiver.rs | // Copyright 2015 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License,
// version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which
// licence you accepted on initial access to the Software (the "Licences").
//
// By contributing code to the SAFE Network Software, or to this project generally, you agree to be
// bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the
// Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR.
//
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
//
// Please review the Licences for the specific language governing permissions and limitations
// relating to use of the SAFE Network Software.
#![feature(convert, negate_unsigned, rustc_private)]
#![forbid(warnings)]
#![deny(bad_style, deprecated, drop_with_repr_extern, improper_ctypes, non_shorthand_field_patterns,
overflowing_literals, plugin_as_library, private_no_mangle_fns, private_no_mangle_statics,
raw_pointer_derive, stable_features, unconditional_recursion, unknown_lints,
unsafe_code, unused_allocation, unused_attributes,
unused_comparisons, unused_features, unused_parens, while_true)]
#![warn(trivial_casts, trivial_numeric_casts, unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate crust;
use std::str::FromStr;
use std::sync::mpsc::channel;
use crust::{ConnectionManager, write_config_file, Port};
fn fibonacci_number(n: u64) -> u64 {
match n {
0 => 0,
1 => 1,
n => fibonacci_number(n - 1) + fibonacci_number(n - 2)
}
}
fn | () {
match env_logger::init() {
Ok(()) => {},
Err(e) => debug!("Error initialising logger; continuing without: {:?}", e)
}
let _ = write_config_file(None, None, Some(9999)).unwrap();
// We receive events (e.g. new connection, message received) from the ConnectionManager via an
// asynchronous channel.
let (channel_sender, channel_receiver) = channel();
let mut connection_manager = ConnectionManager::new(channel_sender);
// Start listening. Try to listen on port 8888 for TCP and for UDP broadcasts (beacon) on 9999.
let listening_endpoints = match connection_manager.start_accepting(vec![Port::Tcp(8888u16)]) {
Ok(endpoints) => endpoints,
Err(why) => {
println!("ConnectionManager failed to start listening on TCP port 8888: {}", why);
std::process::exit(1);
}
};
print!("Listening for new connections on ");
for endpoint in &listening_endpoints {
print!("{:?}, ", *endpoint);
};
println!("Run the simple_sender example in another terminal to send messages to this node.");
// Receive the next event
while let Ok(event) = channel_receiver.recv() {
// Handle the event
match event {
crust::Event::NewMessage(endpoint, bytes) => {
// For this example, we only expect to receive encoded `u8`s
let requested_value = match String::from_utf8(bytes) {
Ok(message) => {
match u8::from_str(message.as_str()) {
Ok(value) => value,
Err(why) => {
println!("Error parsing message: {}", why);
continue;
},
}
},
Err(why) => {
println!("Error receiving message: {}", why);
continue;
},
};
// Calculate the Fibonacci number for the requested value and respond with that
let fibonacci_result = fibonacci_number(requested_value as u64);
println!("Received \"{}\" from {:?} - replying with \"{}\"", requested_value,
endpoint, fibonacci_result);
let response =
format!("The Fibonacci number for {} is {}", requested_value, fibonacci_result);
if let Err(why) = connection_manager.send(endpoint.clone(), response.into_bytes()) {
println!("Failed to send reply to {:?}: {}", endpoint, why)
}
},
crust::Event::NewConnection(endpoint) => {
println!("New connection made to {:?}", endpoint);
},
crust::Event::LostConnection(endpoint) => {
println!("Lost connection to {:?}", endpoint);
},
crust::Event::NewBootstrapConnection(endpoint) => {
println!("New Bootstrap connection made to {:?}", endpoint);
}
}
}
println!("Stopped receiving.");
}
| main | identifier_name |
keyfsm.rs | use bitflags::bitflags;
mod keymap {
static KEYCODE_LUT: [u8; 132] =
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
[
0x00, 0x43, 0x00, 0x3F, 0x3D, 0x3B, 0x3C, 0x58, 0x00, 0x44, 0x42, 0x40, 0x3E, 0x0F,
0x29, 0x00, 0x00, 0x38, 0x2A, 0x00, 0x1D, 0x10, 0x02, 0x00, 0x00, 0x00, 0x2C, 0x1F,
0x1E, 0x11, 0x03, 0x00, 0x00, 0x2E, 0x2D, 0x20, 0x12, 0x05, 0x04, 0x00, 0x00, 0x39,
0x2F, 0x21, 0x14, 0x13, 0x06, 0x00, 0x00, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x00,
0x00, 0x00, 0x32, 0x24, 0x16, 0x08, 0x09, 0x00, 0x00, 0x33, 0x25, 0x17, 0x18, 0x0B,
0x0A, 0x00, 0x00, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0C, 0x00, 0x00, 0x00, 0x28, 0x00,
0x1A, 0x0D, 0x00, 0x00, 0x3A, 0x36, 0x1C, 0x1B, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x4F, 0x00, 0x4B, 0x47, 0x00, 0x00, 0x00,
0x52, 0x53, 0x50, 0x4C, 0x4D, 0x48, 0x01, 0x45, 0x57, 0x4E, 0x51, 0x4A, 0x37, 0x49,
0x46, 0x00, 0x00, 0x00, 0x00, 0x41,
];
pub fn to_xt(at_in: u8) -> Option<u8> {
KEYCODE_LUT.get(usize::from(at_in)).copied()
}
}
pub enum Cmd {
WaitForKey,
ClearBuffer, // If Reset Occurs.
ToggleLed(LedMask),
SendXtKey(u8),
}
impl Cmd {
// XT command
pub const SELF_TEST_PASSED: u8 = 0xaa;
// AT commands
pub const SET_LEDS: u8 = 0xed;
#[allow(dead_code)]
pub const ECHO: u8 = 0xee;
pub const RESET: u8 = 0xff;
}
bitflags! {
#[derive(Default)]
pub struct LedMask: u8 {
const SCROLL = 0b0000_0001;
const NUM = 0b0000_0010;
const CAPS = 0b0000_0100;
}
}
pub enum ProcReply {
// JustInitialized,
NothingToDo,
GrabbedKey(u8),
SentKey(u8),
ClearedBuffer,
LedToggled(LedMask),
KeyboardReset,
//SentEcho,
}
impl ProcReply {
pub fn | () -> ProcReply {
ProcReply::NothingToDo
}
}
enum State {
NotInKey,
SimpleKey(u8),
PossibleBreakCode,
KnownBreakCode(u8),
UnmodifiedKey(u8),
ToggleLedFirst(u8),
// InPause(u8), // Number of keycodes in pause left to handle - alternate impl.
Inconsistent,
ExpectingBufferClear,
}
pub struct Fsm {
curr_state: State,
expecting_pause: bool,
led_mask: LedMask,
}
impl Fsm {
#[allow(dead_code)]
const ERROR1: u8 = 0x00;
const CAPS: u8 = 0x58;
const NUM: u8 = 0x77;
const SCROLL: u8 = 0x7e;
const SELF_TEST_PASSED: u8 = 0xaa;
const PREFIX: u8 = 0xe0;
const PREFIX_PAUSE: u8 = 0xe1;
const ECHO: u8 = 0xee;
const BREAK: u8 = 0xf0;
const ACK: u8 = 0xfa;
#[allow(dead_code)]
const SELF_TEST_FAILED1: u8 = 0xfc;
#[allow(dead_code)]
const SELF_TEST_FAILED2: u8 = 0xfd;
const NAK: u8 = 0xfe;
#[allow(dead_code)]
const ERROR2: u8 = 0xff;
pub fn start() -> Fsm {
Fsm {
curr_state: State::NotInKey,
expecting_pause: false,
led_mask: Default::default(),
}
}
pub fn run(&mut self, curr_reply: &ProcReply) -> Result<Cmd, ()> {
let next_state = self.next_state(curr_reply);
let next_cmd = match next_state {
State::NotInKey | State::PossibleBreakCode => Ok(Cmd::WaitForKey),
State::SimpleKey(k) => keymap::to_xt(k).ok_or(()).map(Cmd::SendXtKey),
State::KnownBreakCode(b) => {
keymap::to_xt(b).ok_or(()).map(|b| Cmd::SendXtKey(b | 0x80))
}
State::UnmodifiedKey(u) => Ok(Cmd::SendXtKey(u)),
State::ToggleLedFirst(l) => match l {
Self::SCROLL => Ok(Cmd::ToggleLed(self.led_mask ^ LedMask::SCROLL)),
Self::NUM => Ok(Cmd::ToggleLed(self.led_mask ^ LedMask::NUM)),
Self::CAPS => Ok(Cmd::ToggleLed(self.led_mask ^ LedMask::CAPS)),
_ => Err(()),
},
State::ExpectingBufferClear => Ok(Cmd::ClearBuffer),
State::Inconsistent => Err(()),
};
self.curr_state = next_state;
next_cmd
}
fn next_state(&mut self, curr_reply: &ProcReply) -> State {
match (&self.curr_state, curr_reply) {
(_, &ProcReply::KeyboardReset) => State::ExpectingBufferClear,
(&State::NotInKey, &ProcReply::NothingToDo)
| (&State::SimpleKey(_), &ProcReply::SentKey(_))
| (&State::KnownBreakCode(_), &ProcReply::SentKey(_))
| (&State::UnmodifiedKey(_), &ProcReply::SentKey(_))
| (&State::ExpectingBufferClear, &ProcReply::ClearedBuffer) => State::NotInKey,
(&State::NotInKey, &ProcReply::GrabbedKey(k)) => {
match k {
// TODO: 0xfa, 0xfe, and 0xee should never be sent unprompted.
Self::SELF_TEST_PASSED | Self::ACK | Self::NAK | Self::ECHO => State::NotInKey,
Self::BREAK => State::PossibleBreakCode,
Self::PREFIX => State::UnmodifiedKey(k),
Self::PREFIX_PAUSE => {
self.expecting_pause = true;
State::UnmodifiedKey(k)
}
_ => State::SimpleKey(k),
}
}
(&State::PossibleBreakCode, &ProcReply::GrabbedKey(k)) => {
match k {
// LEDs => State::ToggleLed()
Self::SCROLL | Self::CAPS => State::ToggleLedFirst(k),
Self::NUM => {
if self.expecting_pause {
self.expecting_pause = false;
State::KnownBreakCode(k)
} else {
State::ToggleLedFirst(k)
}
}
_ => State::KnownBreakCode(k),
}
}
(&State::ToggleLedFirst(l), &ProcReply::LedToggled(m)) => {
self.led_mask = m;
State::KnownBreakCode(l)
}
(_, _) => State::Inconsistent,
}
}
}
| init | identifier_name |
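The `Fsm`, `Cmd`, and `ProcReply` types above are intended to be driven in a loop that alternates between `Fsm::run` and whatever I/O layer carries out each command; the driver below is a schematic sketch, and `get_next_reply` is a placeholder for that I/O layer, not something defined in the module:

```rust
// Schematic driver loop; `get_next_reply` stands in for the keyboard/LED
// I/O layer that executes each Cmd and reports what actually happened.
fn drive(mut get_next_reply: impl FnMut(&Cmd) -> ProcReply) -> Result<(), ()> {
    let mut fsm = Fsm::start();
    let mut reply = ProcReply::init(); // starts as NothingToDo
    loop {
        let cmd = fsm.run(&reply)?;   // Err(()) signals an inconsistent state
        reply = get_next_reply(&cmd); // perform the command, report the outcome
    }
}
```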
keyfsm.rs | use bitflags::bitflags;
mod keymap {
static KEYCODE_LUT: [u8; 132] =
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
[
0x00, 0x43, 0x00, 0x3F, 0x3D, 0x3B, 0x3C, 0x58, 0x00, 0x44, 0x42, 0x40, 0x3E, 0x0F,
0x29, 0x00, 0x00, 0x38, 0x2A, 0x00, 0x1D, 0x10, 0x02, 0x00, 0x00, 0x00, 0x2C, 0x1F,
0x1E, 0x11, 0x03, 0x00, 0x00, 0x2E, 0x2D, 0x20, 0x12, 0x05, 0x04, 0x00, 0x00, 0x39,
0x2F, 0x21, 0x14, 0x13, 0x06, 0x00, 0x00, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x00,
0x00, 0x00, 0x32, 0x24, 0x16, 0x08, 0x09, 0x00, 0x00, 0x33, 0x25, 0x17, 0x18, 0x0B,
0x0A, 0x00, 0x00, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0C, 0x00, 0x00, 0x00, 0x28, 0x00,
0x1A, 0x0D, 0x00, 0x00, 0x3A, 0x36, 0x1C, 0x1B, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x4F, 0x00, 0x4B, 0x47, 0x00, 0x00, 0x00,
0x52, 0x53, 0x50, 0x4C, 0x4D, 0x48, 0x01, 0x45, 0x57, 0x4E, 0x51, 0x4A, 0x37, 0x49,
0x46, 0x00, 0x00, 0x00, 0x00, 0x41,
];
pub fn to_xt(at_in: u8) -> Option<u8> {
KEYCODE_LUT.get(usize::from(at_in)).copied()
}
}
pub enum Cmd {
WaitForKey,
ClearBuffer, // If Reset Occurs.
ToggleLed(LedMask),
SendXtKey(u8),
}
impl Cmd {
// XT command
pub const SELF_TEST_PASSED: u8 = 0xaa;
// AT commands
pub const SET_LEDS: u8 = 0xed;
#[allow(dead_code)]
pub const ECHO: u8 = 0xee;
pub const RESET: u8 = 0xff;
}
bitflags! {
#[derive(Default)]
pub struct LedMask: u8 {
const SCROLL = 0b0000_0001;
const NUM = 0b0000_0010;
const CAPS = 0b0000_0100;
}
}
pub enum ProcReply {
// JustInitialized,
NothingToDo,
GrabbedKey(u8),
SentKey(u8),
ClearedBuffer,
LedToggled(LedMask),
KeyboardReset,
//SentEcho,
}
impl ProcReply {
pub fn init() -> ProcReply {
ProcReply::NothingToDo
}
}
enum State {
NotInKey,
SimpleKey(u8),
PossibleBreakCode,
KnownBreakCode(u8),
UnmodifiedKey(u8),
ToggleLedFirst(u8),
// InPause(u8), // Number of keycodes in pause left to handle - alternate impl.
Inconsistent,
ExpectingBufferClear,
}
pub struct Fsm {
curr_state: State,
expecting_pause: bool,
led_mask: LedMask,
}
impl Fsm {
#[allow(dead_code)]
const ERROR1: u8 = 0x00;
const CAPS: u8 = 0x58;
const NUM: u8 = 0x77;
const SCROLL: u8 = 0x7e;
const SELF_TEST_PASSED: u8 = 0xaa;
const PREFIX: u8 = 0xe0;
const PREFIX_PAUSE: u8 = 0xe1;
const ECHO: u8 = 0xee;
const BREAK: u8 = 0xf0;
const ACK: u8 = 0xfa;
#[allow(dead_code)]
const SELF_TEST_FAILED1: u8 = 0xfc;
#[allow(dead_code)]
const SELF_TEST_FAILED2: u8 = 0xfd;
const NAK: u8 = 0xfe;
#[allow(dead_code)]
const ERROR2: u8 = 0xff;
pub fn start() -> Fsm {
Fsm {
curr_state: State::NotInKey,
expecting_pause: false,
led_mask: Default::default(),
}
}
pub fn run(&mut self, curr_reply: &ProcReply) -> Result<Cmd, ()> {
let next_state = self.next_state(curr_reply);
let next_cmd = match next_state {
State::NotInKey | State::PossibleBreakCode => Ok(Cmd::WaitForKey),
State::SimpleKey(k) => keymap::to_xt(k).ok_or(()).map(Cmd::SendXtKey),
State::KnownBreakCode(b) => {
keymap::to_xt(b).ok_or(()).map(|b| Cmd::SendXtKey(b | 0x80))
}
State::UnmodifiedKey(u) => Ok(Cmd::SendXtKey(u)),
State::ToggleLedFirst(l) => match l {
Self::SCROLL => Ok(Cmd::ToggleLed(self.led_mask ^ LedMask::SCROLL)),
Self::NUM => Ok(Cmd::ToggleLed(self.led_mask ^ LedMask::NUM)),
Self::CAPS => Ok(Cmd::ToggleLed(self.led_mask ^ LedMask::CAPS)),
_ => Err(()),
},
State::ExpectingBufferClear => Ok(Cmd::ClearBuffer),
State::Inconsistent => Err(()),
};
self.curr_state = next_state;
next_cmd
} | | (&State::SimpleKey(_), &ProcReply::SentKey(_))
| (&State::KnownBreakCode(_), &ProcReply::SentKey(_))
| (&State::UnmodifiedKey(_), &ProcReply::SentKey(_))
| (&State::ExpectingBufferClear, &ProcReply::ClearedBuffer) => State::NotInKey,
(&State::NotInKey, &ProcReply::GrabbedKey(k)) => {
match k {
// TODO: 0xfa, 0xfe, and 0xee should never be sent unprompted.
Self::SELF_TEST_PASSED | Self::ACK | Self::NAK | Self::ECHO => State::NotInKey,
Self::BREAK => State::PossibleBreakCode,
Self::PREFIX => State::UnmodifiedKey(k),
Self::PREFIX_PAUSE => {
self.expecting_pause = true;
State::UnmodifiedKey(k)
}
_ => State::SimpleKey(k),
}
}
(&State::PossibleBreakCode, &ProcReply::GrabbedKey(k)) => {
match k {
// LEDs => State::ToggleLed()
Self::SCROLL | Self::CAPS => State::ToggleLedFirst(k),
Self::NUM => {
if self.expecting_pause {
self.expecting_pause = false;
State::KnownBreakCode(k)
} else {
State::ToggleLedFirst(k)
}
}
_ => State::KnownBreakCode(k),
}
}
(&State::ToggleLedFirst(l), &ProcReply::LedToggled(m)) => {
self.led_mask = m;
State::KnownBreakCode(l)
}
(_, _) => State::Inconsistent,
}
}
} |
fn next_state(&mut self, curr_reply: &ProcReply) -> State {
match (&self.curr_state, curr_reply) {
(_, &ProcReply::KeyboardReset) => State::ExpectingBufferClear,
(&State::NotInKey, &ProcReply::NothingToDo) | random_line_split |
test_html5lib.py | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError, e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not HTML5LIB_PRESENT,
"html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
"""See ``HTML5TreeBuilderSmokeTest``."""
@property
def default_builder(self):
return HTML5TreeBuilder
def test_soupstrainer(self):
# The html5lib tree builder does not support SoupStrainers.
strainer = SoupStrainer("b")
markup = "<p>A <b>bold</b> statement.</p>"
with warnings.catch_warnings(record=True) as w:
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(
soup.decode(), self.document_for(markup))
self.assertTrue(
"the html5lib tree builder doesn't support parse_only" in
str(w[0].message))
def test_correctly_nested_tables(self):
"""html5lib inserts <tbody> tags where other parsers don't."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tbody><tr><td>Here\'s another table:'
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
'</td></tr></tbody></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_xml_declaration_followed_by_doctype(self):
markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
soup = self.soup(markup)
# Verify that we can reach the <p> tag; this means the tree is connected.
self.assertEqual(b"<p>foo</p>", soup.p.encode())
def test_reparented_markup(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_ends_with_whitespace(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_containing_identical_whitespace_nodes(self):
"""Verify that we keep the two whitespace nodes in this
document distinct when reparenting the adjacent <tbody> tags.
"""
markup = '<table> <tbody><tbody><ims></tbody> </table>'
soup = self.soup(markup)
space1, space2 = soup.find_all(string=' ')
tbody1, tbody2 = soup.find_all('tbody')
assert space1.next_element is tbody1
assert tbody2.next_element is space2
def test_reparented_markup_containing_children(self):
markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
soup = self.soup(markup)
noscript = soup.noscript
self.assertEqual("target", noscript.next_element)
target = soup.find(string='target')
# The 'aftermath' string was duplicated; we want the second one.
final_aftermath = soup.find_all(string='aftermath')[-1]
# The <noscript> tag was moved beneath a copy of the <a> tag,
# but the 'target' string within is still connected to the
# (second) 'aftermath' string.
self.assertEqual(final_aftermath, target.next_element)
self.assertEqual(target, final_aftermath.previous_element)
def test_processing_instruction(self):
"""Processing instructions become comments."""
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
assert str(soup).startswith("<!--?PITarget PIContent?-->")
def test_cloned_multivalue_node(self):
markup = b"""<a class="my_class"><p></a>"""
soup = self.soup(markup)
a1, a2 = soup.find_all('a')
self.assertEqual(a1, a2)
assert a1 is not a2
def test_foster_parenting(self):
markup = b"""<table><td></tbody>A"""
soup = self.soup(markup)
self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
def | (self):
"""
Test that extraction does not destroy the tree.
https://bugs.launchpad.net/beautifulsoup/+bug/1782928
"""
markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
soup = self.soup(markup)
[s.extract() for s in soup('script')]
[s.extract() for s in soup('style')]
self.assertEqual(len(soup.find_all("p")), 1)
def test_empty_comment(self):
"""
Test that empty comment does not break structure.
https://bugs.launchpad.net/beautifulsoup/+bug/1806598
"""
markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
soup = self.soup(markup)
inputs = []
for form in soup.find_all('form'):
inputs.extend(form.find_all('input'))
self.assertEqual(len(inputs), 1)
def test_tracking_line_numbers(self):
# The html.parser TreeBuilder keeps track of line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
self.assertEqual(2, soup.p.sourceline)
self.assertEqual(5, soup.p.sourcepos)
self.assertEqual("sourceline", soup.p.find('sourceline').name)
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
self.assertEqual("sourceline", soup.p.sourceline.name)
self.assertEqual("sourcepos", soup.p.sourcepos.name)
| test_extraction | identifier_name |
test_html5lib.py | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError, e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not HTML5LIB_PRESENT,
"html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
"""See ``HTML5TreeBuilderSmokeTest``."""
@property
def default_builder(self):
return HTML5TreeBuilder
def test_soupstrainer(self):
# The html5lib tree builder does not support SoupStrainers.
strainer = SoupStrainer("b")
markup = "<p>A <b>bold</b> statement.</p>"
with warnings.catch_warnings(record=True) as w:
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(
soup.decode(), self.document_for(markup))
self.assertTrue(
"the html5lib tree builder doesn't support parse_only" in
str(w[0].message))
def test_correctly_nested_tables(self):
"""html5lib inserts <tbody> tags where other parsers don't."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tbody><tr><td>Here\'s another table:'
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
'</td></tr></tbody></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_xml_declaration_followed_by_doctype(self):
markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
soup = self.soup(markup)
# Verify that we can reach the <p> tag; this means the tree is connected.
self.assertEqual(b"<p>foo</p>", soup.p.encode())
def test_reparented_markup(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_ends_with_whitespace(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_containing_identical_whitespace_nodes(self):
"""Verify that we keep the two whitespace nodes in this
document distinct when reparenting the adjacent <tbody> tags.
"""
markup = '<table> <tbody><tbody><ims></tbody> </table>'
soup = self.soup(markup)
space1, space2 = soup.find_all(string=' ')
tbody1, tbody2 = soup.find_all('tbody')
assert space1.next_element is tbody1
assert tbody2.next_element is space2
def test_reparented_markup_containing_children(self):
markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
soup = self.soup(markup)
noscript = soup.noscript
self.assertEqual("target", noscript.next_element)
target = soup.find(string='target')
# The 'aftermath' string was duplicated; we want the second one.
final_aftermath = soup.find_all(string='aftermath')[-1]
# The <noscript> tag was moved beneath a copy of the <a> tag,
# but the 'target' string within is still connected to the
# (second) 'aftermath' string.
self.assertEqual(final_aftermath, target.next_element)
self.assertEqual(target, final_aftermath.previous_element)
def test_processing_instruction(self):
|
def test_cloned_multivalue_node(self):
markup = b"""<a class="my_class"><p></a>"""
soup = self.soup(markup)
a1, a2 = soup.find_all('a')
self.assertEqual(a1, a2)
assert a1 is not a2
def test_foster_parenting(self):
markup = b"""<table><td></tbody>A"""
soup = self.soup(markup)
self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
def test_extraction(self):
"""
Test that extraction does not destroy the tree.
https://bugs.launchpad.net/beautifulsoup/+bug/1782928
"""
markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
soup = self.soup(markup)
[s.extract() for s in soup('script')]
[s.extract() for s in soup('style')]
self.assertEqual(len(soup.find_all("p")), 1)
def test_empty_comment(self):
"""
Test that empty comment does not break structure.
https://bugs.launchpad.net/beautifulsoup/+bug/1806598
"""
markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
soup = self.soup(markup)
inputs = []
for form in soup.find_all('form'):
inputs.extend(form.find_all('input'))
self.assertEqual(len(inputs), 1)
def test_tracking_line_numbers(self):
# The html.parser TreeBuilder keeps track of line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
self.assertEqual(2, soup.p.sourceline)
self.assertEqual(5, soup.p.sourcepos)
self.assertEqual("sourceline", soup.p.find('sourceline').name)
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
self.assertEqual("sourceline", soup.p.sourceline.name)
self.assertEqual("sourcepos", soup.p.sourcepos.name)
| """Processing instructions become comments."""
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
assert str(soup).startswith("<!--?PITarget PIContent?-->") | identifier_body |
test_html5lib.py | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError, e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not HTML5LIB_PRESENT,
"html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
"""See ``HTML5TreeBuilderSmokeTest``."""
@property
def default_builder(self):
return HTML5TreeBuilder
def test_soupstrainer(self):
# The html5lib tree builder does not support SoupStrainers.
strainer = SoupStrainer("b")
markup = "<p>A <b>bold</b> statement.</p>"
with warnings.catch_warnings(record=True) as w:
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(
soup.decode(), self.document_for(markup))
self.assertTrue(
"the html5lib tree builder doesn't support parse_only" in
str(w[0].message))
def test_correctly_nested_tables(self):
"""html5lib inserts <tbody> tags where other parsers don't."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tbody><tr><td>Here\'s another table:'
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
'</td></tr></tbody></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_xml_declaration_followed_by_doctype(self):
markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
soup = self.soup(markup)
# Verify that we can reach the <p> tag; this means the tree is connected.
self.assertEqual(b"<p>foo</p>", soup.p.encode())
def test_reparented_markup(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_ends_with_whitespace(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_containing_identical_whitespace_nodes(self):
"""Verify that we keep the two whitespace nodes in this
document distinct when reparenting the adjacent <tbody> tags.
"""
markup = '<table> <tbody><tbody><ims></tbody> </table>'
soup = self.soup(markup)
space1, space2 = soup.find_all(string=' ')
tbody1, tbody2 = soup.find_all('tbody')
assert space1.next_element is tbody1
assert tbody2.next_element is space2
def test_reparented_markup_containing_children(self):
markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
soup = self.soup(markup)
noscript = soup.noscript
self.assertEqual("target", noscript.next_element)
target = soup.find(string='target')
# The 'aftermath' string was duplicated; we want the second one.
final_aftermath = soup.find_all(string='aftermath')[-1]
# The <noscript> tag was moved beneath a copy of the <a> tag,
# but the 'target' string within is still connected to the
# (second) 'aftermath' string.
self.assertEqual(final_aftermath, target.next_element)
self.assertEqual(target, final_aftermath.previous_element)
def test_processing_instruction(self):
"""Processing instructions become comments."""
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
assert str(soup).startswith("<!--?PITarget PIContent?-->")
def test_cloned_multivalue_node(self):
markup = b"""<a class="my_class"><p></a>"""
soup = self.soup(markup)
a1, a2 = soup.find_all('a')
self.assertEqual(a1, a2)
assert a1 is not a2
def test_foster_parenting(self):
markup = b"""<table><td></tbody>A"""
soup = self.soup(markup)
self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
def test_extraction(self):
"""
Test that extraction does not destroy the tree.
https://bugs.launchpad.net/beautifulsoup/+bug/1782928
"""
markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
soup = self.soup(markup)
[s.extract() for s in soup('script')]
[s.extract() for s in soup('style')]
self.assertEqual(len(soup.find_all("p")), 1)
def test_empty_comment(self):
"""
Test that empty comment does not break structure.
https://bugs.launchpad.net/beautifulsoup/+bug/1806598
"""
markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
soup = self.soup(markup)
inputs = []
for form in soup.find_all('form'):
|
self.assertEqual(len(inputs), 1)
def test_tracking_line_numbers(self):
# The html.parser TreeBuilder keeps track of line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
self.assertEqual(2, soup.p.sourceline)
self.assertEqual(5, soup.p.sourcepos)
self.assertEqual("sourceline", soup.p.find('sourceline').name)
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
self.assertEqual("sourceline", soup.p.sourceline.name)
self.assertEqual("sourcepos", soup.p.sourcepos.name)
| inputs.extend(form.find_all('input')) | conditional_block |
test_html5lib.py | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError, e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not HTML5LIB_PRESENT,
"html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
"""See ``HTML5TreeBuilderSmokeTest``."""
@property
def default_builder(self):
return HTML5TreeBuilder
def test_soupstrainer(self):
# The html5lib tree builder does not support SoupStrainers.
strainer = SoupStrainer("b")
markup = "<p>A <b>bold</b> statement.</p>"
with warnings.catch_warnings(record=True) as w:
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(
soup.decode(), self.document_for(markup))
self.assertTrue(
"the html5lib tree builder doesn't support parse_only" in
str(w[0].message))
def test_correctly_nested_tables(self):
"""html5lib inserts <tbody> tags where other parsers don't."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tbody><tr><td>Here\'s another table:'
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
'</td></tr></tbody></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_xml_declaration_followed_by_doctype(self):
markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
soup = self.soup(markup)
# Verify that we can reach the <p> tag; this means the tree is connected.
self.assertEqual(b"<p>foo</p>", soup.p.encode())
def test_reparented_markup(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_ends_with_whitespace(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_containing_identical_whitespace_nodes(self):
"""Verify that we keep the two whitespace nodes in this
document distinct when reparenting the adjacent <tbody> tags.
"""
markup = '<table> <tbody><tbody><ims></tbody> </table>'
soup = self.soup(markup)
space1, space2 = soup.find_all(string=' ')
tbody1, tbody2 = soup.find_all('tbody')
assert space1.next_element is tbody1
assert tbody2.next_element is space2
def test_reparented_markup_containing_children(self):
markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
soup = self.soup(markup)
noscript = soup.noscript
self.assertEqual("target", noscript.next_element) | # The <noscript> tag was moved beneath a copy of the <a> tag,
# but the 'target' string within is still connected to the
# (second) 'aftermath' string.
self.assertEqual(final_aftermath, target.next_element)
self.assertEqual(target, final_aftermath.previous_element)
def test_processing_instruction(self):
"""Processing instructions become comments."""
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
assert str(soup).startswith("<!--?PITarget PIContent?-->")
def test_cloned_multivalue_node(self):
markup = b"""<a class="my_class"><p></a>"""
soup = self.soup(markup)
a1, a2 = soup.find_all('a')
self.assertEqual(a1, a2)
assert a1 is not a2
def test_foster_parenting(self):
markup = b"""<table><td></tbody>A"""
soup = self.soup(markup)
self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
def test_extraction(self):
"""
Test that extraction does not destroy the tree.
https://bugs.launchpad.net/beautifulsoup/+bug/1782928
"""
markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
soup = self.soup(markup)
[s.extract() for s in soup('script')]
[s.extract() for s in soup('style')]
self.assertEqual(len(soup.find_all("p")), 1)
def test_empty_comment(self):
"""
Test that empty comment does not break structure.
https://bugs.launchpad.net/beautifulsoup/+bug/1806598
"""
markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
soup = self.soup(markup)
inputs = []
for form in soup.find_all('form'):
inputs.extend(form.find_all('input'))
self.assertEqual(len(inputs), 1)
def test_tracking_line_numbers(self):
# The html.parser TreeBuilder keeps track of line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
self.assertEqual(2, soup.p.sourceline)
self.assertEqual(5, soup.p.sourcepos)
self.assertEqual("sourceline", soup.p.find('sourceline').name)
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
self.assertEqual("sourceline", soup.p.sourceline.name)
self.assertEqual("sourcepos", soup.p.sourcepos.name) | target = soup.find(string='target')
# The 'aftermath' string was duplicated; we want the second one.
final_aftermath = soup.find_all(string='aftermath')[-1]
| random_line_split |
boxed.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! Boxes are useful in two situations: recursive data structures, and
//! occasionally when returning data. [The Pointer chapter of the
//! Book](../../../book/pointers.html#best-practices-1) explains these cases in
//! detail.
//!
//! # Examples
//!
//! Creating a box:
//!
//! ```
//! let x = Box::new(5);
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Box(Cons(2, Box(Nil))))`.
#![stable(feature = "rust1", since = "1.0.0")]
use core::prelude::*;
use core::any::Any;
use core::cmp::Ordering;
use core::default::Default;
use core::error::{Error, FromError};
use core::fmt;
use core::hash::{self, Hash};
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::Unique;
use core::raw::TraitObject;
/// A value that represents the heap. This is the default place that the `box`
/// keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// ```rust
/// #![feature(box_syntax)]
/// use std::boxed::HEAP;
///
/// fn main() {
/// let foo = box(HEAP) 5;
/// let foo = box 5;
/// }
/// ```
#[lang = "exchange_heap"]
#[unstable(feature = "alloc",
reason = "may be renamed; uncertain about custom allocator design")]
pub static HEAP: () = ();
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Box<T>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then moves `x` into it.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
box x
}
}
impl<T : ?Sized> Box<T> {
/// Constructs a box from the raw pointer.
///
/// After this function call, pointer is owned by resulting box.
/// In particular, it means that `Box` destructor calls destructor
/// of `T` and releases memory. Since the way `Box` allocates and
/// releases memory is unspecified, the only valid pointer to pass
/// to this function is the one taken from another `Box` with
/// `boxed::into_raw` function.
///
/// Function is unsafe, because improper use of this function may
/// lead to memory problems like double-free, for example if the
/// function is called twice on the same raw pointer.
#[unstable(feature = "alloc",
reason = "may be renamed or moved out of Box scope")]
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
mem::transmute(raw)
}
}
/// Consumes the `Box`, returning the wrapped raw pointer.
///
/// After call to this function, caller is responsible for the memory
/// previously managed by `Box`, in particular caller should properly
/// destroy `T` and release memory. The proper way to do it is to
/// convert pointer back to `Box` with `Box::from_raw` function, because
/// `Box` does not specify how memory is allocated.
///
/// Function is unsafe, because the result of this function is no longer
/// automatically managed, which may lead to a memory or other resource
/// leak.
///
/// # Examples
/// ```
/// use std::boxed;
///
/// let seventeen = Box::new(17u32);
/// let raw = unsafe { boxed::into_raw(seventeen) };
/// let boxed_again = unsafe { Box::from_raw(raw) };
/// ```
#[unstable(feature = "alloc",
reason = "may be renamed")]
#[inline]
pub unsafe fn into_raw<T : ?Sized>(b: Box<T>) -> *mut T {
mem::transmute(b)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<T> { box Default::default() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<[T]> { Box::<[T; 0]>::new([]) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
/// Returns a new box with a `clone()` of this box's contents.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let y = x.clone();
/// ```
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Copies `source`'s contents into `self` without creating a new allocation.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let mut y = Box::new(10);
///
/// y.clone_from(&x);
///
/// assert_eq!(*y, 5);
/// ```
#[inline]
fn clone_from(&mut self, source: &Box<T>) |
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { PartialEq::eq(&**self, &**other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { PartialEq::ne(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { PartialOrd::lt(&**self, &**other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { PartialOrd::le(&**self, &**other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { PartialOrd::ge(&**self, &**other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { PartialOrd::gt(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Box<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
/// Extension methods for an owning `Any` trait object.
#[unstable(feature = "alloc",
reason = "this trait will likely disappear once compiler bugs blocking \
a direct impl on `Box<Any>` have been fixed ")]
// FIXME(#18737): this should be a direct impl on `Box<Any>`. If you're
// removing this please make sure that you can downcast on
// `Box<Any + Send>` as well as `Box<Any>`
pub trait BoxAny {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
#[stable(feature = "rust1", since = "1.0.0")]
fn downcast<T: 'static>(self) -> Result<Box<T>, Self>;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl BoxAny for Box<Any> {
#[inline]
fn downcast<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let raw = into_raw(self);
let to: TraitObject =
mem::transmute::<*mut Any, TraitObject>(raw);
// Extract the data pointer
Ok(Box::from_raw(to.data as *mut T))
}
} else {
Err(self)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T { &mut **self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> { (**self).next() }
fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> { (**self).next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E: Error + 'a> FromError<E> for Box<Error + 'a> {
fn from_error(err: E) -> Box<Error + 'a> {
Box::new(err)
}
}
| {
(**self).clone_from(&(**source));
} | identifier_body |
boxed.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! Boxes are useful in two situations: recursive data structures, and
//! occasionally when returning data. [The Pointer chapter of the
//! Book](../../../book/pointers.html#best-practices-1) explains these cases in
//! detail.
//!
//! # Examples
//!
//! Creating a box:
//!
//! ```
//! let x = Box::new(5);
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Box(Cons(2, Box(Nil))))`.
#![stable(feature = "rust1", since = "1.0.0")]
use core::prelude::*;
use core::any::Any;
use core::cmp::Ordering;
use core::default::Default;
use core::error::{Error, FromError};
use core::fmt;
use core::hash::{self, Hash};
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::Unique;
use core::raw::TraitObject;
/// A value that represents the heap. This is the default place that the `box`
/// keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// ```rust
/// #![feature(box_syntax)]
/// use std::boxed::HEAP;
///
/// fn main() {
/// let foo = box(HEAP) 5;
/// let foo = box 5;
/// }
/// ```
#[lang = "exchange_heap"]
#[unstable(feature = "alloc",
reason = "may be renamed; uncertain about custom allocator design")]
pub static HEAP: () = ();
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Box<T>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then moves `x` into it.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
box x
}
}
impl<T : ?Sized> Box<T> {
/// Constructs a box from the raw pointer.
///
/// After this function call, the pointer is owned by the resulting box.
/// In particular, this means that the `Box` destructor runs the destructor
/// of `T` and releases the memory. Since the way `Box` allocates and
/// releases memory is unspecified, the only valid pointer to pass
/// to this function is one taken from another `Box` via the
/// `boxed::into_raw` function.
///
/// This function is unsafe because improper use may lead to memory
/// problems such as a double-free, for example if it is called twice
/// on the same raw pointer.
#[unstable(feature = "alloc",
reason = "may be renamed or moved out of Box scope")]
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
mem::transmute(raw)
}
}
/// Consumes the `Box`, returning the wrapped raw pointer.
///
/// After a call to this function, the caller is responsible for the memory
/// previously managed by the `Box`; in particular, the caller should properly
/// destroy `T` and release the memory. The proper way to do so is to
/// convert the pointer back into a `Box` with the `Box::from_raw` function,
/// because `Box` does not specify how the memory is allocated.
///
/// This function is unsafe because the result is no longer automatically
/// managed, which may lead to a memory leak or another resource leak.
///
/// # Examples
/// ```
/// use std::boxed;
///
/// let seventeen = Box::new(17u32);
/// let raw = unsafe { boxed::into_raw(seventeen) };
/// let boxed_again = unsafe { Box::from_raw(raw) };
/// ```
#[unstable(feature = "alloc",
reason = "may be renamed")]
#[inline]
pub unsafe fn into_raw<T : ?Sized>(b: Box<T>) -> *mut T {
mem::transmute(b)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<T> { box Default::default() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<[T]> { Box::<[T; 0]>::new([]) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
/// Returns a new box with a `clone()` of this box's contents.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let y = x.clone();
/// ```
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Copies `source`'s contents into `self` without creating a new allocation.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let mut y = Box::new(10);
///
/// y.clone_from(&x);
///
/// assert_eq!(*y, 5);
/// ```
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { PartialEq::eq(&**self, &**other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { PartialEq::ne(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { PartialOrd::lt(&**self, &**other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { PartialOrd::le(&**self, &**other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { PartialOrd::ge(&**self, &**other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { PartialOrd::gt(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Box<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
/// Extension methods for an owning `Any` trait object.
#[unstable(feature = "alloc",
reason = "this trait will likely disappear once compiler bugs blocking \
a direct impl on `Box<Any>` have been fixed ")]
// FIXME(#18737): this should be a direct impl on `Box<Any>`. If you're
// removing this please make sure that you can downcast on
// `Box<Any + Send>` as well as `Box<Any>`
pub trait BoxAny {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
#[stable(feature = "rust1", since = "1.0.0")]
fn downcast<T: 'static>(self) -> Result<Box<T>, Self>;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl BoxAny for Box<Any> {
#[inline]
fn downcast<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let raw = into_raw(self);
let to: TraitObject =
mem::transmute::<*mut Any, TraitObject>(raw);
// Extract the data pointer
Ok(Box::from_raw(to.data as *mut T))
}
} else {
Err(self)
}
}
}
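// A minimal usage sketch of the `BoxAny` trait above: a successful downcast
// yields the concrete box, while a failed one returns the original `Box<Any>`
// untouched. `downcast_or_zero` is a hypothetical helper, not upstream API.
fn downcast_or_zero(val: Box<Any>) -> usize {
    match val.downcast::<usize>() {
        Ok(n) => *n,
        Err(_original) => 0, // not a usize; we still own the box here
    }
}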
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T { &mut **self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> { (**self).next() }
fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() }
}
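// A minimal sketch: because `Box<I>` forwards `Iterator`, a boxed trait-object
// iterator can be consumed like any other. `sum_boxed` is a hypothetical helper.
fn sum_boxed(iter: Box<Iterator<Item = i32>>) -> i32 {
    let mut total = 0;
    for x in iter {
        total += x;
    }
    total
}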
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> { (**self).next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E: Error + 'a> FromError<E> for Box<Error + 'a> {
fn from_error(err: E) -> Box<Error + 'a> {
Box::new(err)
}
} | }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Box<Any> { | random_line_split |
boxed.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! Boxes are useful in two situations: recursive data structures, and
//! occasionally when returning data. [The Pointer chapter of the
//! Book](../../../book/pointers.html#best-practices-1) explains these cases in
//! detail.
//!
//! # Examples
//!
//! Creating a box:
//!
//! ```
//! let x = Box::new(5);
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Box(Cons(2, Box(Nil))))`.
#![stable(feature = "rust1", since = "1.0.0")]
use core::prelude::*;
use core::any::Any;
use core::cmp::Ordering;
use core::default::Default;
use core::error::{Error, FromError};
use core::fmt;
use core::hash::{self, Hash};
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::Unique;
use core::raw::TraitObject;
/// A value that represents the heap. This is the default place that the `box`
/// keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// ```rust
/// #![feature(box_syntax)]
/// use std::boxed::HEAP;
///
/// fn main() {
/// let foo = box(HEAP) 5;
/// let foo = box 5;
/// }
/// ```
#[lang = "exchange_heap"]
#[unstable(feature = "alloc",
reason = "may be renamed; uncertain about custom allocator design")]
pub static HEAP: () = ();
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Box<T>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then moves `x` into it.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
box x
}
}
impl<T : ?Sized> Box<T> {
/// Constructs a box from the raw pointer.
///
/// After this function call, the pointer is owned by the resulting box.
/// In particular, this means that the `Box` destructor runs the destructor
/// of `T` and releases the memory. Since the way `Box` allocates and
/// releases memory is unspecified, the only valid pointer to pass
/// to this function is one taken from another `Box` via the
/// `boxed::into_raw` function.
///
/// This function is unsafe because improper use may lead to memory
/// problems such as a double-free, for example if it is called twice
/// on the same raw pointer.
#[unstable(feature = "alloc",
reason = "may be renamed or moved out of Box scope")]
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
mem::transmute(raw)
}
}
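// A minimal sketch of the contract described above: the only sound way to
// release memory obtained from `boxed::into_raw` is to rebuild the box and
// let it drop. `free_raw` is a hypothetical helper, not part of the API.
unsafe fn free_raw<T>(raw: *mut T) {
    drop(Box::from_raw(raw));
}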
/// Consumes the `Box`, returning the wrapped raw pointer.
///
/// After a call to this function, the caller is responsible for the memory
/// previously managed by the `Box`; in particular, the caller should properly
/// destroy `T` and release the memory. The proper way to do so is to
/// convert the pointer back into a `Box` with the `Box::from_raw` function,
/// because `Box` does not specify how the memory is allocated.
///
/// This function is unsafe because the result is no longer automatically
/// managed, which may lead to a memory leak or another resource leak.
///
/// # Examples
/// ```
/// use std::boxed;
///
/// let seventeen = Box::new(17u32);
/// let raw = unsafe { boxed::into_raw(seventeen) };
/// let boxed_again = unsafe { Box::from_raw(raw) };
/// ```
#[unstable(feature = "alloc",
reason = "may be renamed")]
#[inline]
pub unsafe fn into_raw<T : ?Sized>(b: Box<T>) -> *mut T {
mem::transmute(b)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<T> { box Default::default() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<[T]> { Box::<[T; 0]>::new([]) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
/// Returns a new box with a `clone()` of this box's contents.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let y = x.clone();
/// ```
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Copies `source`'s contents into `self` without creating a new allocation.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let mut y = Box::new(10);
///
/// y.clone_from(&x);
///
/// assert_eq!(*y, 5);
/// ```
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { PartialEq::eq(&**self, &**other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { PartialEq::ne(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
#[inline]
fn | (&self, other: &Box<T>) -> bool { PartialOrd::lt(&**self, &**other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { PartialOrd::le(&**self, &**other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { PartialOrd::ge(&**self, &**other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { PartialOrd::gt(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Box<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
/// Extension methods for an owning `Any` trait object.
#[unstable(feature = "alloc",
reason = "this trait will likely disappear once compiler bugs blocking \
a direct impl on `Box<Any>` have been fixed ")]
// FIXME(#18737): this should be a direct impl on `Box<Any>`. If you're
// removing this please make sure that you can downcast on
// `Box<Any + Send>` as well as `Box<Any>`
pub trait BoxAny {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
#[stable(feature = "rust1", since = "1.0.0")]
fn downcast<T: 'static>(self) -> Result<Box<T>, Self>;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl BoxAny for Box<Any> {
#[inline]
fn downcast<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let raw = into_raw(self);
let to: TraitObject =
mem::transmute::<*mut Any, TraitObject>(raw);
// Extract the data pointer
Ok(Box::from_raw(to.data as *mut T))
}
} else {
Err(self)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T { &mut **self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> { (**self).next() }
fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> { (**self).next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E: Error + 'a> FromError<E> for Box<Error + 'a> {
fn from_error(err: E) -> Box<Error + 'a> {
Box::new(err)
}
}
| lt | identifier_name |
boxed.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! Boxes are useful in two situations: recursive data structures, and
//! occasionally when returning data. [The Pointer chapter of the
//! Book](../../../book/pointers.html#best-practices-1) explains these cases in
//! detail.
//!
//! # Examples
//!
//! Creating a box:
//!
//! ```
//! let x = Box::new(5);
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Box(Cons(2, Box(Nil))))`.
#![stable(feature = "rust1", since = "1.0.0")]
use core::prelude::*;
use core::any::Any;
use core::cmp::Ordering;
use core::default::Default;
use core::error::{Error, FromError};
use core::fmt;
use core::hash::{self, Hash};
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::Unique;
use core::raw::TraitObject;
/// A value that represents the heap. This is the default place that the `box`
/// keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// ```rust
/// #![feature(box_syntax)]
/// use std::boxed::HEAP;
///
/// fn main() {
/// let foo = box(HEAP) 5;
/// let foo = box 5;
/// }
/// ```
#[lang = "exchange_heap"]
#[unstable(feature = "alloc",
reason = "may be renamed; uncertain about custom allocator design")]
pub static HEAP: () = ();
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Box<T>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then moves `x` into it.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
box x
}
}
impl<T : ?Sized> Box<T> {
/// Constructs a box from the raw pointer.
///
/// After this function call, the pointer is owned by the resulting box.
/// In particular, this means that the `Box` destructor runs the destructor
/// of `T` and releases the memory. Since the way `Box` allocates and
/// releases memory is unspecified, the only valid pointer to pass
/// to this function is one taken from another `Box` via the
/// `boxed::into_raw` function.
///
/// This function is unsafe because improper use may lead to memory
/// problems such as a double-free, for example if it is called twice
/// on the same raw pointer.
#[unstable(feature = "alloc",
reason = "may be renamed or moved out of Box scope")]
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
mem::transmute(raw)
}
}
/// Consumes the `Box`, returning the wrapped raw pointer.
///
/// After a call to this function, the caller is responsible for the memory
/// previously managed by the `Box`; in particular, the caller should properly
/// destroy `T` and release the memory. The proper way to do so is to
/// convert the pointer back into a `Box` with the `Box::from_raw` function,
/// because `Box` does not specify how the memory is allocated.
///
/// This function is unsafe because the result is no longer automatically
/// managed, which may lead to a memory leak or another resource leak.
///
/// # Examples
/// ```
/// use std::boxed;
///
/// let seventeen = Box::new(17u32);
/// let raw = unsafe { boxed::into_raw(seventeen) };
/// let boxed_again = unsafe { Box::from_raw(raw) };
/// ```
#[unstable(feature = "alloc",
reason = "may be renamed")]
#[inline]
pub unsafe fn into_raw<T : ?Sized>(b: Box<T>) -> *mut T {
mem::transmute(b)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<T> { box Default::default() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Box<[T]> { Box::<[T; 0]>::new([]) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
/// Returns a new box with a `clone()` of this box's contents.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let y = x.clone();
/// ```
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Copies `source`'s contents into `self` without creating a new allocation.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let mut y = Box::new(10);
///
/// y.clone_from(&x);
///
/// assert_eq!(*y, 5);
/// ```
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { PartialEq::eq(&**self, &**other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { PartialEq::ne(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { PartialOrd::lt(&**self, &**other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { PartialOrd::le(&**self, &**other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { PartialOrd::ge(&**self, &**other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { PartialOrd::gt(&**self, &**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Box<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
/// Extension methods for an owning `Any` trait object.
#[unstable(feature = "alloc",
reason = "this trait will likely disappear once compiler bugs blocking \
a direct impl on `Box<Any>` have been fixed ")]
// FIXME(#18737): this should be a direct impl on `Box<Any>`. If you're
// removing this please make sure that you can downcast on
// `Box<Any + Send>` as well as `Box<Any>`
pub trait BoxAny {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
#[stable(feature = "rust1", since = "1.0.0")]
fn downcast<T: 'static>(self) -> Result<Box<T>, Self>;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl BoxAny for Box<Any> {
#[inline]
fn downcast<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let raw = into_raw(self);
let to: TraitObject =
mem::transmute::<*mut Any, TraitObject>(raw);
// Extract the data pointer
Ok(Box::from_raw(to.data as *mut T))
}
} else |
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T { &mut **self }
}
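// A minimal sketch: the `Deref`/`DerefMut` impls above are what let `T`'s own
// methods be called directly through a `Box<T>`. A boxed slice is used here
// since slice methods come from `core`; `first_elem_mut` is a hypothetical helper.
fn first_elem_mut(b: &mut Box<[i32]>) -> Option<&mut i32> {
    b.first_mut() // auto-deref through `DerefMut` to the slice
}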
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> { (**self).next() }
fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> { (**self).next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E: Error + 'a> FromError<E> for Box<Error + 'a> {
fn from_error(err: E) -> Box<Error + 'a> {
Box::new(err)
}
}
| {
Err(self)
} | conditional_block |
ast_util.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::*;
use ast;
use ast_util;
use codemap;
use codemap::Span;
use owned_slice::OwnedSlice;
use parse::token;
use print::pprust;
use ptr::P;
use visit::Visitor;
use visit;
use std::cmp;
use std::u32;
pub fn path_name_i(idents: &[Ident]) -> String {
// FIXME: Bad copies (#2543 -- same for everything else that says "bad")
idents.iter().map(|i| {
token::get_ident(*i).to_string()
}).collect::<Vec<String>>().connect("::")
}
pub fn local_def(id: NodeId) -> DefId {
ast::DefId { krate: LOCAL_CRATE, node: id }
}
pub fn is_local(did: ast::DefId) -> bool { did.krate == LOCAL_CRATE }
pub fn stmt_id(s: &Stmt) -> NodeId {
match s.node {
StmtDecl(_, id) => id,
StmtExpr(_, id) => id,
StmtSemi(_, id) => id,
StmtMac(..) => panic!("attempted to analyze unexpanded stmt")
}
}
pub fn binop_to_string(op: BinOp_) -> &'static str {
match op {
BiAdd => "+",
BiSub => "-",
BiMul => "*",
BiDiv => "/",
BiRem => "%",
BiAnd => "&&",
BiOr => "||",
BiBitXor => "^",
BiBitAnd => "&",
BiBitOr => "|",
BiShl => "<<",
BiShr => ">>",
BiEq => "==",
BiLt => "<",
BiLe => "<=",
BiNe => "!=",
BiGe => ">=",
BiGt => ">"
}
}
pub fn lazy_binop(b: BinOp_) -> bool {
match b {
BiAnd => true,
BiOr => true,
_ => false
}
}
pub fn is_shift_binop(b: BinOp_) -> bool {
match b {
BiShl => true,
BiShr => true,
_ => false
}
}
pub fn is_comparison_binop(b: BinOp_) -> bool {
match b {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
/// Returns `true` if the binary operator takes its arguments by value
pub fn is_by_value_binop(b: BinOp_) -> bool {
!is_comparison_binop(b)
}
/// Returns `true` if the unary operator takes its argument by value
pub fn is_by_value_unop(u: UnOp) -> bool {
match u {
UnNeg | UnNot => true,
_ => false,
}
}
pub fn unop_to_string(op: UnOp) -> &'static str {
match op {
UnUniq => "box() ",
UnDeref => "*",
UnNot => "!",
UnNeg => "-",
}
}
pub fn is_path(e: P<Expr>) -> bool {
match e.node { ExprPath(..) => true, _ => false }
}
/// Get a string representation of a signed int type, with its value.
/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
let s = match t {
TyIs => "isize",
TyI8 => "i8",
TyI16 => "i16",
TyI32 => "i32",
TyI64 => "i64"
};
match val {
// cast to a u64 so we can correctly print INT64_MIN. All integral types
// are parsed as u64, so we wouldn't want to print an extra negative
// sign.
Some(n) => format!("{}{}", n as u64, s),
None => s.to_string()
}
}
pub fn int_ty_max(t: IntTy) -> u64 {
match t {
TyI8 => 0x80,
TyI16 => 0x8000,
TyIs | TyI32 => 0x80000000, // not accurate for TyIs, which is pointer-sized
TyI64 => 0x8000000000000000
}
}
/// Get a string representation of an unsigned int type, with its value.
/// We want to avoid "42u" in favor of "42us". "42uint" is right out.
pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
let s = match t {
TyUs => "usize",
TyU8 => "u8",
TyU16 => "u16",
TyU32 => "u32",
TyU64 => "u64"
};
match val {
Some(n) => format!("{}{}", n, s),
None => s.to_string()
}
}
pub fn uint_ty_max(t: UintTy) -> u64 {
match t {
TyU8 => 0xff,
TyU16 => 0xffff,
TyUs | TyU32 => 0xffffffff, // not accurate for TyUs, which is pointer-sized
TyU64 => 0xffffffffffffffff
}
}
pub fn float_ty_to_string(t: FloatTy) -> String {
match t {
TyF32 => "f32".to_string(),
TyF64 => "f64".to_string(),
}
}
// convert a span and an identifier to the corresponding
// 1-segment path
pub fn ident_to_path(s: Span, identifier: Ident) -> Path {
ast::Path {
span: s,
global: false,
segments: vec!(
ast::PathSegment {
identifier: identifier,
parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
lifetimes: Vec::new(),
types: OwnedSlice::empty(),
bindings: OwnedSlice::empty(),
})
}
),
}
}
// If path is a single segment ident path, return that ident. Otherwise, return
// None.
pub fn path_to_ident(path: &Path) -> Option<Ident> {
if path.segments.len() != 1 {
return None;
}
let segment = &path.segments[0];
if !segment.parameters.is_empty() {
return None;
}
Some(segment.identifier)
}
pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> P<Pat> {
P(Pat {
id: id,
node: PatIdent(BindByValue(MutImmutable), codemap::Spanned{span:s, node:i}, None),
span: s
})
}
pub fn name_to_dummy_lifetime(name: Name) -> Lifetime {
Lifetime { id: DUMMY_NODE_ID,
span: codemap::DUMMY_SP,
name: name }
}
/// Generate a "pretty" name for an `impl` from its type and trait.
/// This is designed so that symbols of `impl`'d methods give some
/// hint of where they came from (previously they would all just be
/// listed as `__extensions__::method_name::hash`, with no indication
/// of the type).
pub fn impl_pretty_name(trait_ref: &Option<TraitRef>, ty: Option<&Ty>) -> Ident {
let mut pretty = match ty {
Some(t) => pprust::ty_to_string(t),
None => String::from("..")
};
match *trait_ref {
Some(ref trait_ref) => {
pretty.push('.');
pretty.push_str(&pprust::path_to_string(&trait_ref.path));
}
None => {}
}
token::gensym_ident(&pretty[..])
}
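// Shape sketch of the naming scheme above: with a trait and a self type this
// yields names like `Vec<T>.Clone`; with neither it degenerates to "..".
// `anonymous_impl_name` is a hypothetical helper, not part of the original API.
fn anonymous_impl_name() -> Ident {
    impl_pretty_name(&None, None)
}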
pub fn struct_field_visibility(field: ast::StructField) -> Visibility {
match field.node.kind {
ast::NamedField(_, v) | ast::UnnamedField(v) => v
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::BinOp_) -> usize {
match op {
// 'as' sits here with 12
BiMul | BiDiv | BiRem => 11,
BiAdd | BiSub => 10,
BiShl | BiShr => 9,
BiBitAnd => 8,
BiBitXor => 7,
BiBitOr => 6,
BiLt | BiLe | BiGe | BiGt | BiEq | BiNe => 3,
BiAnd => 2,
BiOr => 1
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub const AS_PREC: usize = 12;
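// Quick illustration: the table above orders operators the way the
// pretty-printer expects, with `as` binding tighter than any entry in it.
// `prec_ordering_sketch` is a hypothetical helper.
fn prec_ordering_sketch() {
    assert!(operator_prec(BiMul) > operator_prec(BiAdd));
    assert!(operator_prec(BiAdd) > operator_prec(BiShl));
    assert!(operator_prec(BiEq) > operator_prec(BiAnd));
    assert!(AS_PREC > operator_prec(BiMul));
}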
pub fn empty_generics() -> Generics {
Generics {
lifetimes: Vec::new(),
ty_params: OwnedSlice::empty(),
where_clause: WhereClause {
id: DUMMY_NODE_ID,
predicates: Vec::new(),
}
}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct IdRange {
pub min: NodeId,
pub max: NodeId,
}
impl IdRange {
pub fn max() -> IdRange {
IdRange {
min: u32::MAX,
max: u32::MIN,
}
}
pub fn empty(&self) -> bool {
self.min >= self.max
}
pub fn add(&mut self, id: NodeId) {
self.min = cmp::min(self.min, id);
self.max = cmp::max(self.max, id + 1);
}
}
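// A minimal sketch of how an `IdRange` accumulates node ids; `id_range_demo`
// is a hypothetical helper, and the asserted values follow from the code above.
fn id_range_demo() {
    let mut range = IdRange::max();
    assert!(range.empty());
    range.add(5);
    range.add(7);
    assert_eq!(range.min, 5);
    assert_eq!(range.max, 8); // `max` is exclusive
    assert!(!range.empty());
}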
pub trait IdVisitingOperation {
fn visit_id(&mut self, node_id: NodeId);
}
/// A visitor that applies its operation to all of the node IDs
/// in a visitable thing.
pub struct IdVisitor<'a, O:'a> {
pub operation: &'a mut O,
pub pass_through_items: bool,
pub visited_outermost: bool,
}
impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
fn visit_generics_helper(&mut self, generics: &Generics) {
for type_parameter in &*generics.ty_params {
self.operation.visit_id(type_parameter.id)
}
for lifetime in &generics.lifetimes {
self.operation.visit_id(lifetime.lifetime.id)
}
}
}
impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
fn visit_mod(&mut self,
module: &Mod,
_: Span,
node_id: NodeId) {
self.operation.visit_id(node_id);
visit::walk_mod(self, module)
}
fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
self.operation.visit_id(foreign_item.id);
visit::walk_foreign_item(self, foreign_item)
}
fn visit_item(&mut self, item: &Item) {
if !self.pass_through_items {
if self.visited_outermost {
return
} else {
self.visited_outermost = true
}
}
self.operation.visit_id(item.id);
match item.node {
ItemUse(ref view_path) => {
match view_path.node {
ViewPathSimple(_, _) |
ViewPathGlob(_) => {}
ViewPathList(_, ref paths) => {
for path in paths {
self.operation.visit_id(path.node.id())
}
}
}
}
ItemEnum(ref enum_definition, _) => {
for variant in &enum_definition.variants {
self.operation.visit_id(variant.node.id)
}
}
_ => {}
}
visit::walk_item(self, item);
self.visited_outermost = false
}
fn visit_local(&mut self, local: &Local) |
fn visit_block(&mut self, block: &Block) {
self.operation.visit_id(block.id);
visit::walk_block(self, block)
}
fn visit_stmt(&mut self, statement: &Stmt) {
self.operation.visit_id(ast_util::stmt_id(statement));
visit::walk_stmt(self, statement)
}
fn visit_pat(&mut self, pattern: &Pat) {
self.operation.visit_id(pattern.id);
visit::walk_pat(self, pattern)
}
fn visit_expr(&mut self, expression: &Expr) {
self.operation.visit_id(expression.id);
visit::walk_expr(self, expression)
}
fn visit_ty(&mut self, typ: &Ty) {
self.operation.visit_id(typ.id);
visit::walk_ty(self, typ)
}
fn visit_generics(&mut self, generics: &Generics) {
self.visit_generics_helper(generics);
visit::walk_generics(self, generics)
}
fn visit_fn(&mut self,
function_kind: visit::FnKind<'v>,
function_declaration: &'v FnDecl,
block: &'v Block,
span: Span,
node_id: NodeId) {
if !self.pass_through_items {
match function_kind {
visit::FkMethod(..) if self.visited_outermost => return,
visit::FkMethod(..) => self.visited_outermost = true,
_ => {}
}
}
self.operation.visit_id(node_id);
match function_kind {
visit::FkItemFn(_, generics, _, _, _) => {
self.visit_generics_helper(generics)
}
visit::FkMethod(_, sig, _) => {
self.visit_generics_helper(&sig.generics)
}
visit::FkFnBlock => {}
}
for argument in &function_declaration.inputs {
self.operation.visit_id(argument.id)
}
visit::walk_fn(self,
function_kind,
function_declaration,
block,
span);
if !self.pass_through_items {
if let visit::FkMethod(..) = function_kind {
self.visited_outermost = false;
}
}
}
fn visit_struct_field(&mut self, struct_field: &StructField) {
self.operation.visit_id(struct_field.node.id);
visit::walk_struct_field(self, struct_field)
}
fn visit_struct_def(&mut self,
struct_def: &StructDef,
_: ast::Ident,
_: &ast::Generics,
id: NodeId) {
self.operation.visit_id(id);
struct_def.ctor_id.map(|ctor_id| self.operation.visit_id(ctor_id));
visit::walk_struct_def(self, struct_def);
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.operation.visit_id(ti.id);
visit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.operation.visit_id(ii.id);
visit::walk_impl_item(self, ii);
}
fn visit_lifetime_ref(&mut self, lifetime: &Lifetime) {
self.operation.visit_id(lifetime.id);
}
fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
self.visit_lifetime_ref(&def.lifetime);
}
fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
self.operation.visit_id(trait_ref.ref_id);
visit::walk_trait_ref(self, trait_ref);
}
}
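// Usage sketch of the visitor above: a trivial `IdVisitingOperation` that
// counts the node ids in a block. `CountingOp` and `count_ids_in_block` are
// hypothetical names, not part of the original source.
struct CountingOp {
    seen: usize,
}

impl IdVisitingOperation for CountingOp {
    fn visit_id(&mut self, _node_id: NodeId) {
        self.seen += 1
    }
}

fn count_ids_in_block(block: &Block) -> usize {
    let mut op = CountingOp { seen: 0 };
    {
        let mut visitor = IdVisitor {
            operation: &mut op,
            pass_through_items: false,
            visited_outermost: false,
        };
        visitor.visit_block(block);
    }
    op.seen
}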
pub fn visit_ids_for_inlined_item<O: IdVisitingOperation>(item: &InlinedItem,
operation: &mut O) {
let mut id_visitor = IdVisitor {
operation: operation,
pass_through_items: true,
visited_outermost: false,
};
visit::walk_inlined_item(&mut id_visitor, item);
}
struct IdRangeComputingVisitor {
result: IdRange,
}
impl IdVisitingOperation for IdRangeComputingVisitor {
fn visit_id(&mut self, id: NodeId) {
self.result.add(id);
}
}
pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange {
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
visit_ids_for_inlined_item(item, &mut visitor);
visitor.result
}
/// Computes the id range for a single fn body, ignoring nested items.
pub fn compute_id_range_for_fn_body(fk: visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
id: NodeId)
-> IdRange
{
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
let mut id_visitor = IdVisitor {
operation: &mut visitor,
pass_through_items: false,
visited_outermost: false,
};
id_visitor.visit_fn(fk, decl, body, sp, id);
id_visitor.operation.result
}
pub fn walk_pat<F>(pat: &Pat, mut it: F) -> bool where F: FnMut(&Pat) -> bool {
// FIXME(#19596) this is a workaround, but there should be a better way
fn walk_pat_<G>(pat: &Pat, it: &mut G) -> bool where G: FnMut(&Pat) -> bool {
if !(*it)(pat) {
return false;
}
match pat.node {
PatIdent(_, _, Some(ref p)) => walk_pat_(&**p, it),
PatStruct(_, ref fields, _) => {
fields.iter().all(|field| walk_pat_(&*field.node.pat, it))
}
PatEnum(_, Some(ref s)) | PatTup(ref s) => {
s.iter().all(|p| walk_pat_(&**p, it))
}
PatBox(ref s) | PatRegion(ref s, _) => {
walk_pat_(&**s, it)
}
PatVec(ref before, ref slice, ref after) => {
before.iter().all(|p| walk_pat_(&**p, it)) &&
slice.iter().all(|p| walk_pat_(&**p, it)) &&
after.iter().all(|p| walk_pat_(&**p, it))
}
PatMac(_) => panic!("attempted to analyze unexpanded pattern"),
PatWild(_) | PatLit(_) | PatRange(_, _) | PatIdent(_, _, _) |
PatEnum(_, _) | PatQPath(_, _) => {
true
}
}
}
walk_pat_(pat, &mut it)
}
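// A minimal sketch: `walk_pat` drives the closure over every subpattern and
// stops early once it returns false. `contains_wildcard` is a hypothetical
// helper, not part of the original source.
fn contains_wildcard(pat: &Pat) -> bool {
    let mut found = false;
    walk_pat(pat, |p| {
        if let PatWild(_) = p.node {
            found = true;
            false // stop walking
        } else {
            true
        }
    });
    found
}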
/// Returns true if the given struct def is tuple-like, i.e. its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: &ast::StructDef) -> bool {
struct_def.ctor_id.is_some()
}
/// Returns true if the given pattern consists solely of an identifier
/// and false otherwise.
pub fn pat_is_ident(pat: P<ast::Pat>) -> bool {
match pat.node {
ast::PatIdent(..) => true,
_ => false,
}
}
// are two paths equal when compared unhygienically?
// since I'm using this to replace ==, it seems appropriate
// to compare the span, global, etc. fields as well.
pub fn path_name_eq(a : &ast::Path, b : &ast::Path) -> bool {
(a.span == b.span)
&& (a.global == b.global)
&& (segments_name_eq(&a.segments[..], &b.segments[..]))
}
// are two arrays of segments equal when compared unhygienically?
pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
a.len() == b.len() &&
a.iter().zip(b.iter()).all(|(s, t)| {
s.identifier.name == t.identifier.name &&
// FIXME #7743: ident -> name problems in lifetime comparison?
// can types contain idents?
s.parameters == t.parameters
})
}
/// Returns true if this literal is a string and false otherwise.
pub fn lit_is_str(lit: &Lit) -> bool {
match lit.node {
LitStr(..) => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use ast::*;
use super::*;
fn ident_to_segment(id : &Ident) -> PathSegment {
PathSegment {identifier: id.clone(),
parameters: PathParameters::none()}
}
#[test] fn idents_name_eq_test() {
assert!(segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(78),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
assert!(!segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(77),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
}
}
| {
self.operation.visit_id(local.id);
visit::walk_local(self, local)
} | identifier_body |
ast_util.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::*;
use ast;
use ast_util;
use codemap;
use codemap::Span;
use owned_slice::OwnedSlice;
use parse::token;
use print::pprust;
use ptr::P;
use visit::Visitor;
use visit;
use std::cmp;
use std::u32;
pub fn path_name_i(idents: &[Ident]) -> String {
// FIXME: Bad copies (#2543 -- same for everything else that says "bad")
idents.iter().map(|i| {
token::get_ident(*i).to_string()
}).collect::<Vec<String>>().connect("::")
}
pub fn local_def(id: NodeId) -> DefId {
ast::DefId { krate: LOCAL_CRATE, node: id }
}
pub fn is_local(did: ast::DefId) -> bool { did.krate == LOCAL_CRATE }
pub fn stmt_id(s: &Stmt) -> NodeId {
match s.node {
StmtDecl(_, id) => id,
StmtExpr(_, id) => id,
StmtSemi(_, id) => id,
StmtMac(..) => panic!("attempted to analyze unexpanded stmt")
}
}
pub fn binop_to_string(op: BinOp_) -> &'static str {
match op {
BiAdd => "+",
BiSub => "-",
BiMul => "*",
BiDiv => "/",
BiRem => "%",
BiAnd => "&&",
BiOr => "||",
BiBitXor => "^",
BiBitAnd => "&",
BiBitOr => "|",
BiShl => "<<",
BiShr => ">>",
BiEq => "==",
BiLt => "<",
BiLe => "<=",
BiNe => "!=",
BiGe => ">=",
BiGt => ">"
}
}
pub fn lazy_binop(b: BinOp_) -> bool {
match b {
BiAnd => true,
BiOr => true,
_ => false
}
}
pub fn is_shift_binop(b: BinOp_) -> bool {
match b {
BiShl => true,
BiShr => true,
_ => false
}
}
pub fn is_comparison_binop(b: BinOp_) -> bool {
match b {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
/// Returns `true` if the binary operator takes its arguments by value
pub fn is_by_value_binop(b: BinOp_) -> bool {
!is_comparison_binop(b)
}
/// Returns `true` if the unary operator takes its argument by value
pub fn is_by_value_unop(u: UnOp) -> bool {
match u {
UnNeg | UnNot => true,
_ => false,
}
}
pub fn unop_to_string(op: UnOp) -> &'static str {
match op {
UnUniq => "box() ",
UnDeref => "*",
UnNot => "!",
UnNeg => "-",
}
}
pub fn is_path(e: P<Expr>) -> bool {
match e.node { ExprPath(..) => true, _ => false }
}
/// Get a string representation of a signed int type, with its value.
/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
let s = match t {
TyIs => "isize",
TyI8 => "i8",
TyI16 => "i16",
TyI32 => "i32",
TyI64 => "i64"
};
match val {
// cast to a u64 so we can correctly print INT64_MIN. All integral types
// are parsed as u64, so we wouldn't want to print an extra negative
// sign.
Some(n) => format!("{}{}", n as u64, s),
None => s.to_string()
}
}
pub fn int_ty_max(t: IntTy) -> u64 {
match t {
TyI8 => 0x80,
TyI16 => 0x8000,
TyIs | TyI32 => 0x80000000, // not accurate for TyIs, which is pointer-sized
TyI64 => 0x8000000000000000
}
}
/// Get a string representation of an unsigned int type, with its value.
/// We want to avoid "42u" in favor of "42us". "42uint" is right out.
pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
let s = match t {
TyUs => "usize",
TyU8 => "u8",
TyU16 => "u16",
TyU32 => "u32",
TyU64 => "u64"
};
match val {
Some(n) => format!("{}{}", n, s),
None => s.to_string()
}
}
pub fn uint_ty_max(t: UintTy) -> u64 {
match t {
TyU8 => 0xff,
TyU16 => 0xffff,
TyUs | TyU32 => 0xffffffff, // not accurate for TyUs, which is pointer-sized
TyU64 => 0xffffffffffffffff
}
}
pub fn float_ty_to_string(t: FloatTy) -> String {
match t {
TyF32 => "f32".to_string(),
TyF64 => "f64".to_string(),
}
}
// convert a span and an identifier to the corresponding
// 1-segment path
pub fn ident_to_path(s: Span, identifier: Ident) -> Path {
ast::Path {
span: s,
global: false,
segments: vec!(
ast::PathSegment {
identifier: identifier,
parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
lifetimes: Vec::new(),
types: OwnedSlice::empty(),
bindings: OwnedSlice::empty(),
})
}
),
}
}
// If path is a single segment ident path, return that ident. Otherwise, return
// None.
pub fn path_to_ident(path: &Path) -> Option<Ident> {
if path.segments.len() != 1 {
return None;
}
let segment = &path.segments[0];
if !segment.parameters.is_empty() {
return None;
}
Some(segment.identifier)
}
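// Round-trip sketch: `ident_to_path` above builds a one-segment path with
// empty parameters, so `path_to_ident` recovers the ident again.
// `ident_path_round_trip` is a hypothetical helper.
fn ident_path_round_trip(sp: Span, id: Ident) -> bool {
    let path = ident_to_path(sp, id);
    match path_to_ident(&path) {
        Some(recovered) => recovered.name == id.name,
        None => false,
    }
}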
pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> P<Pat> {
P(Pat {
id: id,
node: PatIdent(BindByValue(MutImmutable), codemap::Spanned{span:s, node:i}, None),
span: s
})
}
pub fn name_to_dummy_lifetime(name: Name) -> Lifetime {
Lifetime { id: DUMMY_NODE_ID,
span: codemap::DUMMY_SP,
name: name }
}
/// Generate a "pretty" name for an `impl` from its type and trait.
/// This is designed so that symbols of `impl`'d methods give some
/// hint of where they came from (previously they would all just be
/// listed as `__extensions__::method_name::hash`, with no indication
/// of the type).
pub fn impl_pretty_name(trait_ref: &Option<TraitRef>, ty: Option<&Ty>) -> Ident {
let mut pretty = match ty {
Some(t) => pprust::ty_to_string(t),
None => String::from("..")
};
match *trait_ref {
Some(ref trait_ref) => {
pretty.push('.');
pretty.push_str(&pprust::path_to_string(&trait_ref.path));
}
None => {}
}
token::gensym_ident(&pretty[..])
}
pub fn struct_field_visibility(field: ast::StructField) -> Visibility {
match field.node.kind {
ast::NamedField(_, v) | ast::UnnamedField(v) => v
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::BinOp_) -> usize {
match op {
// 'as' sits here with 12
BiMul | BiDiv | BiRem => 11,
BiAdd | BiSub => 10,
BiShl | BiShr => 9,
BiBitAnd => 8,
BiBitXor => 7,
BiBitOr => 6,
BiLt | BiLe | BiGe | BiGt | BiEq | BiNe => 3,
BiAnd => 2,
BiOr => 1
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub const AS_PREC: usize = 12;
pub fn empty_generics() -> Generics {
Generics {
lifetimes: Vec::new(),
ty_params: OwnedSlice::empty(),
where_clause: WhereClause {
id: DUMMY_NODE_ID,
predicates: Vec::new(),
}
}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct IdRange {
pub min: NodeId,
pub max: NodeId,
}
impl IdRange {
pub fn max() -> IdRange {
IdRange {
min: u32::MAX,
max: u32::MIN,
}
}
pub fn empty(&self) -> bool {
self.min >= self.max
}
pub fn add(&mut self, id: NodeId) {
self.min = cmp::min(self.min, id);
self.max = cmp::max(self.max, id + 1);
}
}
pub trait IdVisitingOperation {
fn visit_id(&mut self, node_id: NodeId);
}
/// A visitor that applies its operation to all of the node IDs
/// in a visitable thing.
pub struct IdVisitor<'a, O:'a> {
pub operation: &'a mut O,
pub pass_through_items: bool,
pub visited_outermost: bool,
}
impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
fn visit_generics_helper(&mut self, generics: &Generics) {
for type_parameter in &*generics.ty_params {
self.operation.visit_id(type_parameter.id)
}
for lifetime in &generics.lifetimes {
self.operation.visit_id(lifetime.lifetime.id)
}
}
}
impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
fn visit_mod(&mut self,
module: &Mod,
_: Span,
node_id: NodeId) {
self.operation.visit_id(node_id);
visit::walk_mod(self, module)
}
fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
self.operation.visit_id(foreign_item.id);
visit::walk_foreign_item(self, foreign_item)
}
fn visit_item(&mut self, item: &Item) {
if !self.pass_through_items {
if self.visited_outermost {
return
} else {
self.visited_outermost = true
}
}
self.operation.visit_id(item.id);
match item.node {
ItemUse(ref view_path) => {
match view_path.node {
ViewPathSimple(_, _) |
ViewPathGlob(_) => {}
ViewPathList(_, ref paths) => {
for path in paths {
self.operation.visit_id(path.node.id())
}
}
}
}
ItemEnum(ref enum_definition, _) => {
for variant in &enum_definition.variants {
self.operation.visit_id(variant.node.id)
}
}
_ => {}
}
visit::walk_item(self, item);
self.visited_outermost = false
}
fn visit_local(&mut self, local: &Local) {
self.operation.visit_id(local.id);
visit::walk_local(self, local)
}
fn visit_block(&mut self, block: &Block) {
self.operation.visit_id(block.id);
visit::walk_block(self, block)
}
fn visit_stmt(&mut self, statement: &Stmt) {
self.operation.visit_id(ast_util::stmt_id(statement));
visit::walk_stmt(self, statement)
}
fn visit_pat(&mut self, pattern: &Pat) {
self.operation.visit_id(pattern.id);
visit::walk_pat(self, pattern)
}
fn visit_expr(&mut self, expression: &Expr) {
self.operation.visit_id(expression.id);
visit::walk_expr(self, expression)
}
fn visit_ty(&mut self, typ: &Ty) {
self.operation.visit_id(typ.id);
visit::walk_ty(self, typ)
}
fn visit_generics(&mut self, generics: &Generics) {
self.visit_generics_helper(generics);
visit::walk_generics(self, generics)
}
fn visit_fn(&mut self,
function_kind: visit::FnKind<'v>,
function_declaration: &'v FnDecl,
block: &'v Block,
span: Span,
node_id: NodeId) {
if !self.pass_through_items {
match function_kind {
visit::FkMethod(..) if self.visited_outermost => return,
visit::FkMethod(..) => self.visited_outermost = true,
_ => |
}
}
self.operation.visit_id(node_id);
match function_kind {
visit::FkItemFn(_, generics, _, _, _) => {
self.visit_generics_helper(generics)
}
visit::FkMethod(_, sig, _) => {
self.visit_generics_helper(&sig.generics)
}
visit::FkFnBlock => {}
}
for argument in &function_declaration.inputs {
self.operation.visit_id(argument.id)
}
visit::walk_fn(self,
function_kind,
function_declaration,
block,
span);
if !self.pass_through_items {
if let visit::FkMethod(..) = function_kind {
self.visited_outermost = false;
}
}
}
fn visit_struct_field(&mut self, struct_field: &StructField) {
self.operation.visit_id(struct_field.node.id);
visit::walk_struct_field(self, struct_field)
}
fn visit_struct_def(&mut self,
struct_def: &StructDef,
_: ast::Ident,
_: &ast::Generics,
id: NodeId) {
self.operation.visit_id(id);
struct_def.ctor_id.map(|ctor_id| self.operation.visit_id(ctor_id));
visit::walk_struct_def(self, struct_def);
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.operation.visit_id(ti.id);
visit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.operation.visit_id(ii.id);
visit::walk_impl_item(self, ii);
}
fn visit_lifetime_ref(&mut self, lifetime: &Lifetime) {
self.operation.visit_id(lifetime.id);
}
fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
self.visit_lifetime_ref(&def.lifetime);
}
fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
self.operation.visit_id(trait_ref.ref_id);
visit::walk_trait_ref(self, trait_ref);
}
}
pub fn visit_ids_for_inlined_item<O: IdVisitingOperation>(item: &InlinedItem,
operation: &mut O) {
let mut id_visitor = IdVisitor {
operation: operation,
pass_through_items: true,
visited_outermost: false,
};
visit::walk_inlined_item(&mut id_visitor, item);
}
struct IdRangeComputingVisitor {
result: IdRange,
}
impl IdVisitingOperation for IdRangeComputingVisitor {
fn visit_id(&mut self, id: NodeId) {
self.result.add(id);
}
}
pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange {
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
visit_ids_for_inlined_item(item, &mut visitor);
visitor.result
}
/// Computes the id range for a single fn body, ignoring nested items.
pub fn compute_id_range_for_fn_body(fk: visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
id: NodeId)
-> IdRange
{
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
let mut id_visitor = IdVisitor {
operation: &mut visitor,
pass_through_items: false,
visited_outermost: false,
};
id_visitor.visit_fn(fk, decl, body, sp, id);
id_visitor.operation.result
}
pub fn walk_pat<F>(pat: &Pat, mut it: F) -> bool where F: FnMut(&Pat) -> bool {
// FIXME(#19596) this is a workaround, but there should be a better way
fn walk_pat_<G>(pat: &Pat, it: &mut G) -> bool where G: FnMut(&Pat) -> bool {
if !(*it)(pat) {
return false;
}
match pat.node {
PatIdent(_, _, Some(ref p)) => walk_pat_(&**p, it),
PatStruct(_, ref fields, _) => {
fields.iter().all(|field| walk_pat_(&*field.node.pat, it))
}
PatEnum(_, Some(ref s)) | PatTup(ref s) => {
s.iter().all(|p| walk_pat_(&**p, it))
}
PatBox(ref s) | PatRegion(ref s, _) => {
walk_pat_(&**s, it)
}
PatVec(ref before, ref slice, ref after) => {
before.iter().all(|p| walk_pat_(&**p, it)) &&
slice.iter().all(|p| walk_pat_(&**p, it)) &&
after.iter().all(|p| walk_pat_(&**p, it))
}
PatMac(_) => panic!("attempted to analyze unexpanded pattern"),
PatWild(_) | PatLit(_) | PatRange(_, _) | PatIdent(_, _, _) |
PatEnum(_, _) | PatQPath(_, _) => {
true
}
}
}
walk_pat_(pat, &mut it)
}
/// Returns true if the given struct def is tuple-like, i.e. its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: &ast::StructDef) -> bool {
struct_def.ctor_id.is_some()
}
/// Returns true if the given pattern consists solely of an identifier
/// and false otherwise.
pub fn pat_is_ident(pat: P<ast::Pat>) -> bool {
match pat.node {
ast::PatIdent(..) => true,
_ => false,
}
}
// are two paths equal when compared unhygienically?
// since I'm using this to replace ==, it seems appropriate
// to compare the span, global, etc. fields as well.
pub fn path_name_eq(a : &ast::Path, b : &ast::Path) -> bool {
(a.span == b.span)
&& (a.global == b.global)
&& (segments_name_eq(&a.segments[..], &b.segments[..]))
}
// are two arrays of segments equal when compared unhygienically?
pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
a.len() == b.len() &&
a.iter().zip(b.iter()).all(|(s, t)| {
s.identifier.name == t.identifier.name &&
// FIXME #7743: ident -> name problems in lifetime comparison?
// can types contain idents?
s.parameters == t.parameters
})
}
/// Returns true if this literal is a string and false otherwise.
pub fn lit_is_str(lit: &Lit) -> bool {
match lit.node {
LitStr(..) => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use ast::*;
use super::*;
fn ident_to_segment(id : &Ident) -> PathSegment {
PathSegment {identifier: id.clone(),
parameters: PathParameters::none()}
}
#[test] fn idents_name_eq_test() {
assert!(segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(78),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
assert!(!segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(77),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
}
}
| {} | conditional_block |
ast_util.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::*;
use ast;
use ast_util;
use codemap;
use codemap::Span;
use owned_slice::OwnedSlice;
use parse::token;
use print::pprust;
use ptr::P;
use visit::Visitor;
use visit;
use std::cmp;
use std::u32;
pub fn path_name_i(idents: &[Ident]) -> String {
// FIXME: Bad copies (#2543 -- same for everything else that says "bad")
idents.iter().map(|i| {
token::get_ident(*i).to_string()
}).collect::<Vec<String>>().connect("::")
}
pub fn local_def(id: NodeId) -> DefId {
ast::DefId { krate: LOCAL_CRATE, node: id }
}
pub fn is_local(did: ast::DefId) -> bool { did.krate == LOCAL_CRATE }
pub fn stmt_id(s: &Stmt) -> NodeId {
match s.node {
StmtDecl(_, id) => id,
StmtExpr(_, id) => id,
StmtSemi(_, id) => id,
StmtMac(..) => panic!("attempted to analyze unexpanded stmt")
}
}
pub fn binop_to_string(op: BinOp_) -> &'static str {
match op {
BiAdd => "+",
BiSub => "-",
BiMul => "*",
BiDiv => "/",
BiRem => "%",
BiAnd => "&&",
BiOr => "||",
BiBitXor => "^",
BiBitAnd => "&",
BiBitOr => "|",
BiShl => "<<",
BiShr => ">>",
BiEq => "==",
BiLt => "<",
BiLe => "<=",
BiNe => "!=",
BiGe => ">=",
BiGt => ">"
}
}
pub fn lazy_binop(b: BinOp_) -> bool {
match b {
BiAnd => true,
BiOr => true,
_ => false
}
}
pub fn is_shift_binop(b: BinOp_) -> bool {
match b {
BiShl => true,
BiShr => true,
_ => false
}
}
pub fn is_comparison_binop(b: BinOp_) -> bool {
match b {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
/// Returns `true` if the binary operator takes its arguments by value
pub fn is_by_value_binop(b: BinOp_) -> bool {
!is_comparison_binop(b)
}
/// Returns `true` if the unary operator takes its argument by value
pub fn is_by_value_unop(u: UnOp) -> bool {
match u {
UnNeg | UnNot => true,
_ => false,
}
}
pub fn unop_to_string(op: UnOp) -> &'static str {
match op {
UnUniq => "box() ",
UnDeref => "*",
UnNot => "!",
UnNeg => "-",
}
}
pub fn is_path(e: P<Expr>) -> bool {
match e.node { ExprPath(..) => true, _ => false }
}
/// Get a string representation of a signed int type, with its value.
/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
let s = match t {
TyIs => "isize",
TyI8 => "i8",
TyI16 => "i16",
TyI32 => "i32",
TyI64 => "i64"
};
match val {
// cast to a u64 so we can correctly print INT64_MIN. All integral types
// are parsed as u64, so we wouldn't want to print an extra negative
// sign.
Some(n) => format!("{}{}", n as u64, s),
None => s.to_string()
}
}
pub fn int_ty_max(t: IntTy) -> u64 {
match t {
TyI8 => 0x80,
TyI16 => 0x8000,
TyIs | TyI32 => 0x80000000, // actually ni about TyIs
TyI64 => 0x8000000000000000
}
}
/// Get a string representation of an unsigned int type, with its value.
/// We want to avoid "42u" in favor of "42us". "42uint" is right out.
pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
let s = match t {
TyUs => "usize",
TyU8 => "u8",
TyU16 => "u16",
TyU32 => "u32",
TyU64 => "u64"
};
match val {
Some(n) => format!("{}{}", n, s),
None => s.to_string()
}
}
pub fn uint_ty_max(t: UintTy) -> u64 {
match t {
TyU8 => 0xff,
TyU16 => 0xffff,
TyUs | TyU32 => 0xffffffff, // actually ni about TyUs
TyU64 => 0xffffffffffffffff
}
}
pub fn float_ty_to_string(t: FloatTy) -> String {
match t {
TyF32 => "f32".to_string(),
TyF64 => "f64".to_string(),
}
}
// convert a span and an identifier to the corresponding
// 1-segment path
pub fn ident_to_path(s: Span, identifier: Ident) -> Path {
ast::Path {
span: s,
global: false,
segments: vec!(
ast::PathSegment {
identifier: identifier,
parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
lifetimes: Vec::new(),
types: OwnedSlice::empty(),
bindings: OwnedSlice::empty(),
})
}
),
}
}
// If path is a single segment ident path, return that ident. Otherwise, return
// None.
pub fn path_to_ident(path: &Path) -> Option<Ident> {
if path.segments.len() != 1 {
return None;
}
let segment = &path.segments[0];
if !segment.parameters.is_empty() {
return None;
}
Some(segment.identifier)
}
pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> P<Pat> {
P(Pat {
id: id,
node: PatIdent(BindByValue(MutImmutable), codemap::Spanned{span:s, node:i}, None),
span: s
})
}
pub fn name_to_dummy_lifetime(name: Name) -> Lifetime {
Lifetime { id: DUMMY_NODE_ID,
span: codemap::DUMMY_SP,
name: name }
}
/// Generate a "pretty" name for an `impl` from its type and trait.
/// This is designed so that symbols of `impl`'d methods give some
/// hint of where they came from, (previously they would all just be
/// listed as `__extensions__::method_name::hash`, with no indication
/// of the type).
pub fn impl_pretty_name(trait_ref: &Option<TraitRef>, ty: Option<&Ty>) -> Ident {
let mut pretty = match ty {
Some(t) => pprust::ty_to_string(t),
None => String::from("..")
};
match *trait_ref {
Some(ref trait_ref) => {
pretty.push('.');
pretty.push_str(&pprust::path_to_string(&trait_ref.path));
}
None => {}
}
token::gensym_ident(&pretty[..])
}
pub fn struct_field_visibility(field: ast::StructField) -> Visibility {
match field.node.kind {
ast::NamedField(_, v) | ast::UnnamedField(v) => v
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::BinOp_) -> usize {
match op {
// 'as' sits here with 12
BiMul | BiDiv | BiRem => 11,
BiAdd | BiSub => 10,
BiShl | BiShr => 9,
BiBitAnd => 8,
BiBitXor => 7,
BiBitOr => 6,
BiLt | BiLe | BiGe | BiGt | BiEq | BiNe => 3,
BiAnd => 2,
BiOr => 1
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub const AS_PREC: usize = 12;
pub fn empty_generics() -> Generics {
Generics {
lifetimes: Vec::new(),
ty_params: OwnedSlice::empty(),
where_clause: WhereClause {
id: DUMMY_NODE_ID,
predicates: Vec::new(),
}
}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct IdRange {
pub min: NodeId,
pub max: NodeId,
}
impl IdRange {
pub fn max() -> IdRange {
IdRange {
min: u32::MAX,
max: u32::MIN,
}
}
pub fn empty(&self) -> bool {
self.min >= self.max
}
pub fn add(&mut self, id: NodeId) {
self.min = cmp::min(self.min, id);
self.max = cmp::max(self.max, id + 1);
}
}
pub trait IdVisitingOperation {
fn visit_id(&mut self, node_id: NodeId);
}
/// A visitor that applies its operation to all of the node IDs
/// in a visitable thing.
pub struct IdVisitor<'a, O:'a> {
pub operation: &'a mut O,
pub pass_through_items: bool,
pub visited_outermost: bool,
}
impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
fn visit_generics_helper(&mut self, generics: &Generics) {
for type_parameter in &*generics.ty_params {
self.operation.visit_id(type_parameter.id)
}
for lifetime in &generics.lifetimes {
self.operation.visit_id(lifetime.lifetime.id)
}
}
}
impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
fn visit_mod(&mut self,
module: &Mod,
_: Span,
node_id: NodeId) {
self.operation.visit_id(node_id);
visit::walk_mod(self, module) | }
fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
self.operation.visit_id(foreign_item.id);
visit::walk_foreign_item(self, foreign_item)
}
fn visit_item(&mut self, item: &Item) {
if !self.pass_through_items {
if self.visited_outermost {
return
} else {
self.visited_outermost = true
}
}
self.operation.visit_id(item.id);
match item.node {
ItemUse(ref view_path) => {
match view_path.node {
ViewPathSimple(_, _) |
ViewPathGlob(_) => {}
ViewPathList(_, ref paths) => {
for path in paths {
self.operation.visit_id(path.node.id())
}
}
}
}
ItemEnum(ref enum_definition, _) => {
for variant in &enum_definition.variants {
self.operation.visit_id(variant.node.id)
}
}
_ => {}
}
visit::walk_item(self, item);
self.visited_outermost = false
}
fn visit_local(&mut self, local: &Local) {
self.operation.visit_id(local.id);
visit::walk_local(self, local)
}
fn visit_block(&mut self, block: &Block) {
self.operation.visit_id(block.id);
visit::walk_block(self, block)
}
fn visit_stmt(&mut self, statement: &Stmt) {
self.operation.visit_id(ast_util::stmt_id(statement));
visit::walk_stmt(self, statement)
}
fn visit_pat(&mut self, pattern: &Pat) {
self.operation.visit_id(pattern.id);
visit::walk_pat(self, pattern)
}
fn visit_expr(&mut self, expression: &Expr) {
self.operation.visit_id(expression.id);
visit::walk_expr(self, expression)
}
fn visit_ty(&mut self, typ: &Ty) {
self.operation.visit_id(typ.id);
visit::walk_ty(self, typ)
}
fn visit_generics(&mut self, generics: &Generics) {
self.visit_generics_helper(generics);
visit::walk_generics(self, generics)
}
fn visit_fn(&mut self,
function_kind: visit::FnKind<'v>,
function_declaration: &'v FnDecl,
block: &'v Block,
span: Span,
node_id: NodeId) {
if !self.pass_through_items {
match function_kind {
visit::FkMethod(..) if self.visited_outermost => return,
visit::FkMethod(..) => self.visited_outermost = true,
_ => {}
}
}
self.operation.visit_id(node_id);
match function_kind {
visit::FkItemFn(_, generics, _, _, _) => {
self.visit_generics_helper(generics)
}
visit::FkMethod(_, sig, _) => {
self.visit_generics_helper(&sig.generics)
}
visit::FkFnBlock => {}
}
for argument in &function_declaration.inputs {
self.operation.visit_id(argument.id)
}
visit::walk_fn(self,
function_kind,
function_declaration,
block,
span);
if !self.pass_through_items {
if let visit::FkMethod(..) = function_kind {
self.visited_outermost = false;
}
}
}
fn visit_struct_field(&mut self, struct_field: &StructField) {
self.operation.visit_id(struct_field.node.id);
visit::walk_struct_field(self, struct_field)
}
fn visit_struct_def(&mut self,
struct_def: &StructDef,
_: ast::Ident,
_: &ast::Generics,
id: NodeId) {
self.operation.visit_id(id);
struct_def.ctor_id.map(|ctor_id| self.operation.visit_id(ctor_id));
visit::walk_struct_def(self, struct_def);
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.operation.visit_id(ti.id);
visit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.operation.visit_id(ii.id);
visit::walk_impl_item(self, ii);
}
fn visit_lifetime_ref(&mut self, lifetime: &Lifetime) {
self.operation.visit_id(lifetime.id);
}
fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
self.visit_lifetime_ref(&def.lifetime);
}
fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
self.operation.visit_id(trait_ref.ref_id);
visit::walk_trait_ref(self, trait_ref);
}
}
pub fn visit_ids_for_inlined_item<O: IdVisitingOperation>(item: &InlinedItem,
operation: &mut O) {
let mut id_visitor = IdVisitor {
operation: operation,
pass_through_items: true,
visited_outermost: false,
};
visit::walk_inlined_item(&mut id_visitor, item);
}
struct IdRangeComputingVisitor {
result: IdRange,
}
impl IdVisitingOperation for IdRangeComputingVisitor {
fn visit_id(&mut self, id: NodeId) {
self.result.add(id);
}
}
pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange {
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
visit_ids_for_inlined_item(item, &mut visitor);
visitor.result
}
/// Computes the id range for a single fn body, ignoring nested items.
pub fn compute_id_range_for_fn_body(fk: visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
id: NodeId)
-> IdRange
{
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
let mut id_visitor = IdVisitor {
operation: &mut visitor,
pass_through_items: false,
visited_outermost: false,
};
id_visitor.visit_fn(fk, decl, body, sp, id);
id_visitor.operation.result
}
pub fn walk_pat<F>(pat: &Pat, mut it: F) -> bool where F: FnMut(&Pat) -> bool {
// FIXME(#19596) this is a workaround, but there should be a better way
fn walk_pat_<G>(pat: &Pat, it: &mut G) -> bool where G: FnMut(&Pat) -> bool {
if !(*it)(pat) {
return false;
}
match pat.node {
PatIdent(_, _, Some(ref p)) => walk_pat_(&**p, it),
PatStruct(_, ref fields, _) => {
fields.iter().all(|field| walk_pat_(&*field.node.pat, it))
}
PatEnum(_, Some(ref s)) | PatTup(ref s) => {
s.iter().all(|p| walk_pat_(&**p, it))
}
PatBox(ref s) | PatRegion(ref s, _) => {
walk_pat_(&**s, it)
}
PatVec(ref before, ref slice, ref after) => {
before.iter().all(|p| walk_pat_(&**p, it)) &&
slice.iter().all(|p| walk_pat_(&**p, it)) &&
after.iter().all(|p| walk_pat_(&**p, it))
}
PatMac(_) => panic!("attempted to analyze unexpanded pattern"),
PatWild(_) | PatLit(_) | PatRange(_, _) | PatIdent(_, _, _) |
PatEnum(_, _) | PatQPath(_, _) => {
true
}
}
}
walk_pat_(pat, &mut it)
}
/// Returns true if the given struct def is tuple-like; i.e. that its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: &ast::StructDef) -> bool {
struct_def.ctor_id.is_some()
}
/// Returns true if the given pattern consists solely of an identifier
/// and false otherwise.
pub fn pat_is_ident(pat: P<ast::Pat>) -> bool {
match pat.node {
ast::PatIdent(..) => true,
_ => false,
}
}
// are two paths equal when compared unhygienically?
// since I'm using this to replace ==, it seems appropriate
// to compare the span, global, etc. fields as well.
pub fn path_name_eq(a : &ast::Path, b : &ast::Path) -> bool {
(a.span == b.span)
&& (a.global == b.global)
&& (segments_name_eq(&a.segments[..], &b.segments[..]))
}
// are two arrays of segments equal when compared unhygienically?
pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
a.len() == b.len() &&
a.iter().zip(b.iter()).all(|(s, t)| {
s.identifier.name == t.identifier.name &&
// FIXME #7743: ident -> name problems in lifetime comparison?
// can types contain idents?
s.parameters == t.parameters
})
}
/// Returns true if this literal is a string and false otherwise.
pub fn lit_is_str(lit: &Lit) -> bool {
match lit.node {
LitStr(..) => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use ast::*;
use super::*;
fn ident_to_segment(id : &Ident) -> PathSegment {
PathSegment {identifier: id.clone(),
parameters: PathParameters::none()}
}
#[test] fn idents_name_eq_test() {
assert!(segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(78),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
assert!(!segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(77),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
}
} | random_line_split | |
ast_util.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::*;
use ast;
use ast_util;
use codemap;
use codemap::Span;
use owned_slice::OwnedSlice;
use parse::token;
use print::pprust;
use ptr::P;
use visit::Visitor;
use visit;
use std::cmp;
use std::u32;
pub fn path_name_i(idents: &[Ident]) -> String {
// FIXME: Bad copies (#2543 -- same for everything else that says "bad")
idents.iter().map(|i| {
token::get_ident(*i).to_string()
}).collect::<Vec<String>>().connect("::")
}
pub fn local_def(id: NodeId) -> DefId {
ast::DefId { krate: LOCAL_CRATE, node: id }
}
pub fn is_local(did: ast::DefId) -> bool { did.krate == LOCAL_CRATE }
pub fn stmt_id(s: &Stmt) -> NodeId {
match s.node {
StmtDecl(_, id) => id,
StmtExpr(_, id) => id,
StmtSemi(_, id) => id,
StmtMac(..) => panic!("attempted to analyze unexpanded stmt")
}
}
pub fn binop_to_string(op: BinOp_) -> &'static str {
match op {
BiAdd => "+",
BiSub => "-",
BiMul => "*",
BiDiv => "/",
BiRem => "%",
BiAnd => "&&",
BiOr => "||",
BiBitXor => "^",
BiBitAnd => "&",
BiBitOr => "|",
BiShl => "<<",
BiShr => ">>",
BiEq => "==",
BiLt => "<",
BiLe => "<=",
BiNe => "!=",
BiGe => ">=",
BiGt => ">"
}
}
pub fn lazy_binop(b: BinOp_) -> bool {
match b {
BiAnd => true,
BiOr => true,
_ => false
}
}
pub fn is_shift_binop(b: BinOp_) -> bool {
match b {
BiShl => true,
BiShr => true,
_ => false
}
}
pub fn is_comparison_binop(b: BinOp_) -> bool {
match b {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
/// Returns `true` if the binary operator takes its arguments by value
pub fn is_by_value_binop(b: BinOp_) -> bool {
!is_comparison_binop(b)
}
/// Returns `true` if the unary operator takes its argument by value
pub fn is_by_value_unop(u: UnOp) -> bool {
match u {
UnNeg | UnNot => true,
_ => false,
}
}
pub fn unop_to_string(op: UnOp) -> &'static str {
match op {
UnUniq => "box() ",
UnDeref => "*",
UnNot => "!",
UnNeg => "-",
}
}
pub fn is_path(e: P<Expr>) -> bool {
match e.node { ExprPath(..) => true, _ => false }
}
/// Get a string representation of a signed int type, with its value.
/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
let s = match t {
TyIs => "isize",
TyI8 => "i8",
TyI16 => "i16",
TyI32 => "i32",
TyI64 => "i64"
};
match val {
// cast to a u64 so we can correctly print INT64_MIN. All integral types
// are parsed as u64, so we wouldn't want to print an extra negative
// sign.
Some(n) => format!("{}{}", n as u64, s),
None => s.to_string()
}
}
pub fn int_ty_max(t: IntTy) -> u64 {
match t {
TyI8 => 0x80,
TyI16 => 0x8000,
TyIs | TyI32 => 0x80000000, // actually ni about TyIs
TyI64 => 0x8000000000000000
}
}
/// Get a string representation of an unsigned int type, with its value.
/// We want to avoid "42u" in favor of "42us". "42uint" is right out.
pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
let s = match t {
TyUs => "usize",
TyU8 => "u8",
TyU16 => "u16",
TyU32 => "u32",
TyU64 => "u64"
};
match val {
Some(n) => format!("{}{}", n, s),
None => s.to_string()
}
}
pub fn uint_ty_max(t: UintTy) -> u64 {
match t {
TyU8 => 0xff,
TyU16 => 0xffff,
TyUs | TyU32 => 0xffffffff, // actually ni about TyUs
TyU64 => 0xffffffffffffffff
}
}
pub fn float_ty_to_string(t: FloatTy) -> String {
match t {
TyF32 => "f32".to_string(),
TyF64 => "f64".to_string(),
}
}
// convert a span and an identifier to the corresponding
// 1-segment path
pub fn ident_to_path(s: Span, identifier: Ident) -> Path {
ast::Path {
span: s,
global: false,
segments: vec!(
ast::PathSegment {
identifier: identifier,
parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
lifetimes: Vec::new(),
types: OwnedSlice::empty(),
bindings: OwnedSlice::empty(),
})
}
),
}
}
// If path is a single segment ident path, return that ident. Otherwise, return
// None.
pub fn path_to_ident(path: &Path) -> Option<Ident> {
if path.segments.len() != 1 {
return None;
}
let segment = &path.segments[0];
if !segment.parameters.is_empty() {
return None;
}
Some(segment.identifier)
}
pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> P<Pat> {
P(Pat {
id: id,
node: PatIdent(BindByValue(MutImmutable), codemap::Spanned{span:s, node:i}, None),
span: s
})
}
pub fn name_to_dummy_lifetime(name: Name) -> Lifetime {
Lifetime { id: DUMMY_NODE_ID,
span: codemap::DUMMY_SP,
name: name }
}
/// Generate a "pretty" name for an `impl` from its type and trait.
/// This is designed so that symbols of `impl`'d methods give some
/// hint of where they came from, (previously they would all just be
/// listed as `__extensions__::method_name::hash`, with no indication
/// of the type).
pub fn impl_pretty_name(trait_ref: &Option<TraitRef>, ty: Option<&Ty>) -> Ident {
let mut pretty = match ty {
Some(t) => pprust::ty_to_string(t),
None => String::from("..")
};
match *trait_ref {
Some(ref trait_ref) => {
pretty.push('.');
pretty.push_str(&pprust::path_to_string(&trait_ref.path));
}
None => {}
}
token::gensym_ident(&pretty[..])
}
pub fn struct_field_visibility(field: ast::StructField) -> Visibility {
match field.node.kind {
ast::NamedField(_, v) | ast::UnnamedField(v) => v
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::BinOp_) -> usize {
match op {
// 'as' sits here with 12
BiMul | BiDiv | BiRem => 11,
BiAdd | BiSub => 10,
BiShl | BiShr => 9,
BiBitAnd => 8,
BiBitXor => 7,
BiBitOr => 6,
BiLt | BiLe | BiGe | BiGt | BiEq | BiNe => 3,
BiAnd => 2,
BiOr => 1
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub const AS_PREC: usize = 12;
pub fn empty_generics() -> Generics {
Generics {
lifetimes: Vec::new(),
ty_params: OwnedSlice::empty(),
where_clause: WhereClause {
id: DUMMY_NODE_ID,
predicates: Vec::new(),
}
}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct IdRange {
pub min: NodeId,
pub max: NodeId,
}
impl IdRange {
pub fn max() -> IdRange {
IdRange {
min: u32::MAX,
max: u32::MIN,
}
}
pub fn empty(&self) -> bool {
self.min >= self.max
}
pub fn | (&mut self, id: NodeId) {
self.min = cmp::min(self.min, id);
self.max = cmp::max(self.max, id + 1);
}
}
pub trait IdVisitingOperation {
fn visit_id(&mut self, node_id: NodeId);
}
/// A visitor that applies its operation to all of the node IDs
/// in a visitable thing.
pub struct IdVisitor<'a, O:'a> {
pub operation: &'a mut O,
pub pass_through_items: bool,
pub visited_outermost: bool,
}
impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
fn visit_generics_helper(&mut self, generics: &Generics) {
for type_parameter in &*generics.ty_params {
self.operation.visit_id(type_parameter.id)
}
for lifetime in &generics.lifetimes {
self.operation.visit_id(lifetime.lifetime.id)
}
}
}
impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
fn visit_mod(&mut self,
module: &Mod,
_: Span,
node_id: NodeId) {
self.operation.visit_id(node_id);
visit::walk_mod(self, module)
}
fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
self.operation.visit_id(foreign_item.id);
visit::walk_foreign_item(self, foreign_item)
}
fn visit_item(&mut self, item: &Item) {
if !self.pass_through_items {
if self.visited_outermost {
return
} else {
self.visited_outermost = true
}
}
self.operation.visit_id(item.id);
match item.node {
ItemUse(ref view_path) => {
match view_path.node {
ViewPathSimple(_, _) |
ViewPathGlob(_) => {}
ViewPathList(_, ref paths) => {
for path in paths {
self.operation.visit_id(path.node.id())
}
}
}
}
ItemEnum(ref enum_definition, _) => {
for variant in &enum_definition.variants {
self.operation.visit_id(variant.node.id)
}
}
_ => {}
}
visit::walk_item(self, item);
self.visited_outermost = false
}
fn visit_local(&mut self, local: &Local) {
self.operation.visit_id(local.id);
visit::walk_local(self, local)
}
fn visit_block(&mut self, block: &Block) {
self.operation.visit_id(block.id);
visit::walk_block(self, block)
}
fn visit_stmt(&mut self, statement: &Stmt) {
self.operation.visit_id(ast_util::stmt_id(statement));
visit::walk_stmt(self, statement)
}
fn visit_pat(&mut self, pattern: &Pat) {
self.operation.visit_id(pattern.id);
visit::walk_pat(self, pattern)
}
fn visit_expr(&mut self, expression: &Expr) {
self.operation.visit_id(expression.id);
visit::walk_expr(self, expression)
}
fn visit_ty(&mut self, typ: &Ty) {
self.operation.visit_id(typ.id);
visit::walk_ty(self, typ)
}
fn visit_generics(&mut self, generics: &Generics) {
self.visit_generics_helper(generics);
visit::walk_generics(self, generics)
}
fn visit_fn(&mut self,
function_kind: visit::FnKind<'v>,
function_declaration: &'v FnDecl,
block: &'v Block,
span: Span,
node_id: NodeId) {
if !self.pass_through_items {
match function_kind {
visit::FkMethod(..) if self.visited_outermost => return,
visit::FkMethod(..) => self.visited_outermost = true,
_ => {}
}
}
self.operation.visit_id(node_id);
match function_kind {
visit::FkItemFn(_, generics, _, _, _) => {
self.visit_generics_helper(generics)
}
visit::FkMethod(_, sig, _) => {
self.visit_generics_helper(&sig.generics)
}
visit::FkFnBlock => {}
}
for argument in &function_declaration.inputs {
self.operation.visit_id(argument.id)
}
visit::walk_fn(self,
function_kind,
function_declaration,
block,
span);
if !self.pass_through_items {
if let visit::FkMethod(..) = function_kind {
self.visited_outermost = false;
}
}
}
fn visit_struct_field(&mut self, struct_field: &StructField) {
self.operation.visit_id(struct_field.node.id);
visit::walk_struct_field(self, struct_field)
}
fn visit_struct_def(&mut self,
struct_def: &StructDef,
_: ast::Ident,
_: &ast::Generics,
id: NodeId) {
self.operation.visit_id(id);
struct_def.ctor_id.map(|ctor_id| self.operation.visit_id(ctor_id));
visit::walk_struct_def(self, struct_def);
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.operation.visit_id(ti.id);
visit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.operation.visit_id(ii.id);
visit::walk_impl_item(self, ii);
}
fn visit_lifetime_ref(&mut self, lifetime: &Lifetime) {
self.operation.visit_id(lifetime.id);
}
fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
self.visit_lifetime_ref(&def.lifetime);
}
fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
self.operation.visit_id(trait_ref.ref_id);
visit::walk_trait_ref(self, trait_ref);
}
}
pub fn visit_ids_for_inlined_item<O: IdVisitingOperation>(item: &InlinedItem,
operation: &mut O) {
let mut id_visitor = IdVisitor {
operation: operation,
pass_through_items: true,
visited_outermost: false,
};
visit::walk_inlined_item(&mut id_visitor, item);
}
struct IdRangeComputingVisitor {
result: IdRange,
}
impl IdVisitingOperation for IdRangeComputingVisitor {
fn visit_id(&mut self, id: NodeId) {
self.result.add(id);
}
}
pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange {
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
visit_ids_for_inlined_item(item, &mut visitor);
visitor.result
}
/// Computes the id range for a single fn body, ignoring nested items.
pub fn compute_id_range_for_fn_body(fk: visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
id: NodeId)
-> IdRange
{
let mut visitor = IdRangeComputingVisitor {
result: IdRange::max()
};
let mut id_visitor = IdVisitor {
operation: &mut visitor,
pass_through_items: false,
visited_outermost: false,
};
id_visitor.visit_fn(fk, decl, body, sp, id);
id_visitor.operation.result
}
pub fn walk_pat<F>(pat: &Pat, mut it: F) -> bool where F: FnMut(&Pat) -> bool {
// FIXME(#19596) this is a workaround, but there should be a better way
fn walk_pat_<G>(pat: &Pat, it: &mut G) -> bool where G: FnMut(&Pat) -> bool {
if !(*it)(pat) {
return false;
}
match pat.node {
PatIdent(_, _, Some(ref p)) => walk_pat_(&**p, it),
PatStruct(_, ref fields, _) => {
fields.iter().all(|field| walk_pat_(&*field.node.pat, it))
}
PatEnum(_, Some(ref s)) | PatTup(ref s) => {
s.iter().all(|p| walk_pat_(&**p, it))
}
PatBox(ref s) | PatRegion(ref s, _) => {
walk_pat_(&**s, it)
}
PatVec(ref before, ref slice, ref after) => {
before.iter().all(|p| walk_pat_(&**p, it)) &&
slice.iter().all(|p| walk_pat_(&**p, it)) &&
after.iter().all(|p| walk_pat_(&**p, it))
}
PatMac(_) => panic!("attempted to analyze unexpanded pattern"),
PatWild(_) | PatLit(_) | PatRange(_, _) | PatIdent(_, _, _) |
PatEnum(_, _) | PatQPath(_, _) => {
true
}
}
}
walk_pat_(pat, &mut it)
}
/// Returns true if the given struct def is tuple-like; i.e. that its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: &ast::StructDef) -> bool {
struct_def.ctor_id.is_some()
}
/// Returns true if the given pattern consists solely of an identifier
/// and false otherwise.
pub fn pat_is_ident(pat: P<ast::Pat>) -> bool {
match pat.node {
ast::PatIdent(..) => true,
_ => false,
}
}
// are two paths equal when compared unhygienically?
// since I'm using this to replace ==, it seems appropriate
// to compare the span, global, etc. fields as well.
pub fn path_name_eq(a : &ast::Path, b : &ast::Path) -> bool {
(a.span == b.span)
&& (a.global == b.global)
&& (segments_name_eq(&a.segments[..], &b.segments[..]))
}
// are two arrays of segments equal when compared unhygienically?
pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
a.len() == b.len() &&
a.iter().zip(b.iter()).all(|(s, t)| {
s.identifier.name == t.identifier.name &&
// FIXME #7743: ident -> name problems in lifetime comparison?
// can types contain idents?
s.parameters == t.parameters
})
}
/// Returns true if this literal is a string and false otherwise.
pub fn lit_is_str(lit: &Lit) -> bool {
match lit.node {
LitStr(..) => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use ast::*;
use super::*;
fn ident_to_segment(id : &Ident) -> PathSegment {
PathSegment {identifier: id.clone(),
parameters: PathParameters::none()}
}
#[test] fn idents_name_eq_test() {
assert!(segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(78),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
assert!(!segments_name_eq(
&[Ident{name:Name(3),ctxt:4}, Ident{name:Name(78),ctxt:82}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>(),
&[Ident{name:Name(3),ctxt:104}, Ident{name:Name(77),ctxt:182}]
.iter().map(ident_to_segment).collect::<Vec<PathSegment>>()));
}
}
| add | identifier_name |
isari-multi-input.component.ts | import { Component, Input, EventEmitter, Output, OnInit } from '@angular/core';
import { FormGroup, FormControl } from '@angular/forms';
const ENTER = 13;
const BACKSPACE = 8;
@Component({
selector: 'isari-multi-input',
templateUrl: './isari-multi-input.component.html',
styleUrls: ['./isari-multi-input.component.css']
})
export class IsariMultiInputComponent implements OnInit {
_values = [];
selectControl: FormControl;
empty: boolean;
@Input() name: string;
@Input() path: string;
@Input() form: FormGroup;
@Input() label: string;
@Input() requirement: string;
@Input() description: string;
@Output() onUpdate = new EventEmitter<any>();
constructor() { }
update($event) {
if (this.onUpdate) {
this.onUpdate.emit($event);
}
}
ngOnInit() {
this.selectControl = new FormControl({
value: '',
disabled: false
});
this.values = this.form.controls[this.name].value;
}
set values(values: string[]) {
this._values = values;
this.empty = this.values.length === 0;
this.form.controls[this.name].setValue(values);
this.onUpdate.emit({});
}
get values() {
return this._values;
}
onBlur($event) {
this.addValue(this.selectControl.value);
this.empty = true;
}
onFocus($event) {
this.empty = false;
}
onKey($event) {
if ($event.keyCode === ENTER) {
this.addValue(this.selectControl.value);
}
if ($event.keyCode === BACKSPACE && this.selectControl.value === '' && this.values.length > 0) {
this.removeValue(this.values[this.values.length - 1], {});
}
}
removeValue(value, $event) {
const removedIndex = this.values.findIndex(v => v === value);
this.values = this.values.filter(v => v !== value);
this.onUpdate.emit({log: true, path: this.path, index: removedIndex, type: 'delete'});
}
addValue(value) {
this.selectControl.setValue('');
if (value !== '' && this.values.indexOf(value) === -1) { // uniq
this.values = [...this.values, value]; | }
} | this.onUpdate.emit({log: true, path: this.path, type: 'push'});
} | random_line_split |
isari-multi-input.component.ts | import { Component, Input, EventEmitter, Output, OnInit } from '@angular/core';
import { FormGroup, FormControl } from '@angular/forms';
const ENTER = 13;
const BACKSPACE = 8;
@Component({
selector: 'isari-multi-input',
templateUrl: './isari-multi-input.component.html',
styleUrls: ['./isari-multi-input.component.css']
})
export class IsariMultiInputComponent implements OnInit {
_values = [];
selectControl: FormControl;
empty: boolean;
@Input() name: string;
@Input() path: string;
@Input() form: FormGroup;
@Input() label: string;
@Input() requirement: string;
@Input() description: string;
@Output() onUpdate = new EventEmitter<any>();
constructor() { }
update($event) {
if (this.onUpdate) {
this.onUpdate.emit($event);
}
}
ngOnInit() {
this.selectControl = new FormControl({
value: '',
disabled: false
});
this.values = this.form.controls[this.name].value;
}
set values(values: string[]) {
this._values = values;
this.empty = this.values.length === 0;
this.form.controls[this.name].setValue(values);
this.onUpdate.emit({});
}
get values() {
return this._values;
}
| ($event) {
this.addValue(this.selectControl.value);
this.empty = true;
}
onFocus($event) {
this.empty = false;
}
onKey($event) {
if ($event.keyCode === ENTER) {
this.addValue(this.selectControl.value);
}
if ($event.keyCode === BACKSPACE && this.selectControl.value === '' && this.values.length > 0) {
this.removeValue(this.values[this.values.length - 1], {});
}
}
removeValue(value, $event) {
const removedIndex = this.values.findIndex(v => v === value);
this.values = this.values.filter(v => v !== value);
this.onUpdate.emit({log: true, path: this.path, index: removedIndex, type: 'delete'});
}
addValue(value) {
this.selectControl.setValue('');
if (value !== '' && this.values.indexOf(value) === -1) { // uniq
this.values = [...this.values, value];
this.onUpdate.emit({log: true, path: this.path, type: 'push'});
}
}
}
| onBlur | identifier_name |
isari-multi-input.component.ts | import { Component, Input, EventEmitter, Output, OnInit } from '@angular/core';
import { FormGroup, FormControl } from '@angular/forms';
const ENTER = 13;
const BACKSPACE = 8;
@Component({
selector: 'isari-multi-input',
templateUrl: './isari-multi-input.component.html',
styleUrls: ['./isari-multi-input.component.css']
})
export class IsariMultiInputComponent implements OnInit {
_values = [];
selectControl: FormControl;
empty: boolean;
@Input() name: string;
@Input() path: string;
@Input() form: FormGroup;
@Input() label: string;
@Input() requirement: string;
@Input() description: string;
@Output() onUpdate = new EventEmitter<any>();
constructor() { }
update($event) {
if (this.onUpdate) {
this.onUpdate.emit($event);
}
}
ngOnInit() {
this.selectControl = new FormControl({
value: '',
disabled: false
});
this.values = this.form.controls[this.name].value;
}
set values(values: string[]) {
this._values = values;
this.empty = this.values.length === 0;
this.form.controls[this.name].setValue(values);
this.onUpdate.emit({});
}
get values() {
return this._values;
}
onBlur($event) {
this.addValue(this.selectControl.value);
this.empty = true;
}
onFocus($event) {
this.empty = false;
}
onKey($event) {
if ($event.keyCode === ENTER) |
if ($event.keyCode === BACKSPACE && this.selectControl.value === '' && this.values.length > 0) {
this.removeValue(this.values[this.values.length - 1], {});
}
}
removeValue(value, $event) {
const removedIndex = this.values.findIndex(v => v === value);
this.values = this.values.filter(v => v !== value);
this.onUpdate.emit({log: true, path: this.path, index: removedIndex, type: 'delete'});
}
addValue(value) {
this.selectControl.setValue('');
if (value !== '' && this.values.indexOf(value) === -1) { // uniq
this.values = [...this.values, value];
this.onUpdate.emit({log: true, path: this.path, type: 'push'});
}
}
}
| {
this.addValue(this.selectControl.value);
} | conditional_block |
isari-multi-input.component.ts | import { Component, Input, EventEmitter, Output, OnInit } from '@angular/core';
import { FormGroup, FormControl } from '@angular/forms';
const ENTER = 13;
const BACKSPACE = 8;
@Component({
selector: 'isari-multi-input',
templateUrl: './isari-multi-input.component.html',
styleUrls: ['./isari-multi-input.component.css']
})
export class IsariMultiInputComponent implements OnInit {
_values = [];
selectControl: FormControl;
empty: boolean;
@Input() name: string;
@Input() path: string;
@Input() form: FormGroup;
@Input() label: string;
@Input() requirement: string;
@Input() description: string;
@Output() onUpdate = new EventEmitter<any>();
constructor() { }
update($event) {
if (this.onUpdate) {
this.onUpdate.emit($event);
}
}
ngOnInit() {
this.selectControl = new FormControl({
value: '',
disabled: false
});
this.values = this.form.controls[this.name].value;
}
set values(values: string[]) {
this._values = values;
this.empty = this.values.length === 0;
this.form.controls[this.name].setValue(values);
this.onUpdate.emit({});
}
get values() {
return this._values;
}
onBlur($event) |
onFocus($event) {
this.empty = false;
}
onKey($event) {
if ($event.keyCode === ENTER) {
this.addValue(this.selectControl.value);
}
if ($event.keyCode === BACKSPACE && this.selectControl.value === '' && this.values.length > 0) {
this.removeValue(this.values[this.values.length - 1], {});
}
}
removeValue(value, $event) {
const removedIndex = this.values.findIndex(v => v === value);
this.values = this.values.filter(v => v !== value);
this.onUpdate.emit({log: true, path: this.path, index: removedIndex, type: 'delete'});
}
addValue(value) {
this.selectControl.setValue('');
if (value !== '' && this.values.indexOf(value) === -1) { // uniq
this.values = [...this.values, value];
this.onUpdate.emit({log: true, path: this.path, type: 'push'});
}
}
}
| {
this.addValue(this.selectControl.value);
this.empty = true;
} | identifier_body |
indexTree.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import 'vs/css!./media/tree';
import { Iterable } from 'vs/base/common/iterator';
import { AbstractTree, IAbstractTreeOptions } from 'vs/base/browser/ui/tree/abstractTree';
import { IndexTreeModel, IList } from 'vs/base/browser/ui/tree/indexTreeModel';
import { ITreeElement, ITreeModel, ITreeNode, ITreeRenderer } from 'vs/base/browser/ui/tree/tree';
import { IListVirtualDelegate } from 'vs/base/browser/ui/list/list';
export interface IIndexTreeOptions<T, TFilterData = void> extends IAbstractTreeOptions<T, TFilterData> { }
export class IndexTree<T, TFilterData = void> extends AbstractTree<T, TFilterData, number[]> {
protected override model!: IndexTreeModel<T, TFilterData>;
constructor(
user: string,
container: HTMLElement,
delegate: IListVirtualDelegate<T>,
renderers: ITreeRenderer<T, TFilterData, any>[],
private rootElement: T,
options: IIndexTreeOptions<T, TFilterData> = {}
) {
super(user, container, delegate, renderers, options);
}
splice(location: number[], deleteCount: number, toInsert: Iterable<ITreeElement<T>> = Iterable.empty()): void {
this.model.splice(location, deleteCount, toInsert);
}
rerender(location?: number[]): void {
if (location === undefined) |
this.model.rerender(location);
}
updateElementHeight(location: number[], height: number): void {
this.model.updateElementHeight(location, height);
}
protected createModel(user: string, view: IList<ITreeNode<T, TFilterData>>, options: IIndexTreeOptions<T, TFilterData>): ITreeModel<T, TFilterData, number[]> {
return new IndexTreeModel(user, view, this.rootElement, options);
}
}
| {
this.view.rerender();
return;
} | conditional_block |