Instruction stringlengths 14 778 | input_code stringlengths 0 4.24k | output_code stringlengths 1 5.44k |
|---|---|---|
Implement `SqlOrd` for date and time types | use types::{self, NativeSqlType};
pub trait SqlOrd {}
impl SqlOrd for types::SmallInt {}
impl SqlOrd for types::Integer {}
impl SqlOrd for types::BigInt {}
impl SqlOrd for types::Float {}
impl SqlOrd for types::Double {}
impl SqlOrd for types::VarChar {}
impl SqlOrd for types::Text {}
impl<T: SqlOrd + NativeSqlType> SqlOrd for types::Nullable<T> {}
| use types::{self, NativeSqlType};
pub trait SqlOrd {}
impl SqlOrd for types::SmallInt {}
impl SqlOrd for types::Integer {}
impl SqlOrd for types::BigInt {}
impl SqlOrd for types::Float {}
impl SqlOrd for types::Double {}
impl SqlOrd for types::VarChar {}
impl SqlOrd for types::Text {}
impl SqlOrd for types::Date {}
impl SqlOrd for types::Interval {}
impl SqlOrd for types::Time {}
impl SqlOrd for types::Timestamp {}
impl<T: SqlOrd + NativeSqlType> SqlOrd for types::Nullable<T> {}
|
Implement test showing that we can also use other iter methods | struct Counter {
count: u32
}
impl Counter {
fn new() -> Counter {
Counter { count: 0 }
}
}
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
self.count += 1;
if self.count < 6 {
Some(self.count)
} else {
None
}
}
}
#[test]
fn calling_counter_next() {
let mut counter = Counter::new();
assert_eq!(counter.next(), Some(1));
assert_eq!(counter.next(), Some(2));
assert_eq!(counter.next(), Some(3));
assert_eq!(counter.next(), Some(4));
assert_eq!(counter.next(), Some(5));
assert_eq!(counter.next(), None);
} | struct Counter {
count: u32
}
impl Counter {
fn new() -> Counter {
Counter { count: 0 }
}
}
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
self.count += 1;
if self.count < 6 {
Some(self.count)
} else {
None
}
}
}
#[test]
fn calling_counter_next() {
let mut counter = Counter::new();
assert_eq!(counter.next(), Some(1));
assert_eq!(counter.next(), Some(2));
assert_eq!(counter.next(), Some(3));
assert_eq!(counter.next(), Some(4));
assert_eq!(counter.next(), Some(5));
assert_eq!(counter.next(), None);
}
#[test]
fn using_other_iterator_trait_methods() {
let sum: u32 = Counter::new().zip(Counter::new().skip(1))
.map(|(a, b)| a * b)
.filter(|x| x % 3 == 0)
.sum();
assert_eq!(sum, 18);
} |
Add lib.rs to ignore purge files | fn main() {
manage_docs();
}
#[cfg(any(feature = "embed-lgpl-docs", feature = "purge-lgpl-docs"))]
fn manage_docs() {
extern crate lgpl_docs;
const PATH: &'static str = "src";
const IGNORES: &'static [&'static str] = &[];
lgpl_docs::purge(PATH, IGNORES);
if cfg!(feature = "embed-lgpl-docs") {
lgpl_docs::embed(lgpl_docs::Library::Cairo, PATH, IGNORES);
}
}
#[cfg(not(any(feature = "embed-lgpl-docs", feature = "purge-lgpl-docs")))]
fn manage_docs() {}
| fn main() {
manage_docs();
}
#[cfg(any(feature = "embed-lgpl-docs", feature = "purge-lgpl-docs"))]
fn manage_docs() {
extern crate lgpl_docs;
const PATH: &'static str = "src";
const IGNORES: &'static [&'static str] = &["lib.rs"];
lgpl_docs::purge(PATH, IGNORES);
if cfg!(feature = "embed-lgpl-docs") {
lgpl_docs::embed(lgpl_docs::Library::Cairo, PATH, IGNORES);
}
}
#[cfg(not(any(feature = "embed-lgpl-docs", feature = "purge-lgpl-docs")))]
fn manage_docs() {}
|
Remove size_of == 1 case from `fill` specialization. | use crate::mem::{size_of, transmute_copy};
use crate::ptr::write_bytes;
pub(super) trait SpecFill<T> {
fn spec_fill(&mut self, value: T);
}
impl<T: Clone> SpecFill<T> for [T] {
default fn spec_fill(&mut self, value: T) {
if let Some((last, elems)) = self.split_last_mut() {
for el in elems {
el.clone_from(&value);
}
*last = value
}
}
}
impl<T: Copy> SpecFill<T> for [T] {
fn spec_fill(&mut self, value: T) {
if size_of::<T>() == 1 {
// SAFETY: The size_of check above ensures that values are 1 byte wide, as required
// for the transmute and write_bytes
unsafe {
let value: u8 = transmute_copy(&value);
write_bytes(self.as_mut_ptr(), value, self.len());
}
} else {
for item in self.iter_mut() {
*item = value;
}
}
}
}
| pub(super) trait SpecFill<T> {
fn spec_fill(&mut self, value: T);
}
impl<T: Clone> SpecFill<T> for [T] {
default fn spec_fill(&mut self, value: T) {
if let Some((last, elems)) = self.split_last_mut() {
for el in elems {
el.clone_from(&value);
}
*last = value
}
}
}
impl<T: Copy> SpecFill<T> for [T] {
fn spec_fill(&mut self, value: T) {
for item in self.iter_mut() {
*item = value;
}
}
}
|
Remove stack overflow handler stub for wasm. | pub struct Handler;
impl Handler {
pub unsafe fn new() -> Handler {
Handler
}
}
pub unsafe fn init() {}
pub unsafe fn cleanup() {}
| pub unsafe fn init() {}
pub unsafe fn cleanup() {}
|
Remove unused RawObject import from class:VM | use std::slice;
use binding::vm::init;
use class::object::Object;
use types::{Argc, Value};
use class::traits::RawObject;
pub struct VM;
impl VM {
pub fn init() {
init();
}
pub fn parse_arguments(argc: Argc, arguments: *const Object) -> Vec<Object> {
unsafe {
slice::from_raw_parts(arguments, argc as usize).to_vec()
}
}
pub fn parse_itself(itself: Value) -> Object {
Object::from(itself)
}
}
| use std::slice;
use binding::vm::init;
use class::object::Object;
use types::{Argc, Value};
pub struct VM;
impl VM {
pub fn init() {
init();
}
pub fn parse_arguments(argc: Argc, arguments: *const Object) -> Vec<Object> {
unsafe {
slice::from_raw_parts(arguments, argc as usize).to_vec()
}
}
pub fn parse_itself(itself: Value) -> Object {
Object::from(itself)
}
}
|
Add SharedPtr and WeakPtr support. | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use autocxx_parser::IncludeCppConfig;
use syn::{parse_quote, Ident, Item};
pub(crate) fn create_impl_items(id: &Ident, config: &IncludeCppConfig) -> Vec<Item> {
if config.exclude_impls {
return vec![];
}
vec![
Item::Impl(parse_quote! {
impl UniquePtr<#id> {}
}),
Item::Impl(parse_quote! {
impl CxxVector<#id> {}
}),
]
}
| // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use autocxx_parser::IncludeCppConfig;
use syn::{parse_quote, Ident, Item};
pub(crate) fn create_impl_items(id: &Ident, config: &IncludeCppConfig) -> Vec<Item> {
if config.exclude_impls {
return vec![];
}
vec![
Item::Impl(parse_quote! {
impl UniquePtr<#id> {}
}),
Item::Impl(parse_quote! {
impl SharedPtr<#id> {}
}),
Item::Impl(parse_quote! {
impl WeakPtr<#id> {}
}),
Item::Impl(parse_quote! {
impl CxxVector<#id> {}
}),
]
}
|
Add test for shell systemd provider | #![cfg(feature="inline-systemd")]
extern crate specinfra;
use specinfra::backend;
#[test]
fn service_resource_with_inline_provider() {
let b = backend::direct::Direct::new();
let s = specinfra::new(&b).unwrap();
test_service_resource(s);
}
fn test_service_resource(s: specinfra::Specinfra) {
let dbus = s.service("dbus.service");
assert!(dbus.is_running().unwrap());
let dbus = s.service("dbus");
assert!(dbus.is_running().unwrap());
let sshd = s.service("sshd");
assert!(sshd.is_enabled().unwrap());
let nginx = s.service("nginx");
assert!(nginx.enable().unwrap());
assert!(nginx.is_enabled().unwrap());
assert!(nginx.disable().unwrap());
assert_eq!(nginx.is_enabled().unwrap(), false);
assert!(nginx.start().unwrap());
assert!(nginx.is_running().unwrap());
assert!(nginx.reload().unwrap());
assert!(nginx.is_running().unwrap());
assert!(nginx.restart().unwrap());
assert!(nginx.is_running().unwrap());
assert!(nginx.stop().unwrap());
assert_eq!(nginx.is_running().unwrap(), false);
}
| #![cfg(feature="inline-systemd")]
extern crate specinfra;
use specinfra::backend;
use specinfra::Specinfra;
use specinfra::provider::service::inline::null::Null;
#[test]
fn service_resource_with_inline_provider() {
let b = backend::direct::Direct::new();
let s = specinfra::new(&b).unwrap();
test_service_resource(s);
}
#[test]
fn service_resource_with_shell_provider() {
let b = backend::direct::Direct::new();
let mut s = specinfra::new(&b).unwrap();
s.providers.service.inline = Box::new(Null);
test_service_resource(s);
}
fn test_service_resource(s: Specinfra) {
let dbus = s.service("dbus.service");
assert!(dbus.is_running().unwrap());
let dbus = s.service("dbus");
assert!(dbus.is_running().unwrap());
let sshd = s.service("sshd");
assert!(sshd.is_enabled().unwrap());
let nginx = s.service("nginx");
assert!(nginx.enable().unwrap());
assert!(nginx.is_enabled().unwrap());
assert!(nginx.disable().unwrap());
assert_eq!(nginx.is_enabled().unwrap(), false);
assert!(nginx.start().unwrap());
assert!(nginx.is_running().unwrap());
assert!(nginx.reload().unwrap());
assert!(nginx.is_running().unwrap());
assert!(nginx.restart().unwrap());
assert!(nginx.is_running().unwrap());
assert!(nginx.stop().unwrap());
assert_eq!(nginx.is_running().unwrap(), false);
}
|
Add pattern for the AVR add with immediate instruction | use {Pattern, PatternNode, PatternOperand};
use machine::avr::registers;
use machine;
use select;
use mir;
macro_rules! pattern {
($node:expr) => { Pattern { root: $node } }
}
macro_rules! node {
($opcode:ident, $operands:expr) => {
PatternNode {
opcode: mir::OpCode::$opcode,
operands: $operands,
}
};
($opcode:ident) => {
node!($opcode, vec![])
}
}
macro_rules! operands {
($($operand:expr),*) => {
vec![$( $operand ),+]
}
}
macro_rules! inst_rdrr {
($opcode:ident) => {
pattern! {
node!($opcode,
operands!(
select::PatternOperand::Value(PatternOperand::Register(®isters::GPR8)),
select::PatternOperand::Value(PatternOperand::Register(®isters::GPR8))
)
)
}
}
}
pub fn selector() -> machine::Selector {
machine::Selector::new(self::patterns())
}
pub fn patterns() -> Vec<Pattern> {
vec![
inst_rdrr!(Add),
inst_rdrr!(Sub),
pattern! { node!(Ret) },
]
}
| use {Pattern, PatternNode, PatternOperand};
use machine::avr::registers;
use machine;
use select;
use mir;
macro_rules! pattern {
($node:expr) => { Pattern { root: $node } }
}
macro_rules! node {
($opcode:ident, $operands:expr) => {
PatternNode {
opcode: mir::OpCode::$opcode,
operands: $operands,
}
};
($opcode:ident) => {
node!($opcode, vec![])
}
}
macro_rules! operands {
($($operand:expr),*) => {
vec![$( $operand ),+]
}
}
/// An instruction which takes a destination and source GPR8.
macro_rules! inst_rdrr {
($opcode:ident) => {
pattern! {
node!($opcode,
operands!(
select::PatternOperand::Value(PatternOperand::Register(®isters::GPR8)),
select::PatternOperand::Value(PatternOperand::Register(®isters::GPR8))
)
)
}
}
}
/// An instruction which takes a GPR8 and an 8-bit immediate.
macro_rules! inst_rdi {
($opcode:ident) => {
pattern! {
node!($opcode,
operands!(
select::PatternOperand::Value(PatternOperand::Register(®isters::GPR8)),
select::PatternOperand::Value(PatternOperand::Immediate { width: 8 })
)
)
}
}
}
pub fn selector() -> machine::Selector {
machine::Selector::new(self::patterns())
}
pub fn patterns() -> Vec<Pattern> {
vec![
inst_rdi!(Add), // ADDRdK
inst_rdrr!(Add), // ADDRdRr
inst_rdrr!(Sub), // SUBRdRr
pattern! { node!(Ret) },
]
}
|
Add height and width methods to Map | use engine::{IntoMap, Tile};
use util::units::{Point, Size};
use std::path::AsPath;
use std::fs::File;
use std::io::Read;
pub struct Map {
pub tiles: Vec<Vec<Tile>>,
pub size: Size,
}
impl Map {
pub fn new<T>(mappish: T) -> Map where T: IntoMap {
let result = mappish.as_map();
let map = match result {
Ok(map) => map,
Err(msg) => {
panic!(msg);
}
};
map
}
pub fn from_file<T>(path: T) -> Map where T: AsPath {
let mut level_file = File::open(&path).ok().expect("Could not find level file");
let mut level_string = String::new();
level_file.read_to_string(&mut level_string).ok().expect("Could not read from level file");
Map::new(level_string)
}
pub fn at(&self, loc: Point) -> Tile {
self.tiles[loc.y as usize][loc.x as usize]
}
pub fn is_walkable(&self, loc: Point) -> bool {
self.at(loc).is_walkable()
}
pub fn set_tile(&mut self, loc: Point, tile: Tile) {
self.tiles[loc.y as usize][loc.x as usize] = tile;
}
}
| use engine::{IntoMap, Tile};
use util::units::{Point, Size};
use std::path::AsPath;
use std::fs::File;
use std::io::Read;
pub struct Map {
pub tiles: Vec<Vec<Tile>>,
pub size: Size,
}
impl Map {
pub fn new<T>(mappish: T) -> Map where T: IntoMap {
let result = mappish.as_map();
let map = match result {
Ok(map) => map,
Err(msg) => {
panic!(msg);
}
};
map
}
pub fn from_file<T>(path: T) -> Map where T: AsPath {
let mut level_file = File::open(&path).ok().expect("Could not find level file");
let mut level_string = String::new();
level_file.read_to_string(&mut level_string).ok().expect("Could not read from level file");
Map::new(level_string)
}
pub fn at(&self, loc: Point) -> Tile {
self.tiles[loc.y as usize][loc.x as usize]
}
pub fn is_walkable(&self, loc: Point) -> bool {
self.at(loc).is_walkable()
}
pub fn set_tile(&mut self, loc: Point, tile: Tile) {
self.tiles[loc.y as usize][loc.x as usize] = tile;
}
pub fn height(&self) -> i32 {
self.tiles.len() as i32
}
pub fn width(&self) -> i32 {
self.tiles[0].len() as i32
}
}
|
Rename to ScriptTools for consistency with the toolkit library | pub mod lex_script_tool;
pub mod tool;
use self::lex_script_tool::*;
use silkthread_base::*;
use silkthread_base::basic::*;
///
/// ToolSet for dealing with the scripting language
///
pub struct ScriptToolSet {
}
impl ToolSet for ScriptToolSet {
fn create_tools(self, _: &Environment) -> Vec<(String, Box<Tool>)> {
vec![
(String::from(tool::LEX_SCRIPT), Box::new(create_lex_script_tool()))
]
}
}
impl ScriptToolSet {
///
/// Creates a new script toolset
///
pub fn new() -> ScriptToolSet {
ScriptToolSet { }
}
}
| pub mod lex_script_tool;
pub mod tool;
use self::lex_script_tool::*;
use silkthread_base::*;
use silkthread_base::basic::*;
///
/// ToolSet for dealing with the scripting language
///
pub struct ScriptTools {
}
impl ToolSet for ScriptTools{
fn create_tools(self, _: &Environment) -> Vec<(String, Box<Tool>)> {
vec![
(String::from(tool::LEX_SCRIPT), Box::new(create_lex_script_tool()))
]
}
}
impl ScriptTools {
///
/// Creates a new script toolset
///
pub fn new() -> ScriptTools {
ScriptTools { }
}
}
|
Clean up rust code for bracket-push case | #[derive(Debug)]
pub struct Brackets {
brackets: String,
}
impl Brackets {
pub fn from(text: &str) -> Self {
Brackets {
brackets: text.chars()
.filter(|&x| {
x == '[' || x == ']' || x == '{' || x == '}' || x == '(' || x == ')'
})
.collect(),
}
}
pub fn are_balanced(&self) -> bool {
let mut r = Vec::new();
for c in self.brackets.chars() {
match c {
'[' | '{' | '(' => r.push(c),
')' => if let Some(l) = r.pop() {
if l != '(' {
return false;
}
} else {
return false;
},
']' | '}' => if let Some(l) = r.pop() {
if c as i32 - l as i32 != 2 {
return false;
}
} else {
return false;
},
_ => return false,
}
}
r.is_empty()
}
}
| #[derive(Debug)]
pub struct Brackets {
string: String,
}
impl Brackets {
pub fn from(text: &str) -> Self {
Brackets {
string: text.to_string(),
}
}
pub fn are_balanced(&self) -> bool {
let mut r = Vec::new();
for c in self.string.chars() {
match c {
'[' | '{' | '(' => r.push(c),
']' | '}' | ')' => match (r.pop(), c) {
(Some('{'), '}') | (Some('['), ']') | (Some('('), ')') => {}
_ => return false,
},
_ => {}
}
}
r.is_empty()
}
}
|
Add channel support to builder-worker | // Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use env as henv;
/// Default Depot URL
pub const DEFAULT_DEPOT_URL: &'static str = "https://willem.habitat.sh/v1/depot";
/// Default Depot URL environment variable
pub const DEPOT_URL_ENVVAR: &'static str = "HAB_DEPOT_URL";
pub fn default_depot_url() -> String {
match henv::var(DEPOT_URL_ENVVAR) {
Ok(val) => val,
Err(_) => DEFAULT_DEPOT_URL.to_string(),
}
}
| // Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use env as henv;
/// Default Depot URL
pub const DEFAULT_DEPOT_URL: &'static str = "https://willem.habitat.sh/v1/depot";
/// Default Depot channel
pub const DEFAULT_DEPOT_CHANNEL: &'static str = "unstable";
/// Default Depot URL environment variable
pub const DEPOT_URL_ENVVAR: &'static str = "HAB_DEPOT_URL";
/// Default Depot Channel environment variable
pub const DEPOT_CHANNEL_ENVVAR: &'static str = "HAB_DEPOT_CHANNEL";
pub fn default_depot_url() -> String {
match henv::var(DEPOT_URL_ENVVAR) {
Ok(val) => val,
Err(_) => DEFAULT_DEPOT_URL.to_string(),
}
}
pub fn default_depot_channel() -> String {
match henv::var(DEPOT_CHANNEL_ENVVAR) {
Ok(val) => val,
Err(_) => DEFAULT_DEPOT_CHANNEL.to_string(),
}
}
|
Add warning to xml writer | mod reader;
mod writer;
pub use self::reader::StreamingParser;
pub use self::writer::Writer; | mod reader;
mod writer;
pub use self::reader::StreamingParser;
/// WIP: Does not work correctly
pub use self::writer::Writer; |
Update error message and un-xfail test | // xfail-test
fn foo(cond: bool) {
// Here we will infer a type that uses the
// region of the if stmt then block:
let mut x; //~ ERROR foo
if cond {
x = &3;
}
}
fn main() {} | fn foo(cond: bool) {
// Here we will infer a type that uses the
// region of the if stmt then block:
let mut x;
if cond {
x = &3; //~ ERROR illegal borrow: borrowed value does not live long enough
assert (*x == 3);
}
}
fn main() {} |
Use extend instead of custom interation | extern crate yaml_rust;
use self::yaml_rust::{YamlLoader, Yaml};
pub fn from_yaml(yaml_file: String) -> Vec<String> {
let docs = YamlLoader::load_from_str(&yaml_file).unwrap();
let doc = &docs[0];
let key = Yaml::from_str("default");
let default_command_list = doc.as_hash().unwrap().get(&key).unwrap();
let yaml_commands = default_command_list.as_vec().unwrap();
let mut result_commands = Vec::new();
for element in yaml_commands {
result_commands.push(element.as_str().unwrap().to_string());
}
result_commands
}
| extern crate yaml_rust;
use self::yaml_rust::{YamlLoader, Yaml};
pub fn from_yaml(yaml_file: String) -> Vec<String> {
let docs = YamlLoader::load_from_str(&yaml_file).unwrap();
let doc = &docs[0];
let key = Yaml::from_str("default");
let default_command_list = doc.as_hash().unwrap().get(&key).unwrap();
let yaml_commands = default_command_list.as_vec().unwrap();
let mut result_commands = Vec::new();
result_commands.extend(
yaml_commands.iter().map(|e| element.as_str().unwrap().to_string()
)
result_commands
}
|
Remove rtdebug_! and make rtdebug! work properly. | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_escape];
macro_rules! rterrln (
($( $arg:expr),+) => ( {
::rt::util::dumb_println(fmt!( $($arg),+ ));
} )
)
// Some basic logging
macro_rules! rtdebug_ (
($( $arg:expr),+) => ( {
rterrln!( $($arg),+ )
} )
)
// An alternate version with no output, for turning off logging. An
// earlier attempt that did not call the fmt! macro was insufficient,
// as a case of the "let bind each variable" approach eventually
// failed without an error message describing the invocation site.
macro_rules! rtdebug (
($( $arg:expr),+) => ( {
let _x = fmt!( $($arg),+ );
})
)
macro_rules! rtassert (
( $arg:expr ) => ( {
if !$arg {
rtabort!("assertion failed: %s", stringify!($arg));
}
} )
)
macro_rules! rtabort(
($( $msg:expr),+) => ( {
::rt::util::abort(fmt!($($msg),+));
} )
)
| // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_escape];
macro_rules! rterrln (
($( $arg:expr),+) => ( {
::rt::util::dumb_println(fmt!( $($arg),+ ));
} )
)
// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
macro_rules! rtdebug (
($( $arg:expr),+) => ( {
if cfg!(rtdebug) {
rterrln!( $($arg),+ )
}
})
)
macro_rules! rtassert (
( $arg:expr ) => ( {
if !$arg {
rtabort!("assertion failed: %s", stringify!($arg));
}
} )
)
macro_rules! rtabort(
($( $msg:expr),+) => ( {
::rt::util::abort(fmt!($($msg),+));
} )
)
|
Update for the old_io stopgap | mod board;
#[cfg(not(test))]
fn main() {
let mut brd = board::Board::new(65, 248).random();
let mut timer = std::io::Timer::new().unwrap();
let ref mut worker_pool = board::WorkerPool::new_with_default_size();
let periodic = timer.periodic(std::time::Duration::milliseconds(64));
loop {
println!("\x1b[H\x1b[2J{:?}", brd);
periodic.recv().unwrap();
brd = brd.parallel_next_generation(worker_pool);
}
}
| mod board;
#[cfg(not(test))]
fn main() {
let mut brd = board::Board::new(65, 248).random();
let mut timer = std::old_io::Timer::new().unwrap();
let ref mut worker_pool = board::WorkerPool::new_with_default_size();
let periodic = timer.periodic(std::time::Duration::milliseconds(64));
loop {
println!("\x1b[H\x1b[2J{:?}", brd);
periodic.recv().unwrap();
brd = brd.parallel_next_generation(worker_pool);
}
}
|
Allow mutable callbacks in gameloop | use std::time::{Duration, Instant};
use std::thread::sleep;
pub struct GameLoop {
frame_number: u64,
fps: u32
}
impl GameLoop {
pub fn new(fps: u32) -> GameLoop {
GameLoop {
frame_number: 0,
fps: fps
}
}
pub fn run<F>(&mut self, fun: F)
where F: Fn(u64) -> bool { //FIXME: Could be an enum or something
let ns_per_frame : Duration = Duration::new(0, 1_000_000_000 / self.fps);
'running: loop {
let start = Instant::now();
if fun(self.frame_number) {
break 'running;
}
// Framerate cap
let next_render_step = start + ns_per_frame;
let now = Instant::now();
if next_render_step >= now {
sleep(next_render_step - now);
}
self.frame_number += 1;
}
}
}
| use std::time::{Duration, Instant};
use std::thread::sleep;
pub struct GameLoop {
frame_number: u64,
fps: u32
}
impl GameLoop {
pub fn new(fps: u32) -> GameLoop {
GameLoop {
frame_number: 0,
fps: fps
}
}
pub fn run<F>(&mut self, mut fun: F)
where F: FnMut(u64) -> bool { //FIXME: Could be an enum or something
let ns_per_frame : Duration = Duration::new(0, 1_000_000_000 / self.fps);
'running: loop {
let start = Instant::now();
if fun(self.frame_number) {
break 'running;
}
// Framerate cap
let next_render_step = start + ns_per_frame;
let now = Instant::now();
if next_render_step >= now {
sleep(next_render_step - now);
}
self.frame_number += 1;
}
}
}
|
Build a more perfomant function that utilizes sqrt. |
fn is_prime(num :u64) -> bool {
for i in range(2, num) {
if num % i == 0 {
return false
}
}
true
}
fn fatorize_prime(num :u64) {
for i in range(2, num) {
if num % i == 0 && is_prime(i) {
println!("{}", i);
}
}
}
#[test]
fn is_prime_test() {
assert!(is_prime(4) == false, "{} {} {}", is_prime(4), false, 4);
}
#[test]
fn is_prime_test2() {
assert!(is_prime(5) == true, "{} {} {}", is_prime(5), true, 5);
}
fn main() {
fatorize_prime(2332377667);
} | fn is_prime(num :f64) -> bool {
for i in range(2, num.sqrt() as u64) {
if num as u64 % i == 0 {
return false
}
}
true
}
fn fatorize_prime(num :f64) {
for i in range(2, num.sqrt() as u64) {
if num as u64 % i == 0 && is_prime(i as f64) {
println!("{}", i);
}
}
}
#[test]
fn is_prime_test() {
assert!(is_prime(4) == false, "{} {} {}", is_prime(4), false, 4);
}
#[test]
fn is_prime_test2() {
assert!(is_prime(5) == true, "{} {} {}", is_prime(5), true, 5);
}
fn main() {
fatorize_prime(233237766723323232f64);
} |
Fix problem 4 tests, remove unused variable. | use std::num;
use std::collections::PriorityQueue;
fn is_palindrome(num: uint) -> bool {
let s = num.to_string();
let b = s.as_bytes();
for i in range(0, b.len() / 2) {
if b[i] != b[b.len() - i - 1] {
return false;
}
}
true
}
fn largest_palindrome_product(digits: uint) -> uint {
let max = num::pow(10u, digits);
let min = 0u;
let mut results = PriorityQueue::new();
for i in range(max/2, max).rev() {
for j in range(0, max).rev() {
let k = i * j;
if is_palindrome(k) {
results.push(k);
}
}
}
*results.top().unwrap()
}
#[cfg(test)]
mod test {
use super::{is_palindrome, largest_palindrome_product};
#[test]
fn test_is_palindrome() {
assert!(is_palindrome(9009));
assert!(is_palindrome(90909));
assert!(!is_palindrome(9000));
assert!(!is_palindrome(90900));
}
#[test]
fn provided_example() {
assert_eq!(largest_palindrome_product(2), 906609);
}
#[test]
fn expected_result() {
assert_eq!(largest_palindrome_product(3), 90909);
}
}
| use std::num;
use std::collections::PriorityQueue;
fn is_palindrome(num: uint) -> bool {
let s = num.to_string();
let b = s.as_bytes();
for i in range(0, b.len() / 2) {
if b[i] != b[b.len() - i - 1] {
return false;
}
}
true
}
fn largest_palindrome_product(digits: uint) -> uint {
let max = num::pow(10u, digits);
let mut results = PriorityQueue::new();
for i in range(max/2, max).rev() {
for j in range(0, max).rev() {
let k = i * j;
if is_palindrome(k) {
results.push(k);
}
}
}
*results.top().unwrap()
}
#[cfg(test)]
mod test {
use super::{is_palindrome, largest_palindrome_product};
#[test]
fn test_is_palindrome() {
assert!(is_palindrome(9009));
assert!(is_palindrome(90909));
assert!(!is_palindrome(9000));
assert!(!is_palindrome(90900));
}
#[test]
fn provided_example() {
assert_eq!(largest_palindrome_product(2), 9009);
}
#[test]
fn expected_result() {
assert_eq!(largest_palindrome_product(3), 906609);
}
}
|
Test benchmark for Deck::new(). Will remove. | extern crate libterminal_cribbage;
use libterminal_cribbage::cards::Deck;
fn main() {
let d = Deck::new();
println!("D: {}", d);
}
| extern crate libterminal_cribbage;
use libterminal_cribbage::cards::Deck;
fn main() {
let mut d = Deck::new();
for _ in 0..100000 {
d = Deck::new();
}
}
|
Revert "Removing std c++11 to fix travis CI" | extern crate gcc;
fn main()
{
gcc::Config::new().cpp(true)
.file("wrapper/wrapper.cpp")
.file("pugixml/src/pugixml.cpp")
.include("pugixml/src")
.compile("libpugixml.a")
}
| extern crate gcc;
fn main()
{
gcc::Config::new().cpp(true)
.file("wrapper/wrapper.cpp")
.file("pugixml/src/pugixml.cpp")
.include("pugixml/src")
.flag("-std=c++11")
.compile("libpugixml.a")
}
|
Remove `dead_code`, `elided_lifetimes_in_paths` and `single_use_lifetimes` lints | // Copyright (c) 2022 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#![deny(unsafe_code)]
#![warn(
const_err,
dead_code,
elided_lifetimes_in_paths,
explicit_outlives_requirements,
improper_ctypes,
keyword_idents,
missing_debug_implementations,
missing_docs,
no_mangle_generic_items,
non_ascii_idents,
non_shorthand_field_patterns,
noop_method_call,
overflowing_literals,
path_statements,
patterns_in_fns_without_body,
private_in_public,
rust_2018_idioms,
single_use_lifetimes,
unconditional_recursion,
unreachable_pub,
unused_comparisons,
unused,
while_true
)]
//! Core primitives for dealing with Bitcoin-like chains.
//!
//! Note: This is a general purpose library, but has been optimized for the
//! usage in Chronik, an indexer for Bitcoin ABC.
pub mod block;
pub mod hash;
| // Copyright (c) 2022 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#![deny(unsafe_code)]
#![warn(
const_err,
explicit_outlives_requirements,
improper_ctypes,
keyword_idents,
missing_debug_implementations,
missing_docs,
no_mangle_generic_items,
non_ascii_idents,
non_shorthand_field_patterns,
noop_method_call,
overflowing_literals,
path_statements,
patterns_in_fns_without_body,
private_in_public,
rust_2018_idioms,
unconditional_recursion,
unreachable_pub,
unused_comparisons,
unused,
while_true
)]
//! Core primitives for dealing with Bitcoin-like chains.
//!
//! Note: This is a general purpose library, but has been optimized for the
//! usage in Chronik, an indexer for Bitcoin ABC.
pub mod block;
pub mod hash;
|
Remove select and fix sample code | extern crate fsevent;
use std::sync::mpsc::channel;
use std::thread;
#[allow(dead_code)]
fn main() {
let (sender, receiver) = channel();
let _t = thread::spawn(move || {
let fsevent = fsevent::FsEvent::new(sender);
fsevent.append_path("../../");
fsevent.observe();
});
loop {
select! (
val = receiver.recv() => {
println!("{:?}", val.unwrap());
}
)
}
}
| extern crate fsevent;
use std::sync::mpsc::channel;
use std::thread;
fn main() {
let (sender, receiver) = channel();
let _t = thread::spawn(move || {
let fsevent = fsevent::FsEvent::new(sender);
fsevent.append_path("../../");
fsevent.observe();
});
loop {
let val = receiver.recv();
println!("{:?}", val.unwrap());
}
}
|
Add `fixed_restore_signal` to root decode function | use subframe;
use subframe::{Subframe, MAX_FIXED_ORDER};
pub fn fixed_restore_signal(order: usize,
residual: &[i32],
output: &mut [i32]) {
debug_assert!(order <= MAX_FIXED_ORDER);
let polynomial = [ &[][..]
, &[1][..]
, &[-1, 2][..]
, &[1, -3, 3][..]
, &[-1, 4, -6, 4][..]
];
let coefficients = polynomial[order];
for i in 0..residual.len() {
let offset = i + order;
let prediction = coefficients.iter()
.zip(&output[i..offset])
.fold(0, |result, (coefficient, signal)|
result + coefficient * signal);
output[offset] = residual[i] + prediction;
}
}
pub fn decode(subframe: &Subframe, output: &mut [i32]) {
match subframe.data {
subframe::Data::Constant(_) => unimplemented!(),
subframe::Data::Verbatim(_) => unimplemented!(),
subframe::Data::Fixed(_) => unimplemented!(),
subframe::Data::LPC(_) => unimplemented!(),
}
if subframe.wasted_bits > 0 {
for value in output {
*value <<= subframe.wasted_bits;
}
}
}
| use subframe;
use subframe::{Subframe, MAX_FIXED_ORDER};
pub fn fixed_restore_signal(order: usize,
residual: &[i32],
output: &mut [i32]) {
debug_assert!(order <= MAX_FIXED_ORDER);
let polynomial = [ &[][..]
, &[1][..]
, &[-1, 2][..]
, &[1, -3, 3][..]
, &[-1, 4, -6, 4][..]
];
let coefficients = polynomial[order];
for i in 0..residual.len() {
let offset = i + order;
let prediction = coefficients.iter()
.zip(&output[i..offset])
.fold(0, |result, (coefficient, signal)|
result + coefficient * signal);
output[offset] = residual[i] + prediction;
}
}
pub fn decode(subframe: &Subframe, output: &mut [i32]) {
match subframe.data {
subframe::Data::Constant(_) => unimplemented!(),
subframe::Data::Verbatim(_) => unimplemented!(),
subframe::Data::Fixed(ref fixed) => {
let order = fixed.order as usize;
for i in 0..order {
output[i] = fixed.warmup[i];
}
fixed_restore_signal(order, &fixed.residual, output);
}
subframe::Data::LPC(_) => unimplemented!(),
}
if subframe.wasted_bits > 0 {
for value in output {
*value <<= subframe.wasted_bits;
}
}
}
|
Use entry instead of get | #![feature(ascii_ctype)]
use std::ascii::AsciiExt;
use std::collections::HashMap;
pub fn word_count(text: &str) -> HashMap<String, u32> {
let iter = text.split(|x: char| x.is_ascii_punctuation() || x == ' ')
.filter(|x| !x.is_empty())
.map(|x| x.to_lowercase());
let mut result: HashMap<String, u32> = HashMap::new();
for i in iter {
let v = result.get(&i).map_or(1, |v| v + 1);
result.insert(i, v);
}
result
}
| #![feature(ascii_ctype)]
use std::ascii::AsciiExt;
use std::collections::HashMap;
pub fn word_count(text: &str) -> HashMap<String, u32> {
let mut result: HashMap<String, u32> = HashMap::new();
let iter = text.split(|x: char| x.is_ascii_punctuation() || x == ' ')
.filter(|x| !x.is_empty())
.map(|x| x.to_lowercase());
for i in iter {
let entry = result.entry(i).or_insert(0);
*entry += 1;
}
result
}
|
Add Server header to response | extern crate hyper;
mod get;
mod post;
use hyper::{Get, Post};
use hyper::server::{Handler, Server, Request, Response};
use hyper::status::StatusCode;
struct HttpTin;
impl Handler for HttpTin {
fn handle(&self, request: Request, mut response: Response) {
println!("{} {} {}", request.remote_addr, request.method, request.uri);
match request.method {
Get => get::get(request, response),
Post => post::post(request, response),
_ => *response.status_mut() = StatusCode::MethodNotAllowed,
}
}
}
fn main() {
let server = Server::http("::1:8000").unwrap();
// println!("Server {:?}", server);
let active = server.handle(HttpTin {}).unwrap();
println!("Active {:?}", active.socket);
}
| extern crate hyper;
mod get;
mod post;
use hyper::{Get, Post};
use hyper::header;
use hyper::server::{Handler, Server, Request, Response};
use hyper::status::StatusCode;
struct HttpTin {
server: String,
}
impl HttpTin {
pub fn new() -> Self {
let server = format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
HttpTin { server: server }
}
fn prepare_response(&self, response: &mut Response) {
let server = header::Server(self.server.clone());
response.headers_mut().set(server);
}
}
impl Handler for HttpTin {
fn handle(&self, request: Request, mut response: Response) {
println!("{} {} {}", request.remote_addr, request.method, request.uri);
self.prepare_response(&mut response);
match request.method {
Get => get::get(request, response),
Post => post::post(request, response),
_ => *response.status_mut() = StatusCode::MethodNotAllowed,
}
}
}
fn main() {
let server = Server::http("::1:8000").unwrap();
// println!("Server {:?}", server);
let active = server.handle(HttpTin::new()).unwrap();
println!("Active {:?}", active.socket);
}
|
Clean up some use declarations | #![feature(libc)]
extern crate libc;
extern crate "glib-2_0-sys" as glib;
extern crate "gobject-2_0-sys" as gobject;
pub mod notification;
pub use notification::*;
use libc::{
c_char
};
use glib::types::{
gboolean,
};
use glib::GList;
extern "C" {
pub fn notify_init(app_name: *const c_char) -> gboolean;
pub fn notify_uninit();
pub fn notify_is_initted() -> gboolean;
pub fn notify_get_app_name() -> *const c_char;
pub fn notify_set_app_name(app_name: *const c_char);
pub fn notify_get_server_caps() -> *mut GList;
pub fn notify_get_server_info(ret_name: *mut *mut c_char,
ret_vendor: *mut *mut c_char,
ret_version: *mut *mut c_char,
ret_spec_version: *mut *mut c_char);
}
| #![feature(libc)]
extern crate libc;
extern crate "glib-2_0-sys" as glib;
extern crate "gobject-2_0-sys" as gobject;
pub mod notification;
pub use notification::*;
use libc::c_char;
use glib::types::gboolean;
use glib::GList;
extern "C" {
pub fn notify_init(app_name: *const c_char) -> gboolean;
pub fn notify_uninit();
pub fn notify_is_initted() -> gboolean;
pub fn notify_get_app_name() -> *const c_char;
pub fn notify_set_app_name(app_name: *const c_char);
pub fn notify_get_server_caps() -> *mut GList;
pub fn notify_get_server_info(ret_name: *mut *mut c_char,
ret_vendor: *mut *mut c_char,
ret_version: *mut *mut c_char,
ret_spec_version: *mut *mut c_char);
}
|
Simplify solution for Circular Buffer | pub struct CircularBuffer<T> {
buffer: Vec<T>,
capacity: usize
}
#[derive(Debug, PartialEq)]
pub enum Error {
EmptyBuffer,
FullBuffer
}
impl<T> CircularBuffer<T> {
pub fn new(capacity: usize) -> Self {
CircularBuffer {
buffer: Vec::with_capacity(capacity),
capacity: capacity
}
}
pub fn write(&mut self, item: T) -> Result<(), Error> {
if self.buffer.len() == self.capacity {
return Err(Error::FullBuffer)
}
Ok(self.buffer.push(item))
}
pub fn overwrite(&mut self, item: T) {
if self.buffer.len() == self.capacity {
self.buffer.remove(0);
}
self.buffer.push(item);
}
pub fn read(&mut self) -> Result<T, Error> {
if self.buffer.len() == 0 {
return Err(Error::EmptyBuffer)
}
Ok(self.buffer.remove(0))
}
pub fn clear(&mut self) {
self.buffer.clear();
}
}
| pub struct CircularBuffer<T>(Vec<T>);
#[derive(Debug, PartialEq)]
pub enum Error {
EmptyBuffer,
FullBuffer
}
impl<T> CircularBuffer<T> {
pub fn new(capacity: usize) -> Self {
CircularBuffer(Vec::with_capacity(capacity))
}
pub fn write(&mut self, item: T) -> Result<(), Error> {
if self.0.len() == self.0.capacity() {
return Err(Error::FullBuffer)
}
Ok(self.0.push(item))
}
pub fn overwrite(&mut self, item: T) {
if self.0.len() == self.0.capacity() {
self.0.remove(0);
}
self.0.push(item);
}
pub fn read(&mut self) -> Result<T, Error> {
if self.0.len() == 0 {
return Err(Error::EmptyBuffer)
}
Ok(self.0.remove(0))
}
pub fn clear(&mut self) {
self.0.clear();
}
}
|
Configure Rocket to log minimally | use db::Pool;
use rocket::{self, Rocket, Request};
use rocket_contrib::{JSON, Value};
mod teams;
mod members;
pub fn app(pool: Pool) -> Rocket {
rocket::ignite()
.manage(pool)
.mount("/teams", teams::routes())
.mount("/members", members::routes())
.catch(errors![not_found, server_error])
}
#[error(404)]
fn not_found(_req: &Request) -> JSON<Value> {
JSON(json!({ "message": "Not Found" }))
}
#[error(500)]
fn server_error(_req: &Request) -> JSON<Value> {
JSON(json!({ "message": "Internal Server Error" }))
}
| use db::Pool;
use rocket::{self, Rocket, Request};
use rocket_contrib::{JSON, Value};
use std::env;
mod teams;
mod members;
pub fn app(pool: Pool) -> Rocket {
set_log_level();
rocket::ignite()
.manage(pool)
.mount("/teams", teams::routes())
.mount("/members", members::routes())
.catch(errors![not_found, server_error])
}
#[error(404)]
fn not_found(_req: &Request) -> JSON<Value> {
JSON(json!({ "message": "Not Found" }))
}
#[error(500)]
fn server_error(_req: &Request) -> JSON<Value> {
JSON(json!({ "message": "Internal Server Error" }))
}
fn set_log_level() {
env::set_var("ROCKET_LOG", "critical");
}
|
Sort the list of configs and tab space | extern crate common;
use common::args::Args;
use common::first_run::check_first_run;
use common::project_paths::{project_paths, CONFIG_EXTENSION};
use std::path::PathBuf;
pub fn exec(args: Args) -> Result<(), String> {
let project_paths = project_paths(&args);
check_first_run(&project_paths.project_directory)?;
let projects: Vec<String> = project_paths
.project_directory
.read_dir()
.map_err(|_| "Could not read the dir")?
.filter_map(|path| path.ok())
.map(|path| PathBuf::from(path.file_name()))
.filter_map(|buf| match buf.extension().and_then(|x| x.to_str()) {
Some(CONFIG_EXTENSION) => buf
.file_stem()
.and_then(|x| x.to_str())
.map(|x| x.to_string()),
_ => None,
})
.collect();
println!("{}", projects.join(" "));
Ok(())
}
| extern crate common;
use common::args::Args;
use common::first_run::check_first_run;
use common::project_paths::{project_paths, CONFIG_EXTENSION};
use std::path::PathBuf;
pub fn exec(args: Args) -> Result<(), String> {
let project_paths = project_paths(&args);
check_first_run(&project_paths.project_directory)?;
let mut projects: Vec<String> = project_paths
.project_directory
.read_dir()
.map_err(|_| "Could not read the dir")?
.filter_map(|path| path.ok())
.map(|path| PathBuf::from(path.file_name()))
.filter_map(|buf| match buf.extension().and_then(|x| x.to_str()) {
Some(CONFIG_EXTENSION) => buf
.file_stem()
.and_then(|x| x.to_str())
.map(|x| x.to_string()),
_ => None,
})
.collect();
&projects.sort();
println!("{}", &projects.join("\t\t"));
Ok(())
}
|
Add newlines after error messages | extern crate chattium_oxide_lib;
extern crate yaml_file_handler;
extern crate hyper;
extern crate clap;
mod io;
mod options;
use io::read_unprompted;
use options::Options;
use hyper::client::Client;
use std::io::{stderr, Write};
use chattium_oxide_lib::{ChatMessage, ChatUser};
use chattium_oxide_lib::json::ToJsonnable;
fn main() {
let client = Client::new();
let options = Options::parse();
while let Ok(Some(rmessage)) = read_unprompted() {
match ChatMessage::new(ChatUser::me(options.name.clone()), rmessage).to_json_string() {
Ok(json) =>
match client.post(&*&options.server).body(&*&json).send() {
Ok(response) => println!("Server responded with status {}", response.status),
Err(error) => {let _ = stderr().write_fmt(format_args!("POSTing the message failed: {}", error));},
},
Err(error) => {let _ = stderr().write_fmt(format_args!("Couldn't serialize message: {}", error));},
}
}
}
| extern crate chattium_oxide_lib;
extern crate yaml_file_handler;
extern crate hyper;
extern crate clap;
mod io;
mod options;
use io::read_unprompted;
use options::Options;
use hyper::client::Client;
use std::io::{stderr, Write};
use chattium_oxide_lib::{ChatMessage, ChatUser};
use chattium_oxide_lib::json::ToJsonnable;
fn main() {
let client = Client::new();
let options = Options::parse();
while let Ok(Some(rmessage)) = read_unprompted() {
match ChatMessage::new(ChatUser::me(options.name.clone()), rmessage).to_json_string() {
Ok(json) =>
match client.post(&*&options.server).body(&*&json).send() {
Ok(response) => println!("Server responded with status {}", response.status),
Err(error) => {let _ = stderr().write_fmt(format_args!("POSTing the message failed: {}\n", error));},
},
Err(error) => {let _ = stderr().write_fmt(format_args!("Couldn't serialize message: {}\n", error));},
}
}
}
|
Fix mime_guess error in Warp example | #![deny(warnings)]
#[macro_use]
extern crate rust_embed;
extern crate warp;
use std::borrow::Cow;
use warp::{filters::path::Tail, http::Response, Filter, Rejection, Reply};
#[derive(RustEmbed)]
#[folder = "examples/public/"]
struct Asset;
fn main() {
let index_hml = warp::get2().and(warp::path::end()).and_then(|| serve("index.html"));
let dist = warp::path("dist").and(warp::path::tail()).and_then(|tail: Tail| serve(tail.as_str()));
let routes = index_hml.or(dist);
warp::serve(routes).run(([127, 0, 0, 1], 8080));
}
fn serve(path: &str) -> Result<impl Reply, Rejection> {
let mime = mime_guess::guess_mime_type(path);
let asset: Option<Cow<'static, [u8]>> = Asset::get(path);
let file = asset.ok_or_else(|| warp::reject::not_found())?;
Ok(Response::builder().header("content-type", mime.to_string()).body(file))
}
| #![deny(warnings)]
#[macro_use]
extern crate rust_embed;
extern crate warp;
use std::borrow::Cow;
use warp::{filters::path::Tail, http::Response, Filter, Rejection, Reply};
#[derive(RustEmbed)]
#[folder = "examples/public/"]
struct Asset;
fn main() {
let index_hml = warp::get2().and(warp::path::end()).and_then(|| serve("index.html"));
let dist = warp::path("dist").and(warp::path::tail()).and_then(|tail: Tail| serve(tail.as_str()));
let routes = index_hml.or(dist);
warp::serve(routes).run(([127, 0, 0, 1], 8080));
}
fn serve(path: &str) -> Result<impl Reply, Rejection> {
let mime = mime_guess::from_path(path).first_or_octet_stream();
let asset: Option<Cow<'static, [u8]>> = Asset::get(path);
let file = asset.ok_or_else(|| warp::reject::not_found())?;
Ok(Response::builder().header("content-type", mime.to_string()).body(file))
}
|
Move serde_json usage to root crate (to use macros). | // TODO: FIXME: It must be removed after code layout stabilization!
#![allow(dead_code)]
#![allow(unused_variables)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
// Note that to use macroses from util inside of other modules it must me loaded first!
#[macro_use]
mod utils;
pub mod api;
mod commands;
mod errors;
mod services;
#[cfg(test)]
mod tests {
//use super::*;
#[test]
fn dummy() {
assert! (true, "Dummy check!");
}
}
| // TODO: FIXME: It must be removed after code layout stabilization!
#![allow(dead_code)]
#![allow(unused_variables)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate lazy_static;
// Note that to use macroses from util inside of other modules it must me loaded first!
#[macro_use]
mod utils;
pub mod api;
mod commands;
mod errors;
mod services;
#[cfg(test)]
mod tests {
//use super::*;
#[test]
fn dummy() {
assert! (true, "Dummy check!");
}
}
|
Update create session function with small sts example | pub mod SessionHandler {
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::env;
pub fn create(profile_name: &str) {
println!("Creating session for profile \"{}\"...", profile_name);
println!("{}",read_aws_config_file());
}
pub fn show() {
println!("Showing sessions...");
}
pub fn refresh() {
println!("Refreshing sessions...");
}
pub fn clean() {
println!("Cleaning sessions...");
}
fn read_aws_config_file() -> String {
let path = match env::home_dir() {
Some(path) => path,
None => panic!("Could not retrieve user's home directory."),
};
let config_file_path = format!("{}/.aws/config", path.display());
let f = File::open(config_file_path).expect("Could not find AWS config file.");
let mut buf_reader = BufReader::new(f);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents).expect("Found config file but could not read it.");
contents
}
}
| extern crate rusoto_core;
extern crate rusoto_sts;
extern crate rusoto_dynamodb;
pub mod SessionHandler {
use rusoto_core::{DefaultCredentialsProvider, Region};
use rusoto_core::{default_tls_client, ProfileProvider, ProvideAwsCredentials};
use rusoto_sts::{StsClient, StsAssumeRoleSessionCredentialsProvider};
use rusoto_dynamodb::{DynamoDb, DynamoDbClient, ListTablesInput};
pub fn create(profile_name: &str) {
println!("Creating session for profile \"{}\"...", profile_name);
let mut profile = ProfileProvider::new().unwrap();
profile.set_profile(profile_name);
let sts = StsClient::new(default_tls_client().unwrap(), profile, Region::EuWest1);
let provider = StsAssumeRoleSessionCredentialsProvider::new(
sts,
"arn:aws:iam::247901982038:role/CloudreachAdminRole".to_owned(),
"default".to_owned(),
None, None, None, None
);
let client = DynamoDbClient::new(default_tls_client().unwrap(), profile, Region::EuWest1);
let list_tables_input: ListTablesInput = Default::default();
match client.list_tables(&list_tables_input) {
Ok(output) => {
match output.table_names {
Some(table_name_list) => {
println!("Tables in database:");
for table_name in table_name_list {
println!("{}", table_name);
}
}
None => println!("No tables in database!"),
}
}
Err(error) => {
println!("Error: {:?}", error);
}
}
}
pub fn show() {
println!("Showing sessions...");
}
pub fn refresh() {
println!("Refreshing sessions...");
}
pub fn clean() {
println!("Cleaning sessions...");
}
}
|
Update mod_path for Rust nightly | #![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::ast::{TokenTree, Ident};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager, IdentTT, get_single_str_from_tts};
use syntax::util::small_vector::SmallVector;
use rustc::plugin::Registry;
fn expand_mod_path<'a>(cx: &'a mut ExtCtxt, sp: Span, ident: Ident, tts: Vec<TokenTree>)
-> Box<MacResult + 'a> {
let path = match get_single_str_from_tts(cx, sp, &*tts, "mod_path!") {
Some(string) => string,
None => return DummyResult::expr(sp),
};
let path = &*path;
MacEager::items(SmallVector::one(quote_item!(cx,
#[path = $path]
pub mod $ident;
).unwrap()))
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_syntax_extension(token::intern("mod_path"), IdentTT(Box::new(expand_mod_path), None, false));
}
| #![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate rustc;
extern crate rustc_plugin;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::ast::{TokenTree, Ident};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager, IdentTT, get_single_str_from_tts};
use syntax::util::small_vector::SmallVector;
use rustc_plugin::Registry;
fn expand_mod_path<'a>(cx: &'a mut ExtCtxt, sp: Span, ident: Ident, tts: Vec<TokenTree>)
-> Box<MacResult + 'a> {
let path = match get_single_str_from_tts(cx, sp, &*tts, "mod_path!") {
Some(string) => string,
None => return DummyResult::expr(sp),
};
let path = &*path;
MacEager::items(SmallVector::one(quote_item!(cx,
#[path = $path]
pub mod $ident;
).unwrap()))
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_syntax_extension(token::intern("mod_path"), IdentTT(Box::new(expand_mod_path), None, false));
}
|
Fix fontconfig / expat link order | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Some crumminess to make sure we link correctly
#[cfg(target_os = "linux")]
#[link(name = "azure")]
#[link(name = "stdc++")]
#[link(name = "skia")]
#[link(name = "expat")]
#[link(name = "fontconfig")]
#[link(name = "X11")]
extern { }
#[cfg(target_os = "android")]
#[link(name = "azure")]
#[link(name = "stdc++")]
#[link(name = "skia")]
#[link(name = "expat")]
#[link(name = "fontconfig")]
#[link(name = "EGL")]
extern { }
#[cfg(target_os = "macos")]
#[link(name = "azure")]
#[link(name = "stdc++")]
#[link(name = "skia")]
#[link(name = "objc")]
#[link(name = "IOSurface", kind = "framework")]
#[link(name = "OpenGL", kind = "framework")]
#[link(name = "Foundation", kind = "framework")]
#[link(name = "QuartzCore", kind = "framework")]
#[link(name = "ApplicationServices", kind = "framework")]
extern { }
| /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Some crumminess to make sure we link correctly
#[cfg(target_os = "linux")]
#[link(name = "azure")]
#[link(name = "stdc++")]
#[link(name = "skia")]
// fontconfig must come before expat for linking to succeed
#[link(name = "fontconfig")]
#[link(name = "expat")]
#[link(name = "X11")]
extern { }
#[cfg(target_os = "android")]
#[link(name = "azure")]
#[link(name = "stdc++")]
#[link(name = "skia")]
#[link(name = "expat")]
#[link(name = "fontconfig")]
#[link(name = "EGL")]
extern { }
#[cfg(target_os = "macos")]
#[link(name = "azure")]
#[link(name = "stdc++")]
#[link(name = "skia")]
#[link(name = "objc")]
#[link(name = "IOSurface", kind = "framework")]
#[link(name = "OpenGL", kind = "framework")]
#[link(name = "Foundation", kind = "framework")]
#[link(name = "QuartzCore", kind = "framework")]
#[link(name = "ApplicationServices", kind = "framework")]
extern { }
|
Add thread for UserInput and remove thread for view | use std::thread;
use std::sync::mpsc::{channel, Sender, Receiver, self};
use message::Message;
pub struct UserView<'a> {
_incoming_sender: Sender<Message>,
_view_guard: thread::JoinGuard<'a, ()>
}
pub fn start<'a>() -> UserView<'a> {
let (tx, rx) = channel::<Message>();
let view_guard = spawn_view(rx);
UserView {
_incoming_sender: tx,
_view_guard: view_guard
}
}
impl<'a> UserView<'a> {
pub fn incoming_message(&self, message: Message) -> Result<(), mpsc::SendError<Message>> {
self._incoming_sender.send(message)
}
}
fn spawn_view<'b>(rx: Receiver<Message>) -> thread::JoinGuard<'b, ()> {
thread::scoped(move || {
loop {
let message = rx.recv().ok().expect("Failed to print message");
println!("{}", message);
}
})
}
| use std::io;
use std::thread;
use std::sync::mpsc;
use message::Message;
pub struct UserView<'a> {
_input_recv: mpsc::Receiver<String>,
_input_guard: thread::JoinGuard<'a, ()>
}
pub fn start<'a>() -> UserView<'a> {
let (input_send, input_recv) = mpsc::channel::<String>();
UserView {
_input_recv: input_recv,
_input_guard: spawn_input(input_send),
}
}
impl<'a> UserView<'a> {
pub fn incoming_message(&self, message: Message) -> Result<(), &'static str> {
println!("{}", message);
Ok(())
}
pub fn iter_user_input(&self) -> mpsc::Iter<String> {
self._input_recv.iter()
}
}
fn spawn_input<'b>(tx: mpsc::Sender<String>) -> thread::JoinGuard<'b, ()> {
thread::scoped(move || {
let mut stdin = io::stdin();
loop {
let mut input: String = "".to_string();
stdin.read_line(&mut input);
tx.send(input);
}
})
}
|
Send back the same payload as client sent. | extern crate zmq;
use zmq::{Context, Message};
fn main() {
println!("24 days of Rust - zmq (day 20)");
let args = std::env::args().collect::<Vec<_>>();
if args.len() < 2 {
println!("Usage: {} (client|server)", args[0]);
return;
}
let mut ctx = Context::new();
let addr = "tcp://127.0.0.1:25933";
if args[1] == "client" {
println!("ZeroMQ client connecting to {}", addr);
let mut sock = ctx.socket(zmq::REQ).unwrap();
let _ = sock.connect(addr);
let payload = b"Hello world!";
println!("-> {:?}", payload);
let mut msg = Message::new().unwrap();
sock.send(payload, 0).unwrap();
sock.recv(&mut msg, 0).unwrap();
let contents = msg.as_str();
println!("<- {:?}", contents);
}
else {
println!("ZeroMQ server listening on {}", addr);
let mut sock = ctx.socket(zmq::REP).unwrap();
let _ = sock.bind(addr);
let mut msg = Message::new().unwrap();
loop {
if let Ok(_) = sock.recv(&mut msg, 0) {
sock.send_str("hehehe", 0).unwrap();
}
}
}
}
| extern crate zmq;
use zmq::{Context, Message};
fn main() {
println!("24 days of Rust - zmq (day 20)");
let args = std::env::args().collect::<Vec<_>>();
if args.len() < 2 {
println!("Usage: {} (client|server)", args[0]);
return;
}
let mut ctx = Context::new();
let addr = "tcp://127.0.0.1:25933";
if args[1] == "client" {
println!("ZeroMQ client connecting to {}", addr);
let mut sock = ctx.socket(zmq::REQ).unwrap();
let _ = sock.connect(addr);
let payload = "Hello world!";
println!("-> {:?}", payload);
let mut msg = Message::new().unwrap();
sock.send(payload.as_bytes(), 0).unwrap();
sock.recv(&mut msg, 0).unwrap();
let contents = msg.as_str().unwrap();
println!("<- {:?}", contents);
}
else {
println!("ZeroMQ server listening on {}", addr);
let mut sock = ctx.socket(zmq::REP).unwrap();
let _ = sock.bind(addr);
let mut msg = Message::new().unwrap();
loop {
if let Ok(_) = sock.recv(&mut msg, 0) {
sock.send_str(msg.as_str().unwrap(), 0).unwrap();
}
}
}
}
|
Make ractive.js available for patron-client |
{% set repo = 'digibib' %}
{% set image = 'redef-patron-client-skeleton' %}
{% set tag = 'latest' %}
{% set build_context = '/vagrant/redef/patron-client' %}
{% set dockerfile = 'Dockerfile-skeleton' %}
{% include 'docker-build.sls-fragment' %}
copy_graph_module:
cmd.run:
- name: cp /vagrant/redef/catalinker/client/src/graph.js /vagrant/redef/patron-client/lib/
|
{% set repo = 'digibib' %}
{% set image = 'redef-patron-client-skeleton' %}
{% set tag = 'latest' %}
{% set build_context = '/vagrant/redef/patron-client' %}
{% set dockerfile = 'Dockerfile-skeleton' %}
{% include 'docker-build.sls-fragment' %}
copy_graph_module:
cmd.run:
- name: cp /vagrant/redef/catalinker/client/src/graph.js /vagrant/redef/patron-client/lib/
/vagrant/redef/patron-client/public/ractive.min.js:
file.managed:
- source: http://cdn.ractivejs.org/0.7.3/ractive.min.js
- source_hash: md5=8e9c737dfa1343881d724403d5c295b7 |
Use contents_pillar to avoid newline issues | haproxy:
pkgrepo.managed:
- ppa: vbernat/haproxy-1.5
- keyid: CFFB779AADC995E4F350A060505D97A41C61B9CD
- require_in:
- pkg: haproxy
pkg:
- latest
service.running:
- enable: True
- reload: True
- watch:
- file: /etc/haproxy/haproxy.cfg
- require:
- pkg: haproxy
- file: /etc/haproxy/haproxy.cfg
/etc/haproxy/haproxy.cfg:
file.managed:
- source: salt://haproxy/config/haproxy.cfg.jinja
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- pkg: haproxy
/etc/ssl/private/hg.python.org.pem:
file.managed:
- contents: {{ pillar["tls_certs"]["hg.python.org"] }}
- user: root
- group: root
- mode: 644
| haproxy:
pkgrepo.managed:
- ppa: vbernat/haproxy-1.5
- keyid: CFFB779AADC995E4F350A060505D97A41C61B9CD
- require_in:
- pkg: haproxy
pkg:
- latest
service.running:
- enable: True
- reload: True
- watch:
- file: /etc/haproxy/haproxy.cfg
- require:
- pkg: haproxy
- file: /etc/haproxy/haproxy.cfg
/etc/haproxy/haproxy.cfg:
file.managed:
- source: salt://haproxy/config/haproxy.cfg.jinja
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- pkg: haproxy
/etc/ssl/private/hg.python.org.pem:
file.managed:
- contents_pillar: tls_certs:hg.python.org
- user: root
- group: root
- mode: 644
|
Revert "Pin selenium-webdriver version to speed up build." | Europe/Oslo:
timezone.system:
- utc: True
installpkgs:
pkg.installed:
- pkgs:
- ruby1.9.1-dev
- phantomjs
- firefox
- chromium-browser
install_chromedriver:
pkg.installed:
- pkgs:
- unzip
archive.extracted:
- name: /usr/local/bin/
- source: http://chromedriver.storage.googleapis.com/2.10/chromedriver_linux64.zip
- archive_format: zip
- source_hash: md5=058cd8b7b4b9688507701b5e648fd821
- if_missing: /usr/local/bin/chromedriver
- requires:
- pkg: unzip
file.managed:
- name: /usr/local/bin/chromedriver
- replace: False
- mode: 755
- requires:
- archive: install_chromedriver
{% for gem in
'rspec',
'cucumber',
'watir-webdriver' %}
{{ gem }}:
gem.installed:
- requires:
- pkg: ruby1.9.1-dev
{% endfor %}
selenium-webdriver:
gem.installed:
- version: 2.44.0
| Europe/Oslo:
timezone.system:
- utc: True
installpkgs:
pkg.installed:
- pkgs:
- ruby1.9.1-dev
- phantomjs
- firefox
- chromium-browser
install_chromedriver:
pkg.installed:
- pkgs:
- unzip
archive.extracted:
- name: /usr/local/bin/
- source: http://chromedriver.storage.googleapis.com/2.10/chromedriver_linux64.zip
- archive_format: zip
- source_hash: md5=058cd8b7b4b9688507701b5e648fd821
- if_missing: /usr/local/bin/chromedriver
- requires:
- pkg: unzip
file.managed:
- name: /usr/local/bin/chromedriver
- replace: False
- mode: 755
- requires:
- archive: install_chromedriver
{% for gem in
'rspec',
'cucumber',
'watir-webdriver' %}
{{ gem }}:
gem.installed:
- requires:
- pkg: ruby1.9.1-dev
{% endfor %}
selenium-webdriver:
cmd.run:
- name: gem install --version '>= 2.43.0' selenium-webdriver
|
Fix avahi service file name | #
# Tools and settings for local file sharing discovery on Mac
#
install-avahi:
pkg.installed:
- name: avahi-daemon
/etc/avahi/avahi-daemon.conf:
file.managed:
- source: salt://avahi/files/etc/avahi/avahi-daemon.conf
- require:
- pkg: install-avahi
/etc/avahi/services/smb.conf:
file.managed:
- source: salt://avahi/files/etc/avahi/services/smb.conf
- require:
- pkg: install-avahi
avahi-daemon:
service.running:
- enable: True
- require:
- pkg: install-avahi
- watch:
- file: /etc/avahi/avahi-daemon.conf
- file: /etc/avahi/services/smb.conf
| #
# Tools and settings for local file sharing discovery on Mac
#
install-avahi:
pkg.installed:
- name: avahi-daemon
/etc/avahi/avahi-daemon.conf:
file.managed:
- source: salt://avahi/files/etc/avahi/avahi-daemon.conf
- require:
- pkg: install-avahi
/etc/avahi/services/smb.service:
file.managed:
- source: salt://avahi/files/etc/avahi/services/smb.service
- require:
- pkg: install-avahi
avahi-daemon:
service.running:
- enable: True
- require:
- pkg: install-avahi
- watch:
- file: /etc/avahi/avahi-daemon.conf
- file: /etc/avahi/services/smb.service
|
Fix missing docker-related pillar-data for vm-devops. | # PILLAR TOP
base:
'^(wombat|\w+-ship)$':
- match: pcre
- koha
- koha.admin
- sip
- migration
- migration.admin
'^(wombat|\w+-ship|vm-devops)$':
- match: pcre
- elk
- resource_monitoring
'^(\w+-ship|vm-devops)$':
- match: pcre
- elk.dev
- resource_monitoring.dev
'^(\w+-ship)$':
- match: pcre
- docker.dev
- sip.dev
- redef
- redef.dev
'wombat,vm-devops':
- match: list
- overview # overview
- koha # overview
- resource_monitoring # overview
- redef # overview
'wombat':
- match: list
- docker.prod
- elk.prod
- sip.prod
- redef.prod
- resource_monitoring.prod
- overview.prod # overview
- redef.prod # overview
- elk.prod # overview
'vm-devops':
- match: list
- overview.dev # overview
- redef.dev # overview
- elk.dev # overview
| # PILLAR TOP
base:
'^(wombat|\w+-ship)$':
- match: pcre
- koha
- koha.admin
- sip
- migration
- migration.admin
'^(wombat|\w+-ship|vm-devops)$':
- match: pcre
- elk
- resource_monitoring
'^(\w+-ship|vm-devops)$':
- match: pcre
- docker.dev
- elk.dev
- resource_monitoring.dev
'^(\w+-ship)$':
- match: pcre
- sip.dev
- redef
- redef.dev
'wombat,vm-devops':
- match: list
- overview # overview
- koha # overview
- resource_monitoring # overview
- redef # overview
'wombat':
- match: list
- docker.prod
- elk.prod
- sip.prod
- redef.prod
- resource_monitoring.prod
- overview.prod # overview
- redef.prod # overview
- elk.prod # overview
'vm-devops':
- match: list
- overview.dev # overview
- redef.dev # overview
- elk.dev # overview
|
Add more grantnav dev branches | # grantnav development
grantnav:
allowedhosts: '.default.threesixtygiving.uk0.bigv.io'
server_size: small
branches:
- master
- iteration08
- 387-sum-currencies
- 382-include-GSS-codes
dataselections:
# - all
- acceptable_license_valid
# - valid
deploy_mode: matrix
deploys:
new:
datadate: '2018-01-05'
current:
datadate: '2018-01-05'
old:
datadate: '2017-12-04'
piwik:
url: '//mon.opendataservices.coop/piwik/'
site_id: '6'
| # grantnav development
grantnav:
allowedhosts: '.default.threesixtygiving.uk0.bigv.io'
server_size: small
branches:
- master
- iteration08
- 387-sum-currencies
- 382-include-GSS-codes
- 356-shared-URL-shows-cookie-policy
dataselections:
# - all
- acceptable_license_valid
# - valid
deploy_mode: matrix
deploys:
new:
datadate: '2018-01-05'
current:
datadate: '2018-01-05'
old:
datadate: '2017-12-04'
piwik:
url: '//mon.opendataservices.coop/piwik/'
site_id: '6'
|
Change dfb10 -> dfb due to network changes. | elk:
lumberjack-host-name: dfb10 # used for cert generation
lumberjack-host-ip: 10.172.2.105 # used for cert generation
lumberjack-host: dfb10 # used for actual connect to server
lumberjack-port: 5000
configserver-host: dfb10
configserver-port: 9999
| elk:
lumberjack-host-name: dfb # used for cert generation
lumberjack-host-ip: 171.23.3.39 # used for cert generation
lumberjack-host: dfb # used for actual connect to server
lumberjack-port: 5000
configserver-host: dfb
configserver-port: 9999
|
Remove some settings from elastic-stack:elasticsearch pillar | {% set ENVIRONMENT = salt.grains.get('environment', 'rc-apps') %}
{% set pkg_version = salt.pkg.version('elasticsearch').split('.')[0] %}
elastic_stack:
elasticsearch:
configuration_settings:
discovery:
zen.hosts_provider: ec2
cluster.name: {{ ENVIRONMENT }}
discovery.ec2.tag.escluster: {{ ENVIRONMENT }}
network.host: ['_eth0:ipv4_', '_lo:ipv4_']
{% if pkg_version and pkg_version|int > 6 %}
cluster.initial_master_nodes:
- elasticsearch.service.consul
{% endif %}
plugins:
- name: discovery-ec2
| {% set ENVIRONMENT = salt.grains.get('environment', 'rc-apps') %}
elastic_stack:
elasticsearch:
configuration_settings:
cluster.name: {{ ENVIRONMENT }}
discovery.ec2.tag.escluster: {{ ENVIRONMENT }}
network.host: ['_eth0:ipv4_', '_lo:ipv4_']
plugins:
- name: discovery-ec2
|
Add indentation to sls to fix parse error |
ensure_installation_of_jq:
pkg.installed:
- name: jq
manage_search_index_pruning_job:
cron.present:
- identifier: PRUNE_SEARCH_INDICES
- user: root
- hour: random
- minute: random
- name: >-
for index in `curl -s 'localhost:9200/_aliases' | jq 'to_entries |
map(select(.value.aliases == {}) | .key) | .[]' | sed s'/"//g'`;
do curl -X DELETE localhost:9200/$index; done > /var/tmp/prune-search-idx.log 2>&1
{% if salt.grains.get('environment') == 'rc-apps' %}
manage_ci_index_pruning_job:
cron.present:
- identifier: PRUNE_CI_INDICES
- user: root
- hour: 6
- minute: random
- day: 6
- name: >-
for index in `curl -s localhost:9200/_cat/indices/*-ci* | awk '{print $3}'`;
do curl -X DELETE localhost:9200/$index; done > /var/tmp/prune-ci-idx.log 2>&1
{% endif %}
|
ensure_installation_of_jq:
pkg.installed:
- name: jq
manage_search_index_pruning_job:
cron.present:
- identifier: PRUNE_SEARCH_INDICES
- user: root
- hour: random
- minute: random
- name: >-
for index in `curl -s 'localhost:9200/_aliases' | jq 'to_entries |
map(select(.value.aliases == {}) | .key) | .[]' | sed s'/"//g'`;
do curl -X DELETE localhost:9200/$index; done > /var/tmp/prune-search-idx.log 2>&1
{% if salt.grains.get('environment') == 'rc-apps' %}
manage_ci_index_pruning_job:
cron.present:
- identifier: PRUNE_CI_INDICES
- user: root
- hour: 6
- minute: random
- day: 6
- name: >-
for index in `curl -s localhost:9200/_cat/indices/*-ci* | awk '{print $3}'`;
do curl -X DELETE localhost:9200/$index; done > /var/tmp/prune-ci-idx.log 2>&1
{% endif %}
|
Replace deprecated iteritems() with items() | {% from "nfs/map.jinja" import nfs with context %}
include:
- nfs.client
{% for m in salt['pillar.get']('nfs:mount').iteritems() %}
{{ m[1].mountpoint }}:
mount.mounted:
- device: {{ m[1].location }}
- fstype: nfs
- opts: {{ m[1].opts|default('vers=3') }}
- persist: {{ m[1].persist|default('True') }}
- mkmnt: {{ m[1].mkmnt|default('True') }}
{% endfor %}
| {% from "nfs/map.jinja" import nfs with context %}
include:
- nfs.client
{% for m in salt['pillar.get']('nfs:mount').items() %}
{{ m[1].mountpoint }}:
mount.mounted:
- device: {{ m[1].location }}
- fstype: nfs
- opts: {{ m[1].opts|default('vers=3') }}
- persist: {{ m[1].persist|default('True') }}
- mkmnt: {{ m[1].mkmnt|default('True') }}
{% endfor %}
|
Set deploy complete signal to execute last | {% set app_dir = '/opt/{0}'.format(salt.pillar.get('django:app_name')) %}
build_static_assets_for_odlvideo:
cmd.script:
- name: {{ app_dir }}/webpack_if_prod.sh
- cwd: {{ app_dir }}
- env:
- NODE_ENV: production
- user: deploy
generate_deploy_hash_for_odlvideo:
cmd.run:
- name: 'git log --pretty=format:%H -n 1 > static/hash.txt'
- cwd: {{ app_dir }}
- user: deploy
signal_odlvideo_deploy_complete:
file.touch:
- name: {{ app_dir }}/deploy_complete.txt
| {% set app_dir = '/opt/{0}'.format(salt.pillar.get('django:app_name')) %}
build_static_assets_for_odlvideo:
cmd.script:
- name: {{ app_dir }}/webpack_if_prod.sh
- cwd: {{ app_dir }}
- env:
- NODE_ENV: production
- user: deploy
generate_deploy_hash_for_odlvideo:
cmd.run:
- name: 'git log --pretty=format:%H -n 1 > static/hash.txt'
- cwd: {{ app_dir }}
- user: deploy
signal_odlvideo_deploy_complete:
file.touch:
- name: {{ app_dir }}/deploy_complete.txt
- order: last
|
Add unless for chrislea nodejs repo | include:
- python.python-libs
nodejs:
cmd.wait:
- name: /usr/bin/add-apt-repository -y ppa:chris-lea/node.js
- require:
- pkg: python-software-properties
pkg:
- installed
- refresh: True
- names:
- nodejs
- npm
- unless: node -v 2>/dev/null
- require:
- cmd: nodejs
| include:
- python.python-libs
nodejs:
cmd.wait:
- name: /usr/bin/add-apt-repository -y ppa:chris-lea/node.js
- unless: apt-key list | grep -q C7917B12
- require:
- pkg: python-software-properties
pkg:
- installed
- refresh: True
- names:
- nodejs
- npm
- unless: node -v 2>/dev/null
- require:
- cmd: nodejs
|
Install psycopg2 required for celery. | install_celery:
pip.installed:
- name: Celery
- upgrade: True
install_celery_bundles:
pip.installed:
- name: celery[librabbitmq]
install_celery_flower:
pip.installed:
- name: flower
- upgrade: True
| install_celery:
pip.installed:
- name: Celery
- upgrade: True
install_celery_bundles:
pip.installed:
- name: celery[librabbitmq]
install_celery_flower:
pip.installed:
- name: flower
- upgrade: True
python-psycopg2:
pkg.installed:
- name: python-psycopg2 |
Move 360 CoVE live deploy to use master branch | default_branch: live
cove:
servername: dataquality.threesixtygiving.org
https: 'force'
extra_cove_branches: []
| default_branch: master
cove:
servername: dataquality.threesixtygiving.org
https: 'force'
extra_cove_branches: []
|
Change cmd to not use \ for line separation, it's not necessary. | ceph:
service.running:
- enable: True
volumes-user:
file.managed:
- name: /etc/ceph/ceph.client.volumes.keyring
- mode: 440
- require:
- cmd: volumes-user
cmd.run:
- name: >-
ceph auth get-or-create client.volumes \
mon 'allow r' \
mds 'allow'
osd 'allow class-read object_prefix rbd_children,
allow rwx pool=volumes,
allow rx pool=images' > /etc/ceph/ceph.client.volumes.keyring
images-user:
file.managed:
- name: /etc/ceph/ceph.client.images.keyring
- mode: 440
- require:
- cmd: images-user
cmd.run:
- name: >-
ceph auth get-or-create client.images \
mon 'allow r' \
mds 'allow' \
osd 'allow class-read object_prefix rbd_children,
allow rwx pool=images' > /etc/ceph/ceph.client.images.keyring
instances-user:
file.managed:
- name: /etc/ceph/ceph.client.instances.keyring
- mode: 440
- require:
- cmd: instances-user
cmd.run:
- name: >-
ceph auth get-or-create client.images \
mon 'allow r' \
mds 'allow' \
osd 'allow class-read object_prefix rbd_children,
allow rwx pool=instances' > /etc/ceph/ceph.client.instances.keyring
| ceph:
service.running:
- enable: True
volumes-user:
file.managed:
- name: /etc/ceph/ceph.client.volumes.keyring
- mode: 440
- require:
- cmd: volumes-user
cmd.run:
- name: >-
ceph auth get-or-create client.volumes
mon 'allow r'
mds 'allow'
osd 'allow class-read object_prefix rbd_children,
allow rwx pool=volumes,
allow rx pool=images' > /etc/ceph/ceph.client.volumes.keyring
images-user:
file.managed:
- name: /etc/ceph/ceph.client.images.keyring
- mode: 440
- require:
- cmd: images-user
cmd.run:
- name: >-
ceph auth get-or-create client.images
mon 'allow r'
mds 'allow'
osd 'allow class-read object_prefix rbd_children,
allow rwx pool=images' > /etc/ceph/ceph.client.images.keyring
instances-user:
file.managed:
- name: /etc/ceph/ceph.client.instances.keyring
- mode: 440
- require:
- cmd: instances-user
cmd.run:
- name: >-
ceph auth get-or-create client.images
mon 'allow r'
mds 'allow'
osd 'allow class-read object_prefix rbd_children,
allow rwx pool=instances' > /etc/ceph/ceph.client.instances.keyring
|
Adjust DNS settings for bootstrap in latitude | #
{% macro configure_deploy_step_function(
source_env_pillar
,
target_env_pillar
,
selected_host_name
,
deploy_step
,
deploy_step_config
,
project_name
,
profile_name
,
requisite_config_file_id
,
requisite_config_file_path
,
bootstrap_dir
)
%}
{{ requisite_config_file_id }}_{{ deploy_step }}:
file.blockreplace:
- name: '{{ requisite_config_file_path }}'
- marker_start: '# Salt auto-config START: {{ requisite_config_file_id }}_{{ deploy_step }}'
- marker_end: '# Salt auto-config END: {{ requisite_config_file_id }}_{{ deploy_step }}'
- append_if_not_found: True
- backup: False
- content: |
{{ deploy_step }} = {
'step_enabled': {{ deploy_step_config['step_enabled'] }},
'resolv_conf_file': 'resources/examples/uvsmtid/centos-5.5-minimal/resolv.conf',
'dns_server_ip': '8.8.8.8',
'remote_hostname': 'google.com',
}
- show_changes: True
- require:
- file: {{ requisite_config_file_id }}
{% endmacro %}
| #
{% macro configure_deploy_step_function(
source_env_pillar
,
target_env_pillar
,
selected_host_name
,
deploy_step
,
deploy_step_config
,
project_name
,
profile_name
,
requisite_config_file_id
,
requisite_config_file_path
,
bootstrap_dir
)
%}
{{ requisite_config_file_id }}_{{ deploy_step }}:
file.blockreplace:
- name: '{{ requisite_config_file_path }}'
- marker_start: '# Salt auto-config START: {{ requisite_config_file_id }}_{{ deploy_step }}'
- marker_end: '# Salt auto-config END: {{ requisite_config_file_id }}_{{ deploy_step }}'
- append_if_not_found: True
- backup: False
- content: |
{{ deploy_step }} = {
'step_enabled': {{ deploy_step_config['step_enabled'] }},
'resolv_conf_file': 'resources/examples/uvsmtid/centos-5.5-minimal/resolv.conf',
'dns_server_ip': '10.77.1.198',
'remote_hostname': 'example.com',
}
- show_changes: True
- require:
- file: {{ requisite_config_file_id }}
{% endmacro %}
|
Add missing git installation directive | git://github.com/ekg/freebayes.git
git.latest:
- target: /opt/
- submodules: True | git:
pkg.installed: []
clone:
git.latest:
- name: git://github.com/ekg/freebayes.git
- target: /opt/freebayes
- submodules: True
|
Add another 360 cove branch dev deploy | cove:
prefixmap: '360=^/?'
allowedhosts: '.threesixtygiving.org,.threesixtygiving.uk0.bigv.io'
https: 'no'
app: cove_360
extra_cove_branches: #[] # remove the [] (empty list) if you re-add some values below
- name: release-201611
uwsgi_port: 3040
- name: release-201705
uwsgi_port: 3041
app: cove_360
| cove:
prefixmap: '360=^/?'
allowedhosts: '.threesixtygiving.org,.threesixtygiving.uk0.bigv.io'
https: 'no'
app: cove_360
extra_cove_branches: #[] # remove the [] (empty list) if you re-add some values below
- name: release-201611
uwsgi_port: 3040
- name: release-201705
uwsgi_port: 3041
app: cove_360
- name: 676-768-bad-data
uwsgi_port: 3042
app: cove_360
|
Upgrade setuptools before upgrading pip. | python-pkgs:
pkg:
- installed
- names:
- python-pip
- python-dev
- build-essential
- python-imaging
python-headers:
pkg:
- installed
- names:
- libpq-dev
- libev-dev
- libevent-dev
- libmemcached-dev
- libjpeg8
- libjpeg8-dev
- libfreetype6
- libfreetype6-dev
- zlib1g
- zlib1g-dev
- libxml2-dev
- libxslt1-dev
pip:
pip.installed:
- upgrade: True
- require:
- pkg: python-pkgs
virtualenv:
pip.installed:
- upgrade: True
- require:
- pip: pip
/usr/lib/libz.so:
file.symlink:
- target: /usr/lib/{{ grains['cpuarch'] }}-linux-gnu/libz.so
- require:
- pkg: python-headers
/usr/lib/libfreetype.so:
file.symlink:
- target: /usr/lib/{{ grains['cpuarch'] }}-linux-gnu/libfreetype.so
- require:
- pkg: python-headers
/usr/lib/libjpeg.so:
file.symlink:
- target: /usr/lib/{{ grains['cpuarch'] }}-linux-gnu/libjpeg.so
- require:
- pkg: python-headers
| python-pkgs:
pkg:
- installed
- names:
- python-pip
- python-dev
- build-essential
- python-imaging
python-headers:
pkg:
- installed
- names:
- libpq-dev
- libev-dev
- libevent-dev
- libmemcached-dev
- libjpeg8
- libjpeg8-dev
- libfreetype6
- libfreetype6-dev
- zlib1g
- zlib1g-dev
- libxml2-dev
- libxslt1-dev
setuptools:
pip.installed:
- upgrade: True
- require:
- pkg: python-pkgs
pip:
pip.installed:
- upgrade: True
- require:
- pip: setuptools
virtualenv:
pip.installed:
- upgrade: True
- require:
- pip: pip
/usr/lib/libz.so:
file.symlink:
- target: /usr/lib/{{ grains['cpuarch'] }}-linux-gnu/libz.so
- require:
- pkg: python-headers
/usr/lib/libfreetype.so:
file.symlink:
- target: /usr/lib/{{ grains['cpuarch'] }}-linux-gnu/libfreetype.so
- require:
- pkg: python-headers
/usr/lib/libjpeg.so:
file.symlink:
- target: /usr/lib/{{ grains['cpuarch'] }}-linux-gnu/libjpeg.so
- require:
- pkg: python-headers
|
Move the openvpn.routing state to be on everything but the vpn | base:
'*':
- base.sanity
- groups
- firewall
- users
- sudoers
- backup.client
- auto-security
'roles:backup-server':
- match: grain
- backup.server
'roles:cdn-logs':
- match: grain
- cdn-logs
'roles:docs':
- match: grain
- docs
'roles:downloads':
- match: grain
- downloads
'roles:hg':
- match: grain
- hg
- ssh.host_keys
'roles:jython-web':
- match: grain
- jython
'roles:loadbalancer':
- match: grain
- haproxy
- openvpn.routing
'roles:salt-master':
- match: grain
'roles:vpn':
- match: grain
- openvpn.server
| base:
'*':
- base.sanity
- groups
- firewall
- users
- sudoers
- backup.client
- auto-security
'* and not G@roles:vpn':
- match: compound
- openvpn.routing
'roles:backup-server':
- match: grain
- backup.server
'roles:cdn-logs':
- match: grain
- cdn-logs
'roles:docs':
- match: grain
- docs
'roles:downloads':
- match: grain
- downloads
'roles:hg':
- match: grain
- hg
- ssh.host_keys
'roles:jython-web':
- match: grain
- jython
'roles:loadbalancer':
- match: grain
- haproxy
'roles:salt-master':
- match: grain
'roles:vpn':
- match: grain
- openvpn.server
|
Fix download URL for Elasticsearch | {% set elasticsearch_version = salt['pillar.get']('elasticsearch:version', false) -%}
{%- if elasticsearch_version %}
elasticsearch:
pkg.installed:
- sources:
{%- if elasticsearch_version.startswith(('0', '1')) %}
- elasticsearch: https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{{ elasticsearch_version }}.deb
{%- else %}
- elasticsearch: https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/{{ elasticsearch_version }}/elasticsearch-{{ elasticsearch_version }}.deb
{%- endif %}
service.running:
- enable: True
- require:
- pkg: elasticsearch
{%- endif %}
| {% set elasticsearch_version = salt['pillar.get']('elasticsearch:version', false) -%}
{%- if elasticsearch_version %}
elasticsearch:
pkg.installed:
- sources:
{%- if elasticsearch_version.startswith(('0', '1')) %}
- elasticsearch: https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{{ elasticsearch_version }}.deb
{%- elif elasticsearch_version.startswith('2') %}
- elasticsearch: https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/{{ elasticsearch_version }}/elasticsearch-{{ elasticsearch_version }}.deb
{%- else %}
- elasticsearch: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{ elasticsearch_version }}.deb
{%- endif %}
service.running:
- enable: True
- require:
- pkg: elasticsearch
{%- endif %}
|
Make db connection via localhost | run_tracking_db_airflow:
postgres_database.present:
- name: airflow
- owner: butler_admin
- tablespace: run_dbspace
- user: postgres
run_tracking_db_celery:
postgres_database.present:
- name: celery
- owner: butler_admin
- tablespace: run_dbspace
- user: postgres
| run_tracking_db_airflow:
postgres_database.present:
- name: airflow
- owner: butler_admin
- tablespace: run_dbspace
- user: postgres
- db_host: localhost
run_tracking_db_celery:
postgres_database.present:
- name: celery
- owner: butler_admin
- tablespace: run_dbspace
- user: postgres
- db_host: localhost |
Add note about permissions for pushing files to ocds docs dev | # This is the dev sls for ocds-docs which doesn't include reverse proxying,
#
# The docs on this server are built and copied it to travis.
# https://github.com/open-contracting/standard/blob/1.0/.travis.yml
# The travis config pulls in a shell script from this deploy repository (so
# that we don't have to make deployment changes to every single branch we might
# want to build).
# https://github.com/OpenDataServices/opendataservices-deploy/blob/master/open-contracting-standard-deploy.sh
include:
- ocds-docs-common
{% from 'lib.sls' import apache %}
{{ apache('ocds-docs-dev.conf') }}
| # This is the dev sls for ocds-docs which doesn't include reverse proxying,
#
# The docs on this server are built and copied it to travis.
# https://github.com/open-contracting/standard/blob/1.0/.travis.yml
# The travis config pulls in a shell script from this deploy repository (so
# that we don't have to make deployment changes to every single branch we might
# want to build).
# https://github.com/OpenDataServices/opendataservices-deploy/blob/master/open-contracting-standard-deploy.sh
#
# Note that this means that anyone with push access to the open-contracting
# GitHub repo can cause files to be copied to this server.
include:
- ocds-docs-common
{% from 'lib.sls' import apache %}
{{ apache('ocds-docs-dev.conf') }}
|
Add Java environment setup for Salt minion | # Configure `salt_minion_role` role (Salt minion).
{% if 'salt_minion_role' in pillar['system_host_roles'] %}
{% if grains['id'] in pillar['system_host_roles']['salt_minion_role']['assigned_hosts'] %}
include:
- common.orchestrate.wraps.salt_minion_role.minimal
- common.firewall
- common.shell.prompt
- common.shell.aliases
- common.shell.variables
# Set splash screen and boot console resolution.
- common.grub
- common.system_version
- common.sudo
- common.sudo.configure_required_users
- common.vim
- common.git
- common.yum
# Prepare seamless SSH connectivity.
- common.ssh.distribute_private_keys
# NOTE: Distribution of public keys is done from control host.
#-common.ssh.distribute_public_keys
- common.ssh.accept_host_keys
- common.gnome.system_proxy
- common.gnome.auto_login
- common.packages_per_os_platfrom_type
- common.windows_power
- common.custom_root_ca
- common.selinux
{% endif %}
{% endif %}
| # Configure `salt_minion_role` role (Salt minion).
{% if 'salt_minion_role' in pillar['system_host_roles'] %}
{% if grains['id'] in pillar['system_host_roles']['salt_minion_role']['assigned_hosts'] %}
include:
- common.orchestrate.wraps.salt_minion_role.minimal
- common.firewall
- common.shell.prompt
- common.shell.aliases
- common.shell.variables
# Set splash screen and boot console resolution.
- common.grub
- common.system_version
- common.sudo
- common.sudo.configure_required_users
- common.vim
- common.git
- common.yum
# Prepare seamless SSH connectivity.
- common.ssh.distribute_private_keys
# NOTE: Distribution of public keys is done from control host.
#-common.ssh.distribute_public_keys
- common.ssh.accept_host_keys
- common.gnome.system_proxy
- common.gnome.auto_login
- common.packages_per_os_platfrom_type
- common.windows_power
- common.custom_root_ca
- common.selinux
- common.java
{% endif %}
{% endif %}
|
Update ecs service placeholder file | #anatomy of an ecs service.
# elb is required for managing location of container in cluster, although consul can provide the same service via DNS/consul agent
# elb is required for load balanced access and external access
# task definition -> json format from template
# service definition -> aws resource
# persistent data dirs -> create dirs and deliver config from templates.
#include:
# - aws_rds
# - aws_elb
# - aws_service
# - aws_task
# - aws_dns
{% set service_list = [] %}
{% for service_name in pillar['services'] %}
{% if pillar['services'][service_name]['type'] | default('compose') == 'ecs' %}
{% do service_list.append(pillar['services'][service_name]) %}
{% endif %}
{% endfor %}
{{ service_list }}
| #anatomy of an ecs service.
# elb is required for managing location of container in cluster, although consul can provide the same service via DNS/consul agent
# elb is required for load balanced access and external access
# task definition -> json format from template
# service definition -> aws resource
# persistent data dirs -> create dirs and deliver config from templates.
#include:
# - aws_rds
# - aws_elb
# - aws_service
# - aws_task
# - aws_dns
{% for service_name in pillar['services'] %}
{% if pillar['services'][service_name]['type'] | default('compose') == 'ecs' %}
{% %}
{% endif %}
{% endfor %}
|
Update the require statements and ensure that the latest RabbitMQ is installed. | rabbitmq-server:
pkgrepo.managed:
- name: deb http://www.rabbitmq.com/debian/ testing main
- key_url: http://www.rabbitmq.com/rabbitmq-signing-key-public.asc
pkg:
- installed
- require:
- pkgrepo: rabbitmq-server
service:
- running
- enable: True
- require:
- pkg: rabbitmq-server
- watch:
- file: rabbitmq-config
remove_defaults:
rabbitmq_user.absent:
- name: guest
rabbitmq-config:
file.managed:
- name: /etc/rabbitmq/rabbitmq.config
- source: salt://rabbitmq/rabbitmq.config
- user: root
- group: root
- mode: 644
- template: jinja
- require:
- pkg: rabbitmq-server
| rabbitmq-server:
pkgrepo.managed:
- name: deb http://www.rabbitmq.com/debian/ testing main
- key_url: http://www.rabbitmq.com/rabbitmq-signing-key-public.asc
- require_in:
- pkg: rabbitmq-server
pkg:
- latest
service:
- running
- enable: True
- require:
- pkg: rabbitmq-server
- watch:
- file: rabbitmq-config
remove_defaults:
rabbitmq_user.absent:
- name: guest
rabbitmq-config:
file.managed:
- name: /etc/rabbitmq/rabbitmq.config
- source: salt://rabbitmq/rabbitmq.config
- user: root
- group: root
- mode: 644
- template: jinja
- require:
- pkg: rabbitmq-server
|
Update db hosts salt states to use redis formula | base:
'*':
- core
'db':
- postgres
- core.redis
'docker':
- rails
- docker
- worker.docker
- worker.service
'core':
- rails
- rails.db
- rails.service
| base:
'*':
- core
'db':
- postgres
- redis.server
'docker':
- rails
- docker
- worker.docker
- worker.service
'core':
- rails
- rails.db
- rails.service
|
Stop managing /var/lib/etcd in salt | include:
- repositories
- ca-cert
- cert
etcd:
group.present:
- name: etcd
- system: True
user.present:
- name: etcd
- createhome: False
- groups:
- etcd
- require:
- group: etcd
file.directory:
- name: /var/lib/etcd
- user: etcd
- group: etcd
- dir_mode: 700
- recurse:
- user
- group
- mode
- require:
- user: etcd
- group: etcd
pkg.installed:
- pkgs:
- iptables
- etcdctl
- etcd
- require:
- file: /etc/zypp/repos.d/containers.repo
iptables.append:
- table: filter
- family: ipv4
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
# TODO: add "- source: <local-subnet>"
- dports:
- 2380
- proto: tcp
service.running:
- name: etcd
- enable: True
- require:
- sls: ca-cert
- sls: cert
- pkg: etcd
- iptables: etcd
- file: /var/lib/etcd
- watch:
- file: /etc/sysconfig/etcd
# note: this id will be inherited/overwritten by the etcd-proxy
/etc/sysconfig/etcd:
file.managed:
- source: salt://etcd/etcd.conf.jinja
- template: jinja
- user: etcd
- group: etcd
- mode: 644
- require:
- pkg: etcd
- user: etcd
- group: etcd
| include:
- repositories
- ca-cert
- cert
etcd:
group.present:
- name: etcd
- system: True
user.present:
- name: etcd
- createhome: False
- groups:
- etcd
- require:
- group: etcd
pkg.installed:
- pkgs:
- iptables
- etcdctl
- etcd
- require:
- file: /etc/zypp/repos.d/containers.repo
iptables.append:
- table: filter
- family: ipv4
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
# TODO: add "- source: <local-subnet>"
- dports:
- 2380
- proto: tcp
service.running:
- name: etcd
- enable: True
- require:
- sls: ca-cert
- sls: cert
- pkg: etcd
- iptables: etcd
- file: /var/lib/etcd
- watch:
- file: /etc/sysconfig/etcd
# note: this id will be inherited/overwritten by the etcd-proxy
/etc/sysconfig/etcd:
file.managed:
- source: salt://etcd/etcd.conf.jinja
- template: jinja
- user: etcd
- group: etcd
- mode: 644
- require:
- pkg: etcd
- user: etcd
- group: etcd
|
Make sure java is installed before we start elasticsearch. | include:
- java
elasticsearch:
pkg:
- installed
- sources:
- elasticsearch: http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.20.2.deb
service.running:
- require:
- pkg: elasticsearch
- file: /mnt/elasticsearch
- file: /var/log/elasticsearch
- watch:
- file: /etc/elasticsearch/elasticsearch.yml
- file: /etc/elasticsearch/default_mapping.json
/mnt/elasticsearch:
file.directory:
- user: elasticsearch
- group: elasticsearch
- require:
- pkg: elasticsearch
/var/log/elasticsearch:
file.directory:
- user: elasticsearch
- group: elasticsearch
- require:
- pkg: elasticsearch
/etc/elasticsearch/elasticsearch.yml:
file.managed:
- source: salt://elasticsearch/elasticsearch.yml
- user: root
- group: root
- mode: 0644
- require:
- pkg: elasticsearch
/etc/elasticsearch/default_mapping.json:
file.managed:
- source: salt://elasticsearch/default_mapping.json
- user: root
- group: root
- mode: 0644
- require:
- pkg: elasticsearch
| include:
- java
elasticsearch:
pkg:
- installed
- sources:
- elasticsearch: http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.20.2.deb
service.running:
- require:
- pkg: java
- pkg: elasticsearch
- file: /mnt/elasticsearch
- file: /var/log/elasticsearch
- watch:
- file: /etc/elasticsearch/elasticsearch.yml
- file: /etc/elasticsearch/default_mapping.json
/mnt/elasticsearch:
file.directory:
- user: elasticsearch
- group: elasticsearch
- require:
- pkg: elasticsearch
/var/log/elasticsearch:
file.directory:
- user: elasticsearch
- group: elasticsearch
- require:
- pkg: elasticsearch
/etc/elasticsearch/elasticsearch.yml:
file.managed:
- source: salt://elasticsearch/elasticsearch.yml
- user: root
- group: root
- mode: 0644
- require:
- pkg: elasticsearch
/etc/elasticsearch/default_mapping.json:
file.managed:
- source: salt://elasticsearch/default_mapping.json
- user: root
- group: root
- mode: 0644
- require:
- pkg: elasticsearch
|
Remove old 360 cove branches | cove:
allowedhosts: '.threesixtygiving.org,.threesixtygiving.uk0.bigv.io'
https: 'no'
app: cove_360
extra_cove_branches: #[] # remove the [] (empty list) if you re-add some values below
- name: release-201710
app: cove_360
- name: release-201712
app: cove_360
- name: release-201801
app: cove_360
- name: branch-360-0.9-release-bugfix
app: cove_360
- name: branch-360-autumn-2017-bugfix
app: cove_360
- name: branch-360-186-duration-number
app: cove_360
- name: date-validation
app: cove_360
- name: remove-google-doc
app: cove_360
| cove:
allowedhosts: '.threesixtygiving.org,.threesixtygiving.uk0.bigv.io'
https: 'no'
app: cove_360
extra_cove_branches: #[] # remove the [] (empty list) if you re-add some values below
- name: release-201710
app: cove_360
- name: release-201712
app: cove_360
- name: release-201801
app: cove_360
- name: branch-360-0.9-release-bugfix
app: cove_360
- name: branch-360-autumn-2017-bugfix
app: cove_360
- name: branch-360-186-duration-number
app: cove_360
- name: date-validation
app: cove_360
|
Set the diamond client to running | diamond-depends:
pkg.installed:
- pkgs:
- python-configobj
- python-psutil
diamond:
pkg.installed:
- sources:
- python-diamond: salt://monitoring/client/packages/python-diamond_3.4.421_all.deb
- require:
- pkg: diamond-depends
group.present:
- system: True
user.present:
- shell: /bin/false
- system: True
- gid_from_name: True
- require:
- group: diamond
/etc/diamond/diamond.conf:
file.managed:
- source: salt://monitoring/client/configs/diamond.conf.jinja
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- pkg: diamond
| diamond-depends:
pkg.installed:
- pkgs:
- python-configobj
- python-psutil
diamond:
pkg.installed:
- sources:
- python-diamond: salt://monitoring/client/packages/python-diamond_3.4.421_all.deb
- require:
- pkg: diamond-depends
group.present:
- system: True
user.present:
- shell: /bin/false
- system: True
- gid_from_name: True
- require:
- group: diamond
service.running:
- enable: True
- watch:
- file: /etc/diamond/diamond.conf
- require:
- pkg: diamond
- user: diamond
/etc/diamond/diamond.conf:
file.managed:
- source: salt://monitoring/client/configs/diamond.conf.jinja
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- pkg: diamond
|
Update rabbitmq user to have management tag. | install_erlang:
pkg.installed:
- name: erlang
install_rabbitmq:
pkg.installed:
- sources:
- rabbitmq: https://www.rabbitmq.com/releases/rabbitmq-server/v3.5.6/rabbitmq-server-3.5.6-1.noarch.rpm
enable_rabbitmq_on_startup:
cmd.run:
- name: chkconfig rabbitmq-server on
start_rabbitmq:
service.running:
- name: rabbitmq-server
rabbitmq_vhost:
rabbitmq_vhost.present:
- name: pcawg_vhost
- user: pcawg
rabbitmq_user:
rabbitmq_user.present:
- name: pcawg
- password: pcawg
- perms:
- 'pcawg_vhost':
- '.*'
- '.*'
- '.*'
rabbitmq_consul_config:
file.managed:
- name: /etc/opt/consul.d/rabbitmq_consul.json
- source: salt://rabbitmq/conf/rabbitmq_consul.json
- user: root
- group: root
- mode: 644
- makedirs: True | install_erlang:
pkg.installed:
- name: erlang
install_rabbitmq:
pkg.installed:
- sources:
- rabbitmq: https://www.rabbitmq.com/releases/rabbitmq-server/v3.5.6/rabbitmq-server-3.5.6-1.noarch.rpm
enable_rabbitmq_on_startup:
cmd.run:
- name: chkconfig rabbitmq-server on
start_rabbitmq:
service.running:
- name: rabbitmq-server
rabbitmq_vhost:
rabbitmq_vhost.present:
- name: pcawg_vhost
- user: pcawg
rabbitmq_user:
rabbitmq_user.present:
- name: pcawg
- password: pcawg
- tags:
- management
- perms:
- 'pcawg_vhost':
- '.*'
- '.*'
- '.*'
rabbitmq_management_plugin:
rabbitmq_plugin.enabled:
- name: rabbitmq_management
rabbitmq_consul_config:
file.managed:
- name: /etc/opt/consul.d/rabbitmq_consul.json
- source: salt://rabbitmq/conf/rabbitmq_consul.json
- user: root
- group: root
- mode: 644
- makedirs: True |
Uninstall jenkins user and home | include:
- jenkins.cli.uninstall
remove_site:
file.absent:
- name: /etc/nginx/sites-available/jenkins.conf
remove_site_link:
file.absent:
- name: /etc/nginx/sites-enabled/jenkins.conf
remove_pkgs:
pkg.purged:
- pkgs:
- jenkins
- nginx-full
| {% set jenkins = pillar.get('jenkins', {}) -%}
{% set home = jenkins.get('home', '/usr/local/jenkins') -%}
include:
- jenkins.cli.uninstall
remove_site:
file.absent:
- name: /etc/nginx/sites-available/jenkins.conf
remove_site_link:
file.absent:
- name: /etc/nginx/sites-enabled/jenkins.conf
remove_pkgs:
pkg.purged:
- pkgs:
- jenkins
- nginx-full
remove_user:
user.absent:
- name: jenkins
remove_home:
file.absent:
- name: {{ home }}
|
Update the ES requirements list | #
# Install Elasticsearch and plugins configured in pillar
#
elasticsearch-requirements:
pkg.installed:
- pkgs:
- openjdk-8-jre-headless
elasticsearch:
pkg.installed:
- version: {{ pillar.elasticsearch.version }}
- require:
- pkg: elasticsearch-requirements
# Disable default elasticsearch service
# Each environment will get its own ES instance running.
disable-elasticsearch-service:
service.dead:
- name: elasticsearch
- enable: False
# For each plugin - we need to restart Elasticsearch service on each environment
# This is not maintained anymore since elasticsearch 5.x
#
# {%- for shortname, plugin in salt['pillar.get']('elasticsearch:plugins', {}).items() %}
# /usr/share/elasticsearch/bin/plugin install {% if plugin.url is defined %}{{ plugin.url }}{% else %}{{ plugin.name }}{% endif %}:
# cmd.run:
# - unless: test -d /usr/share/elasticsearch/plugins/{{ shortname }}
# - require:
# - pkg: elasticsearch
# - watch_in:
# {%- for environment, environment_details in pillar.environments.items() %}
# {%- if 'skip_instance_setup' not in environment_details.elasticsearch %}
# - service: elasticsearch-{{ environment }}
# {%- endif %}
# {%- endfor %}
# {%- endfor %}
| #
# Install Elasticsearch and plugins configured in pillar
#
elasticsearch-requirements:
pkg.installed:
- pkgs:
- openjdk-11-jre-headless
- policykit-1
elasticsearch:
pkg.installed:
- version: {{ pillar.elasticsearch.version }}
- require:
- pkg: elasticsearch-requirements
# Disable default elasticsearch service
# Each environment will get its own ES instance running.
disable-elasticsearch-service:
service.dead:
- name: elasticsearch
- enable: False
# For each plugin - we need to restart Elasticsearch service on each environment
# This is not maintained anymore since elasticsearch 5.x
#
# {%- for shortname, plugin in salt['pillar.get']('elasticsearch:plugins', {}).items() %}
# /usr/share/elasticsearch/bin/plugin install {% if plugin.url is defined %}{{ plugin.url }}{% else %}{{ plugin.name }}{% endif %}:
# cmd.run:
# - unless: test -d /usr/share/elasticsearch/plugins/{{ shortname }}
# - require:
# - pkg: elasticsearch
# - watch_in:
# {%- for environment, environment_details in pillar.environments.items() %}
# {%- if 'skip_instance_setup' not in environment_details.elasticsearch %}
# - service: elasticsearch-{{ environment }}
# {%- endif %}
# {%- endfor %}
# {%- endfor %}
|
Install Node 8 instead of Node 6 | node:
install_from_ppa: true
npm:
config:
prefix: /opt/node
python:
pypy3:
version: v5.2.0-alpha1
binary_sha256: f5e66ab24267d6ddf662d07c512d06c10ebc732ae62093dabbd775ac63b9060a
| node:
install_from_ppa: true
ppa:
repository_url: 'https://deb.nodesource.com/node_8.x'
npm:
config:
prefix: /opt/node
python:
pypy3:
version: v5.2.0-alpha1
binary_sha256: f5e66ab24267d6ddf662d07c512d06c10ebc732ae62093dabbd775ac63b9060a
|
Fix the syntax matching inside orchestrate job. | gluster_setup:
salt.state:
- tgt: roles:glusterfs-server
- match: grain
- highstate: True
gluster_volume_setup:
salt.state:
- tgt: roles:glusterfs-master
- match: grain
- sls:
- gluster.master
gluster_volume_mount:
salt.state:
- tgt: roles:glusterfs-server
- match: grain
- sls:
- gluster.mount | gluster_setup:
salt.state:
- tgt: 'roles:glusterfs-server'
- tgt_type: grain
- highstate: True
gluster_volume_setup:
salt.state:
- tgt: 'roles:glusterfs-master'
- tgt_type: grain
- sls:
- gluster.master
gluster_volume_mount:
salt.state:
- tgt: 'roles:glusterfs-server'
- tgt_type: grain
- sls:
- gluster.mount |
Upgrade jenkins to avoid slow download | #
# Install package, remove default service
#
# Here we use specific version of the package to avoid auth issues with Jenkins 2.0
jenkins:
pkg.installed:
- hold: True
- sources:
- jenkins: http://pkg.jenkins-ci.org/debian-stable/binary/jenkins_1.651.1_all.deb
disable-jenkins-service:
service.dead:
- name: jenkins
- enable: False
- require:
- pkg: jenkins
# Make sure that www-data can unpack jenkins war file
/var/cache/jenkins:
file.directory:
- user: www-data
- group: www-data
- mode: 775
- recurse:
- user
- group
- require:
- pkg: jenkins
| #
# Install package, remove default service
#
# Here we use specific version of the package to avoid auth issues with Jenkins 2.0
jenkins:
pkg.installed:
- hold: True
- sources:
- jenkins: http://pkg.jenkins-ci.org/debian-stable/binary/jenkins_1.651.3_all.deb
disable-jenkins-service:
service.dead:
- name: jenkins
- enable: False
- require:
- pkg: jenkins
# Make sure that www-data can unpack jenkins war file
/var/cache/jenkins:
file.directory:
- user: www-data
- group: www-data
- mode: 775
- recurse:
- user
- group
- require:
- pkg: jenkins
|
Remove redundant state tracking for outgoing ntp fw | ntp:
pkg:
- installed
file.managed:
- name: /etc/ntp.conf
- source: salt://ntp/ntp.conf
- template: jinja
service.running:
- require:
- pkg: ntp
- watch:
- file: ntp
{% for family in ('ipv4', 'ipv6') %}
ntp-firewall-outgoing-{{ family }}:
firewall.append:
- table: filter
- chain: OUTPUT
- family: {{ family }}
- proto: udp
- sport: 123
- dport: 123
- match:
- comment
- owner
- state
- connstate: NEW,ESTABLISHED
- uid-owner: root
- jump: ACCEPT
- comment: "ntp: Allow outgoing NTP queries for root"
{% endfor %}
| ntp:
pkg:
- installed
file.managed:
- name: /etc/ntp.conf
- source: salt://ntp/ntp.conf
- template: jinja
service.running:
- require:
- pkg: ntp
- watch:
- file: ntp
{% for family in ('ipv4', 'ipv6') %}
ntp-firewall-outgoing-{{ family }}:
firewall.append:
- table: filter
- chain: OUTPUT
- family: {{ family }}
- proto: udp
- sport: 123
- dport: 123
- match:
- comment
- owner
- uid-owner: root
- jump: ACCEPT
- comment: "ntp: Allow outgoing NTP queries for root"
{% endfor %}
|
Fix dockerfile argument in catalinker-salt state. |
include:
- .skeletonbuild
{% set repo = 'digibib' %}
{% set image = 'redef-catalinker' %}
{% set tag = 'latest' %}
{% set build_context = '/vagrant/redef/catalinker' %}
{% set dockerfile = 'Dockerfile-skeleton' %}
{% include 'docker-build.sls-fragment' %}
extend:
redef-catalinker_built:
cmd.run:
- require:
- cmd: redef-catalinker-skeleton_built
- cmd: copy_skeleton_gemfile_lock
{% set container = 'redef_catalinker_container' %}
{% set ports = ["4567/tcp"] %}
{% set environment = {'SERVICES_PORT': "http://{0}:{1}".format(pillar['redef']['services']['host'], pillar['redef']['services']['port']) } %}
{% set port_bindings = {'4567/tcp': { 'HostIp': pillar['redef']['catalinker']['binding'], 'HostPort': pillar['redef']['catalinker']['port'] } } %}
{% include 'docker-run.sls-fragment' %}
|
include:
- .skeletonbuild
{% set repo = 'digibib' %}
{% set image = 'redef-catalinker' %}
{% set tag = 'latest' %}
{% set build_context = '/vagrant/redef/catalinker' %}
{% set dockerfile = 'Dockerfile' %}
{% include 'docker-build.sls-fragment' %}
extend:
redef-catalinker_built:
cmd.run:
- require:
- cmd: redef-catalinker-skeleton_built
- cmd: copy_skeleton_gemfile_lock
{% set container = 'redef_catalinker_container' %}
{% set ports = ["4567/tcp"] %}
{% set environment = {'SERVICES_PORT': "http://{0}:{1}".format(pillar['redef']['services']['host'], pillar['redef']['services']['port']) } %}
{% set port_bindings = {'4567/tcp': { 'HostIp': pillar['redef']['catalinker']['binding'], 'HostPort': pillar['redef']['catalinker']['port'] } } %}
{% include 'docker-run.sls-fragment' %}
|
Fix broken pwm-server state on vagrant | pip:
pkg.installed:
- name: python-pip
pip.installed:
- upgrade: True
- require:
- pkg: pip
virtualenv:
pip.installed:
- require:
- pip: pip
python-dev:
pkg.installed
pyopenssl-reqs:
pkg.installed:
- name: libffi-dev
pwm-server:
virtualenv.managed:
- name: /srv/pwm-server/venv
pip.installed:
- name: /vagrant
- upgrade: True
- editable: True
- bin_env: /srv/pwm-server/venv
- require:
- virtualenv: pwm-server
- pkg: python-dev
file.managed:
- name: /srv/pwm-server/config.py
- source: salt://pwm-server/config.py
service.running:
- watch:
- pip: pwm-server
- file: pwm-server
- file: pwm-server-job
pwm-server-job:
file.managed:
- name: /etc/init/pwm-server.conf
- source: salt://pwm-server/pwm-server.conf
| pip:
pkg.installed:
- name: python-pip
pip.installed:
- upgrade: True
- require:
- pkg: pip
virtualenv:
pip.installed:
- require:
- pip: pip
python-dev:
pkg.installed
pyopenssl-reqs:
pkg.installed:
- name: libffi-dev
pwm-server:
virtualenv.managed:
- name: /srv/pwm-server/venv
pip.installed:
- editable: /vagrant
- upgrade: True
- bin_env: /srv/pwm-server/venv
- require:
- virtualenv: pwm-server
- pkg: python-dev
file.managed:
- name: /srv/pwm-server/config.py
- source: salt://pwm-server/config.py
service.running:
- watch:
- pip: pwm-server
- file: pwm-server
- file: pwm-server-job
pwm-server-job:
file.managed:
- name: /etc/init/pwm-server.conf
- source: salt://pwm-server/pwm-server.conf
|
Add symlink to butler CLI to path | butler-clone:
git.latest:
- rev: master
- force_reset: True
- name: https://github.com/llevar/butler.git
- target: /opt/butler
- submodules: True
install_butler_tracker:
cmd.run:
- name: pip install -e .
- cwd: /opt/butler/track/
| butler-clone:
git.latest:
- rev: master
- force_reset: True
- name: https://github.com/llevar/butler.git
- target: /opt/butler
- submodules: True
install_butler_tracker:
cmd.run:
- name: pip install -e .
- cwd: /opt/butler/track/
/usr/bin/butler:
file.symlink:
- target: /opt/butler/track/tracker/bin/butler
- user: root
- group: root
- mode: 755
- force: True |
Add the postgresql users to the tracker | base:
'*':
- networking
- users
- sudoers
- psf-ca
'roles:cdn-logs':
- match: grain
- fastly-logging
- firewall.fastly-logging
'roles:docs':
- match: grain
- firewall.fastly-backend
- groups.docs
- secrets.backup.docs
'roles:downloads':
- match: grain
- firewall.fastly-backend
- groups.downloads
- secrets.backup.downloads
'roles:hg':
- match: grain
- firewall.rs-lb-backend
- secrets.backup.hg
- secrets.ssh.hg
'roles:salt-master':
- match: grain
- salt-master
'roles:jython-web':
- match: grain
- secrets.backup.jython-web
- groups.jython
- firewall.http
'roles:planet':
- match: grain
- planet
- firewall.http
'roles:postgresql':
- match: grain
- firewall.postgresql
- postgresql.server
- secrets.psf-ca.pg
'roles:postgresql-replica':
- match: grain
- secrets.postgresql-users.replica
'roles:backup-server':
- match: grain
- backup.server
'roles:loadbalancer':
- match: grain
- haproxy
- firewall.loadbalancer
- secrets.tls.certs.loadbalancer
'roles:vpn':
- match: grain
- openvpn
- firewall.vpn
- secrets.openvpn.vpn
- secrets.duosec.vpn
| base:
'*':
- networking
- users
- sudoers
- psf-ca
'roles:cdn-logs':
- match: grain
- fastly-logging
- firewall.fastly-logging
'roles:docs':
- match: grain
- firewall.fastly-backend
- groups.docs
- secrets.backup.docs
'roles:downloads':
- match: grain
- firewall.fastly-backend
- groups.downloads
- secrets.backup.downloads
'roles:hg':
- match: grain
- firewall.rs-lb-backend
- secrets.backup.hg
- secrets.ssh.hg
'roles:tracker':
- match: grain
- secrets.postgresql-users.tracker
'roles:salt-master':
- match: grain
- salt-master
'roles:jython-web':
- match: grain
- secrets.backup.jython-web
- groups.jython
- firewall.http
'roles:planet':
- match: grain
- planet
- firewall.http
'roles:postgresql':
- match: grain
- firewall.postgresql
- postgresql.server
- secrets.psf-ca.pg
'roles:postgresql-replica':
- match: grain
- secrets.postgresql-users.replica
'roles:backup-server':
- match: grain
- backup.server
'roles:loadbalancer':
- match: grain
- haproxy
- firewall.loadbalancer
- secrets.tls.certs.loadbalancer
'roles:vpn':
- match: grain
- openvpn
- firewall.vpn
- secrets.openvpn.vpn
- secrets.duosec.vpn
|
Update new datadate to 2017-06-07 for live pillar | # grantnav live
grantnav:
allowedhosts: '.live.threesixtygiving.uk0.bigv.io,.threesixtygiving.org'
server_size: large
deploy_mode: list
deploys:
new:
datadate: '2017-05-09'
branch: 'iteration07.6'
dataselection: acceptable_license_valid
current:
datadate: '2017-05-09'
branch: 'iteration07.6'
dataselection: acceptable_license_valid
old:
datadate: '2017-04-04'
branch: 'iteration07.6'
dataselection: acceptable_license_valid
piwik:
url: '//mon.opendataservices.coop/piwik/'
site_id: '9'
| # grantnav live
grantnav:
allowedhosts: '.live.threesixtygiving.uk0.bigv.io,.threesixtygiving.org'
server_size: large
deploy_mode: list
deploys:
new:
datadate: '2017-06-07'
branch: 'iteration07.6'
dataselection: acceptable_license_valid
current:
datadate: '2017-05-09'
branch: 'iteration07.6'
dataselection: acceptable_license_valid
old:
datadate: '2017-04-04'
branch: 'iteration07.6'
dataselection: acceptable_license_valid
piwik:
url: '//mon.opendataservices.coop/piwik/'
site_id: '9'
|
Use IP for slave SSH access | {% set jenkins = pillar.get('jenkins', {}) -%}
{% set home = jenkins.get('home', '/usr/local/jenkins') -%}
{% set user = jenkins.get('user', 'jenkins') -%}
{% set group = jenkins.get('group', user) -%}
{% set keys = salt['publish.publish']('roles:jenkins-master', 'ssh_key.pub', user, expr_form='grain') %}
{% set master_key = keys.values()[0] %}
{% set labels = grains.get('jenkins', {}).get('labels', []) -%}
include:
- jenkins.user
- jenkins.cli
- jenkins.git
jre:
pkg.latest:
- name: default-jre-headless
ssh:
pkg.latest:
- name: openssh-server
allow_master_key:
ssh_auth.present:
- name: {{ master_key }}
- user: {{ user }}
slave_node:
jenkins_node.present:
- name: {{ grains['host'] }}
- host: {{ grains['fqdn'] }}
- remote_fs: {{ home }}
- credential: master-ssh
{%- if labels %}
- labels:
{%- for label in labels %}
- {{ label }}
{%- endfor %}
{%- endif %}
| {% set jenkins = pillar.get('jenkins', {}) -%}
{% set home = jenkins.get('home', '/usr/local/jenkins') -%}
{% set user = jenkins.get('user', 'jenkins') -%}
{% set group = jenkins.get('group', user) -%}
{% set keys = salt['publish.publish']('roles:jenkins-master', 'ssh_key.pub', user, expr_form='grain') %}
{% set master_key = keys.values()[0] %}
{% set labels = grains.get('jenkins', {}).get('labels', []) -%}
include:
- jenkins.user
- jenkins.cli
- jenkins.git
jre:
pkg.latest:
- name: default-jre-headless
ssh:
pkg.latest:
- name: openssh-server
allow_master_key:
ssh_auth.present:
- name: {{ master_key }}
- user: {{ user }}
slave_node:
jenkins_node.present:
- name: {{ grains['host'] }}
- host: {{ salt['network.ip_addrs']()[0] }}
- remote_fs: {{ home }}
- credential: master-ssh
{%- if labels %}
- labels:
{%- for label in labels %}
- {{ label }}
{%- endfor %}
{%- endif %}
|
Add client info in nydalen (mac+ip). | klientservernydalen:
server:
network:
lan:
gateway: "192.168.0.1"
broadcast: "192.168.0.255"
pool_lower: "192.168.0.60"
pool_upper: "192.168.0.70"
subnet: "192.168.0.0"
netmask: "255.255.255.0"
iface: eth1
wlan:
dns: "10.172.2.1"
gateway: "10.172.24.100"
iface: eth0
clients:
boot:
params: "--"
mycelclients:
-
ip: "192.168.0.101"
mac: "c0:3f:d5:68:ee:17"
name: nydalenklient1
| klientservernydalen:
server:
network:
lan:
gateway: "192.168.0.1"
broadcast: "192.168.0.255"
pool_lower: "192.168.0.60"
pool_upper: "192.168.0.70"
subnet: "192.168.0.0"
netmask: "255.255.255.0"
iface: eth1
wlan:
dns: "10.172.2.1"
gateway: "10.172.24.100"
iface: eth0
clients:
boot:
params: "--"
mycelclients:
-
ip: "192.168.0.101"
mac: "c0:3f:d5:69:e1:ed"
name: nydalenklient1
-
ip: "192.168.0.102"
mac: "c0:3f:d5:68:ed:b9"
name: nydalenklient2
-
ip: "192.168.0.103"
mac: "c0:3f:d5:69:bf:02"
name: nydalenklient3
-
ip: "192.168.0.104"
mac: "c0:3f:d5:69:f0:ae"
name: nydalenklient4
|
Make etcd state a requirement for states that need etcd running on localhost | base:
'*':
- repositories
{% if pillar.get('avahi', '').lower() == 'true' %}
- avahi
{% endif %}
- motd
- users
{% if salt['pillar.get']('infrastructure', 'libvirt') == 'cloud' %}
- hosts
{% endif %}
'roles:ca':
- match: grain
- ca
'roles:etcd':
- match: grain
- cert
- etcd
'roles:kube-(master|minion)':
- match: grain_pcre
- cert
- etcd-proxy
'roles:kube-master':
- match: grain
- kubernetes-master
- reboot
'roles:kube-minion':
- match: grain
- flannel
- docker
- kubernetes-minion
'roles:nfs':
- match: grain
- nfs-server
'roles:haproxy':
- match: grain
- confd
- haproxy
| base:
'*':
- repositories
{% if pillar.get('avahi', '').lower() == 'true' %}
- avahi
{% endif %}
- motd
- users
{% if salt['pillar.get']('infrastructure', 'libvirt') == 'cloud' %}
- hosts
{% endif %}
'roles:ca':
- match: grain
- ca
'roles:etcd':
- match: grain
- cert
- etcd
'roles:kube-(master|minion)':
- match: grain_pcre
- cert
- etcd-proxy
'roles:kube-master':
- match: grain
- kubernetes-master:
- require:
- sls: etcd-proxy
- reboot:
- require:
- sls: etcd-proxy
'roles:kube-minion':
- match: grain
- flannel
- docker
- kubernetes-minion
'roles:nfs':
- match: grain
- nfs-server
'roles:haproxy':
- match: grain
- confd
- haproxy
|
Use the ubuntu user for all the things. | include:
- git
devstack:
git.latest:
- name: https://github.com/openstack-dev/devstack.git
- rev: stable/folsom
- target: /tmp/devstack
- require:
- pkg: git
cmd.wait:
- name: /tmp/devstack/stack.sh
- cwd: /tmp/devstack
- user: ubuntu
- require:
- file: /tmp/devstack/localrc
- watch:
- git: devstack
/tmp/devstack/localrc:
file.managed:
- source: salt://devstack/localrc
- template: jinja
- require:
- git: devstack
| include:
- git
devstack:
git.latest:
- name: https://github.com/openstack-dev/devstack.git
- rev: stable/folsom
- target: /tmp/devstack
- runas: ubuntu
- require:
- pkg: git
cmd.wait:
- name: /tmp/devstack/stack.sh
- cwd: /tmp/devstack
- user: ubuntu
- require:
- file: /tmp/devstack/localrc
- watch:
- git: devstack
/tmp/devstack/localrc:
file.managed:
- source: salt://devstack/localrc
- template: jinja
- user: ubuntu
- require:
- git: devstack
|
Allow LOG_DESTINATION not to be set | include:
- syslog
send_logs_to_remote_log_server:
file.managed:
- name: /etc/rsyslog.d/remote_log_server.conf
- source: salt://forward_logs/remote_log_server.conf
- template: jinja
- context:
LOG_DESTINATION: {{ pillar['secrets']['LOG_DESTINATION'] }}
restart_syslog_for_remote_log_server:
cmd.run:
- name: restart rsyslog
- onchanges:
- file: send_logs_to_remote_log_server
| {% if pillar['secrets'].get('LOG_DESTINATION', False) %}
include:
- syslog
send_logs_to_remote_log_server:
file.managed:
- name: /etc/rsyslog.d/remote_log_server.conf
- source: salt://forward_logs/remote_log_server.conf
- template: jinja
- context:
LOG_DESTINATION: {{ pillar['secrets']['LOG_DESTINATION'] }}
restart_syslog_for_remote_log_server:
cmd.run:
- name: restart rsyslog
- onchanges:
- file: send_logs_to_remote_log_server
{% endif %}
|
Create the sqlite3 data file directory | {% from "powerdns/map.jinja" import powerdns with context %}
include:
- powerdns.config
powerdns_install_sqlite3:
pkg.installed:
- name: {{ powerdns.lookup.backend_sqlite3_pkg }}
- require:
- pkg: powerdns
powerdns_config_sqlite3:
file.managed:
{## Using .get below because direct access fails on the include keyword ##}
- name: {{ powerdns.config.get('include-dir') }}/pdns.gsqlite3.conf
- user: {{ powerdns.config.setuid }}
- group: {{ powerdns.config.setuid }}
- mode: 644
- template: jinja
- source: salt://powerdns/templates/pdns_sqlite.conf.jinja
- makedirs: True
powerdns_schema_sqlite3:
cmd.script:
- source: salt://powerdns/files/sqlite_schema_load.sh
- name: sqlite_schema_load.sh {{ powerdns.lookup.backend_sqlite3_file }}
- runas: {{ powerdns.config.setuid }}
- creates: {{ powerdns.lookup.backend_sqlite3_file }}
- require:
- pkg: {{ powerdns.lookup.backend_sqlite3_pkg }}
| {% from "powerdns/map.jinja" import powerdns with context %}
{% set sqlite_data_dir = salt['file.basename'](powerdns.lookup.backend_sqlite3_file) %}
include:
- powerdns.config
powerdns_install_sqlite3:
pkg.installed:
- name: {{ powerdns.lookup.backend_sqlite3_pkg }}
- require:
- pkg: powerdns
powerdns_config_sqlite3:
file.managed:
{## Using .get below because direct access fails on the include keyword ##}
- name: {{ powerdns.config.get('include-dir') }}/pdns.gsqlite3.conf
- user: {{ powerdns.config.setuid }}
- group: {{ powerdns.config.setuid }}
- mode: 644
- template: jinja
- source: salt://powerdns/templates/pdns_sqlite.conf.jinja
- makedirs: True
powerdns_directory_sqlite3:
file.directory:
- name: {{ sqlite_data_dir }}
- user: {{ powerdns.config.setuid }}
- group: {{ powerdns.config.setuid }}
- makedirs: True
powerdns_schema_sqlite3:
cmd.script:
- source: salt://powerdns/files/sqlite_schema_load.sh
- name: sqlite_schema_load.sh {{ powerdns.lookup.backend_sqlite3_file }}
- runas: {{ powerdns.config.setuid }}
- creates: {{ powerdns.lookup.backend_sqlite3_file }}
- require:
- pkg: {{ powerdns.lookup.backend_sqlite3_pkg }}
- file: {{ sqlite_data_dir }}
|
Add gperf, which is apparently needed by freetype-sys in some cases. | cmake:
pkg.installed
git:
pkg.installed
virtualenv:
pip.installed
ghp-import:
pip.installed
{% if grains["kernel"] != "Darwin" %}
libglib2.0-dev:
pkg.installed
libgl1-mesa-dri:
pkg.installed
freeglut3-dev:
pkg.installed
libfreetype6-dev:
pkg.installed
xorg-dev:
pkg.installed
libssl-dev:
pkg.installed
libbz2-dev:
pkg.installed
xserver-xorg-input-void:
pkg.installed
xserver-xorg-video-dummy:
pkg.installed
xpra:
pkg.installed
libosmesa6-dev:
pkg.installed
{% else %}
pkg-config:
pkg.installed
{% endif %}
| cmake:
pkg.installed
git:
pkg.installed
virtualenv:
pip.installed
ghp-import:
pip.installed
{% if grains["kernel"] != "Darwin" %}
libglib2.0-dev:
pkg.installed
libgl1-mesa-dri:
pkg.installed
freeglut3-dev:
pkg.installed
libfreetype6-dev:
pkg.installed
xorg-dev:
pkg.installed
libssl-dev:
pkg.installed
libbz2-dev:
pkg.installed
xserver-xorg-input-void:
pkg.installed
xserver-xorg-video-dummy:
pkg.installed
xpra:
pkg.installed
libosmesa6-dev:
pkg.installed
gperf:
pkg.installed
{% else %}
pkg-config:
pkg.installed
{% endif %}
|
Update wildfly config to create instances |
###############################################################################
#
# Import properties.
{% set properties_path = profile_root.replace('.', '/') + '/properties.yaml' %}
{% import_yaml properties_path as props %}
{% set master_minion_id = props['master_minion_id'] %}
system_features:
wildfly_deployments:
standard_wildfly:
resource_id: wildfly_application_server_distribution_zip
archive_format: zip
root_subdir: 'wildfly-10.0.0.Final'
owner_user: master_minion_user
destination_dir_path: Apps/wildfly/standard_wildfly
wildfly_instances:
node-1:
target_system_role: wildfly_node_1_role
deployment_id: standard_wildfly
file_templates:
example:
source_url:
destination_path:
config_data:
node-2:
target_system_role: wildfly_node_2_role
deployment_id: standard_wildfly
file_templates:
example:
source_url:
destination_path:
config_data:
###############################################################################
# EOF
###############################################################################
|
###############################################################################
#
# Import properties.
{% set properties_path = profile_root.replace('.', '/') + '/properties.yaml' %}
{% import_yaml properties_path as props %}
{% set master_minion_id = props['master_minion_id'] %}
system_features:
wildfly_deployments:
standard_wildfly:
resource_id: wildfly_application_server_distribution_zip
archive_format: zip
root_subdir: 'wildfly-10.0.0.Final'
owner_user: master_minion_user
destination_dir_path: Apps/wildfly/standard_wildfly
wildfly_instances:
node-1:
target_system_role: wildfly_node_1_role
deployment_id: standard_wildfly
file_templates:
example:
source_url: 'salt://common/wildfly/templates/standalone.xml'
template_type: jinja
destination_path: 'configuration/standalone.xml'
config_data:
key1: value1
key2: value2
node-2:
target_system_role: wildfly_node_2_role
deployment_id: standard_wildfly
file_templates:
example:
source_url: 'salt://common/wildfly/templates/standalone.xml'
template_type: jinja
destination_path: 'configuration/standalone.xml'
config_data:
key1: value1
key2: value2
###############################################################################
# EOF
###############################################################################
|
Fix OpenVPN service state for non-systemd systems. | #!stateconf
{% from 'states/openvpn/map.jinja' import openvpn as openvpn_map with context %}
{% from 'states/defaults.map.jinja' import defaults with context %}
.params:
stateconf.set: []
# --- end of state config ---
{% for vpnname, vpn in params.vpns.items() %}
openvpn-client-{{ vpnname }}-service:
service.running:
- name: {{ openvpn_map.client.service|format('client-' + vpnname) }}
- enable: True
- require:
- pkg: openvpn
{% endfor %}
| #!stateconf
{% from 'states/openvpn/map.jinja' import openvpn as openvpn_map with context %}
{% from 'states/defaults.map.jinja' import defaults with context %}
.params:
stateconf.set: []
# --- end of state config ---
{% for vpnname, vpn in params.vpns.items() %}
openvpn-client-{{ vpnname }}-service:
service.running:
{% if '%s' in openvpn.server.service %}
- name: {{ openvpn_map.client.service|format('client-' + vpnname) }}
{% else %}
- name: {{ openvpn.server.service }}
{% endif %}
- enable: True
- require:
- pkg: openvpn
{% endfor %}
|
Make .ssh dir as ssh-kegen wont | backerclient:
user.present:
- fullname: Backer Client User
- shell: /bin/false
- home: /home/backerclient
- createhome: True
- groups:
- wheel
ssh-keygen -N "" -f /home/backerclient/.ssh/id_rsa:
cmd.run:
- creates: /home/backerclient/.ssh/id_rsa
| backerclient:
user.present:
- fullname: Backer Client User
- shell: /bin/false
- home: /home/backerclient
- createhome: True
- groups:
- wheel
/home/backerclient/.ssh/:
file.directory:
- user: backerclient
- group: users
- mode: 700
- require:
- user: backerclient
ssh-keygen -N "" -f /home/backerclient/.ssh/id_rsa:
cmd.run:
- creates: /home/backerclient/.ssh/id_rsa
|
Add mine functions which will record IP addresses | psf_internal_network: 192.168.5.0/24
pypi_internal_network: 172.16.57.0/24
vpn0_internal_network: 10.8.0.0/24
vpn1_internal_network: 10.9.0.0/24
rackspace_iad_service_net: 10.0.0.0/8
psf_internal_vpn_gateway: 192.168.5.10
pypi_internal_vpn_gateway: 172.16.57.17
| psf_internal_network: 192.168.5.0/24
pypi_internal_network: 172.16.57.0/24
vpn0_internal_network: 10.8.0.0/24
vpn1_internal_network: 10.9.0.0/24
rackspace_iad_service_net: 10.0.0.0/8
psf_internal_vpn_gateway: 192.168.5.10
pypi_internal_vpn_gateway: 172.16.57.17
mine_functions:
psf-internal:
mine_function: ip_picker.ip_addrs
cidr: 192.168.5.0/24
pypi-internal:
mine_function: ip_picker.ipaddrs
cidr: 172.16.57.0/24
|
Set up an apt cache for dbaas | # Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
base:
'*':
- fail2ban
- datadog
- network
'logstash*':
- logstash.indexer
- logstash.queue
- logstash.web
- redis
- redis.jenkins
- elasticsearch.master
'esnode*':
- elasticsearch.data-slave
'jenkins.*':
- jenkins.master
'msgaas*.jenkins*':
- jenkins.msgaas
'dbaas*.jenkins*':
- jenkins.dbaas
'pypi*':
- pypi
'apt.mirror*':
- apt.mirror
'devstack':
- devstack
| # Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
base:
'*':
- fail2ban
- datadog
- network
'logstash*':
- logstash.indexer
- logstash.queue
- logstash.web
- redis
- redis.jenkins
- elasticsearch.master
'esnode*':
- elasticsearch.data-slave
'jenkins.*':
- jenkins.master
'msgaas*.jenkins*':
- jenkins.msgaas
'dbaas*.jenkins*':
- jenkins.dbaas
'pypi*':
- pypi
'apt.mirror*':
- apt.mirror
'apt.dbaas*':
- apt.cache
'devstack':
- devstack
|
Use standard ID naming scheme | python-pip:
pkg.installed
locustio:
pip.installed:
- name: locustio >= 0.8.0, <= 0.8.1
- require:
- pkg: python-pip
- pkg: locust-prereq
prometheus-clint:
pip.installed:
- name: prometheus-client >= 0.1.0, <= 0.1.1
- require:
- pkg: python-pip
- pkg: locust-prereq
locust-prereq:
pkg.installed:
- pkgs:
- gcc
- python-devel
- python-pyzmq-devel
- git-core
locust_config_file:
file.managed:
- name: /root/locust_config.yml
- source: salt://locust/locust_config.yml
- template: jinja
- user: root
- group: root
- mode: 755
- force: True
install_locust_file:
file.decode:
- name: /root/locustfile.py
- encoding_type: base64
- encoded_data: {{ grains['locust_file'] }}
| pip:
pkg.installed:
- name: python-pip
locustio:
pip.installed:
- name: locustio >= 0.8.0, <= 0.8.1
- require:
- pkg: pip
- pkg: locust_prerequisites
prometheus_client:
pip.installed:
- name: prometheus-client >= 0.1.0, <= 0.1.1
- require:
- pkg: pip
- pkg: locust_prerequisites
locust_prerequisites:
pkg.installed:
- pkgs:
- gcc
- python-devel
- python-pyzmq-devel
- git-core
locust_config_file:
file.managed:
- name: /root/locust_config.yml
- source: salt://locust/locust_config.yml
- template: jinja
- user: root
- group: root
- mode: 755
- force: True
install_locust_file:
file.decode:
- name: /root/locustfile.py
- encoding_type: base64
- encoded_data: {{ grains['locust_file'] }}
|
Enable fail2ban whenever SSHD is configured. | include:
- ufw
openssh-client:
pkg:
- installed
ssh:
pkg.installed:
- name: openssh-server
service.running:
- enable: True
- watch:
- file: /etc/ssh/sshd_config
- pkg: ssh
require:
- group: login
/etc/ssh/sshd_config:
file.managed:
- source: salt://sshd/sshd_config
- user: root
- mode: 644
allow_ssh:
ufw.allow:
- name: '22'
- enabled: true
- require:
- pkg: ufw | include:
- ufw
- fail2ban
openssh-client:
pkg:
- installed
ssh:
pkg.installed:
- name: openssh-server
service.running:
- enable: True
- watch:
- file: /etc/ssh/sshd_config
- pkg: ssh
require:
- group: login
/etc/ssh/sshd_config:
file.managed:
- source: salt://sshd/sshd_config
- user: root
- mode: 644
allow_ssh:
ufw.allow:
- name: '22'
- enabled: true
- require:
- pkg: ufw |
Create mopidy local media directory | include:
- .pillar_check
mopidy:
pkgrepo.managed:
- name: deb http://apt.mopidy.com/ jessie main contrib non-free
- key_url: salt://mopidy/release-key.asc
pkg.installed:
- pkgs:
- mopidy
- mopidy-spotify
file.managed:
- name: /etc/mopidy/mopidy.conf
- source: salt://mopidy/mopidy.conf
- template: jinja
- user: root
- group: root
- mode: 640
- show_diff: False
firewall.append:
- chain: INPUT
- proto: tcp
- port: 6600
- match:
- comment
- comment: "mopidy: Allow MPD"
- jump: ACCEPT
| {% set mopidy = pillar.get('mopidy', {}) %}
include:
- .pillar_check
mopidy:
pkgrepo.managed:
- name: deb http://apt.mopidy.com/ jessie main contrib non-free
- key_url: salt://mopidy/release-key.asc
pkg.installed:
- pkgs:
- mopidy
- mopidy-spotify
file.managed:
- name: /etc/mopidy/mopidy.conf
- source: salt://mopidy/mopidy.conf
- template: jinja
- user: root
- group: root
- mode: 640
- show_diff: False
firewall.append:
- chain: INPUT
- proto: tcp
- port: 6600
- match:
- comment
- comment: "mopidy: Allow MPD"
- jump: ACCEPT
{% if 'local' in mopidy %}
mopidy-local-media-dir:
file.directory:
- name: {{ mopidy.local.media_dir }}
{% endif %}
|
Change state function calls to short version (which seems preferred in docs) | {% from 'openttd/openttd.jinja' import name with context %}
openttd.install:
pkg:
- latest
- name: {{ name }}
tmux:
pkg:
- installed | {% from 'openttd/openttd.jinja' import name with context %}
openttd.install:
pkg.latest
- name: {{ name }}
tmux.install:
pkg.installed:
- name: tmux |
Drop all incoming connections unless permitted | iptables-persistent:
pkg.installed: []
{% for port in 8300, 8301, 8302, 8400 %}
firewall tcp {{port}}:
iptables.append:
- chain: INPUT
- jump: ACCEPT
- dport: {{port}}
- proto: tcp
- save: true
{% endfor %}
{% for port in 8301, 8302 %}
firewall udp {{port}}:
iptables.append:
- chain: INPUT
- jump: ACCEPT
- dport: {{port}}
- proto: udp
- save: true
{% endfor %}
{% for proto in "tcp", "udp" %}
firewall dns prerouting {{proto}}:
iptables.append:
- table: nat
- chain: PREROUTING
- jump: REDIRECT
- dport: 53
- to-ports: 8600
- proto: {{proto}}
- save: true
firewall dns output {{proto}}:
iptables.append:
- table: nat
- chain: OUTPUT
- jump: REDIRECT
- destination: localhost
- dport: 53
- to-ports: 8600
- proto: {{proto}}
- save: true
{% endfor %}
| iptables-persistent:
pkg.installed: []
firewall drop incoming:
iptables.append:
- chain: INPUT
- jump: DROP
{% for port in 8300, 8301, 8302, 8400 %}
firewall tcp {{port}}:
iptables.append:
- chain: INPUT
- jump: ACCEPT
- dport: {{port}}
- proto: tcp
- save: true
{% endfor %}
{% for port in 8301, 8302 %}
firewall udp {{port}}:
iptables.append:
- chain: INPUT
- jump: ACCEPT
- dport: {{port}}
- proto: udp
- save: true
{% endfor %}
{% for proto in "tcp", "udp" %}
firewall dns prerouting {{proto}}:
iptables.append:
- table: nat
- chain: PREROUTING
- jump: REDIRECT
- dport: 53
- to-ports: 8600
- proto: {{proto}}
- save: true
firewall dns output {{proto}}:
iptables.append:
- table: nat
- chain: OUTPUT
- jump: REDIRECT
- destination: localhost
- dport: 53
- to-ports: 8600
- proto: {{proto}}
- save: true
{% endfor %}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.