file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
seenShow.js | $( document ).ready(function() {
"use strict";
firebase.auth().onAuthStateChanged(firebaseUser =>{
if (firebaseUser){
var rootRef = firebase.database().ref().child("users").child(firebaseUser.uid).child("movies").child("seen");
rootRef.on("child_added", snap =>{
var movieId = snap.child("movieId").val();
var moviename = snap.child("moviename").val();
var poster = snap.child("poster").val();
var posterPre = "https://image.tmdb.org/t/p/w500";
$("#seenList").append("<div class='col-md-2 col-xs-6'><div class='movieVast'><a href='detail'><img class='posterSize movieId' alt='"+movieId+"' src='"+poster + "'/><p class='textVast'>"+ moviename +"</p></a></div></div>");
$(".col-md-2:has(img[alt=null])").remove();
//wanneer er geklikt wordt op een .movieId --> sessionstorage van alt-waarde
$(".movieId").on("click", function(){
var movieId = $(this).attr("alt");
sessionStorage.setItem("movieId", movieId);
});
});
} | });
}); | random_line_split | |
seenShow.js | $( document ).ready(function() {
"use strict";
firebase.auth().onAuthStateChanged(firebaseUser =>{
if (firebaseUser) |
});
}); | {
var rootRef = firebase.database().ref().child("users").child(firebaseUser.uid).child("movies").child("seen");
rootRef.on("child_added", snap =>{
var movieId = snap.child("movieId").val();
var moviename = snap.child("moviename").val();
var poster = snap.child("poster").val();
var posterPre = "https://image.tmdb.org/t/p/w500";
$("#seenList").append("<div class='col-md-2 col-xs-6'><div class='movieVast'><a href='detail'><img class='posterSize movieId' alt='"+movieId+"' src='"+poster + "'/><p class='textVast'>"+ moviename +"</p></a></div></div>");
$(".col-md-2:has(img[alt=null])").remove();
//wanneer er geklikt wordt op een .movieId --> sessionstorage van alt-waarde
$(".movieId").on("click", function(){
var movieId = $(this).attr("alt");
sessionStorage.setItem("movieId", movieId);
});
});
} | conditional_block |
bootstrap-datepicker.th.js | /**
* Thai translation for bootstrap-datepicker
* Suchau Jiraprapot <seroz24@gmail.com>
*/
;(function ($) { | $.fn.datepicker.dates['th'] = {
days: ["อาทิตย์", "จันทร์", "อังคาร", "พุธ", "พฤหัส", "ศุกร์", "เสาร์", "อาทิตย์"],
daysShort: ["อา", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"],
daysMin: ["อา", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"],
months: ["มกราคม", "กุมภาพันธ์", "มีนาคม", "เมษายน", "พฤษภาคม", "มิถุนายน", "กรกฎาคม", "สิงหาคม", "กันยายน", "ตุลาคม", "พฤศจิกายน", "ธันวาคม"],
monthsShort: ["ม.ค.", "ก.พ.", "มี.ค.", "เม.ย.", "พ.ค.", "มิ.ย.", "ก.ค.", "ส.ค.", "ก.ย.", "ต.ค.", "พ.ย.", "ธ.ค."],
today: "วันนี้"
};
}(jQuery)); | random_line_split | |
app.js | /* -----------------------------------------------
/* How to use? : Check the GitHub README
/* ----------------------------------------------- */
/* To load a config file (particles.json) you need to host this demo (MAMP/WAMP/local)... */
/*
particlesJS.load('particles-js', 'particles.json', function() {
console.log('particles.js loaded - callback');
});
*/
/* Otherwise just put the config content (json): */
particlesJS('particles-js',
{
"particles": {
"number": {
"value": 80,
"density": {
"enable": true,
"value_area": 800
}
},
"color": {
"value": "#888"
},
"shape": {
"type": "circle",
"stroke": {
"width": 0,
"color": "#000000"
},
"polygon": {
"nb_sides": 5
},
"image": {
"src": "img/github.svg",
"width": 100,
"height": 100
}
},
"opacity": {
"value": 0.5,
"random": false,
"anim": {
"enable": false,
"speed": 1,
"opacity_min": 0.1,
"sync": false
}
},
"size": {
"value": 5,
"random": true,
"anim": {
"enable": false,
"speed": 40, | "enable": true,
"distance": 150,
"color": "#777",
"opacity": 0.4,
"width": 1
},
"move": {
"enable": true,
"speed": 6,
"direction": "none",
"random": false,
"straight": false,
"out_mode": "out",
"attract": {
"enable": false,
"rotateX": 600,
"rotateY": 1200
}
}
},
"interactivity": {
"detect_on": "canvas",
"events": {
"onhover": {
"enable": true,
"mode": "repulse"
},
"onclick": {
"enable": true,
"mode": "push"
},
"resize": true
},
"modes": {
"grab": {
"distance": 400,
"line_linked": {
"opacity": 1
}
},
"bubble": {
"distance": 400,
"size": 40,
"duration": 2,
"opacity": 8,
"speed": 3
},
"repulse": {
"distance": 200
},
"push": {
"particles_nb": 4
},
"remove": {
"particles_nb": 2
}
}
},
"retina_detect": true,
"config_demo": {
"hide_card": false,
"background_color": "#b61924",
"background_image": "",
"background_position": "50% 50%",
"background_repeat": "no-repeat",
"background_size": "cover"
}
}
); | "size_min": 0.1,
"sync": false
}
},
"line_linked": { | random_line_split |
pipeline-hetero-lr-normal.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_logistic_regression import common_tools
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
|
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| if isinstance(config, str):
config = load_job_config(config)
lr_param = {
"name": "hetero_lr_0",
"penalty": "L2",
"optimizer": "rmsprop",
"tol": 0.0001,
"alpha": 0.01,
"max_iter": 30,
"early_stop": "diff",
"batch_size": 320,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"sqn_param": {
"update_interval_L": 3,
"memory_M": 5,
"sample_size": 5000,
"random_seed": None
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"callback_param": {
"callbacks": ["ModelCheckpoint"],
"save_freq": "epoch"
}
}
pipeline = common_tools.make_normal_dsl(config, namespace, lr_param)
# dsl_json = predict_pipeline.get_predict_dsl()
# conf_json = predict_pipeline.get_predict_conf()
# import json
# json.dump(dsl_json, open('./hetero-lr-normal-predict-dsl.json', 'w'), indent=4)
# json.dump(conf_json, open('./hetero-lr-normal-predict-conf.json', 'w'), indent=4)
# fit model
pipeline.fit()
# query component summary
common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())
common_tools.prettify(pipeline.get_component("evaluation_0").get_summary()) | identifier_body |
pipeline-hetero-lr-normal.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_logistic_regression import common_tools
from pipeline.utils.tools import load_job_config
def | (config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
lr_param = {
"name": "hetero_lr_0",
"penalty": "L2",
"optimizer": "rmsprop",
"tol": 0.0001,
"alpha": 0.01,
"max_iter": 30,
"early_stop": "diff",
"batch_size": 320,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"sqn_param": {
"update_interval_L": 3,
"memory_M": 5,
"sample_size": 5000,
"random_seed": None
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"callback_param": {
"callbacks": ["ModelCheckpoint"],
"save_freq": "epoch"
}
}
pipeline = common_tools.make_normal_dsl(config, namespace, lr_param)
# dsl_json = predict_pipeline.get_predict_dsl()
# conf_json = predict_pipeline.get_predict_conf()
# import json
# json.dump(dsl_json, open('./hetero-lr-normal-predict-dsl.json', 'w'), indent=4)
# json.dump(conf_json, open('./hetero-lr-normal-predict-conf.json', 'w'), indent=4)
# fit model
pipeline.fit()
# query component summary
common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())
common_tools.prettify(pipeline.get_component("evaluation_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| main | identifier_name |
pipeline-hetero-lr-normal.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_logistic_regression import common_tools
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
|
lr_param = {
"name": "hetero_lr_0",
"penalty": "L2",
"optimizer": "rmsprop",
"tol": 0.0001,
"alpha": 0.01,
"max_iter": 30,
"early_stop": "diff",
"batch_size": 320,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"sqn_param": {
"update_interval_L": 3,
"memory_M": 5,
"sample_size": 5000,
"random_seed": None
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"callback_param": {
"callbacks": ["ModelCheckpoint"],
"save_freq": "epoch"
}
}
pipeline = common_tools.make_normal_dsl(config, namespace, lr_param)
# dsl_json = predict_pipeline.get_predict_dsl()
# conf_json = predict_pipeline.get_predict_conf()
# import json
# json.dump(dsl_json, open('./hetero-lr-normal-predict-dsl.json', 'w'), indent=4)
# json.dump(conf_json, open('./hetero-lr-normal-predict-conf.json', 'w'), indent=4)
# fit model
pipeline.fit()
# query component summary
common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())
common_tools.prettify(pipeline.get_component("evaluation_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| config = load_job_config(config) | conditional_block |
pipeline-hetero-lr-normal.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at | # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_logistic_regression import common_tools
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
lr_param = {
"name": "hetero_lr_0",
"penalty": "L2",
"optimizer": "rmsprop",
"tol": 0.0001,
"alpha": 0.01,
"max_iter": 30,
"early_stop": "diff",
"batch_size": 320,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"sqn_param": {
"update_interval_L": 3,
"memory_M": 5,
"sample_size": 5000,
"random_seed": None
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"callback_param": {
"callbacks": ["ModelCheckpoint"],
"save_freq": "epoch"
}
}
pipeline = common_tools.make_normal_dsl(config, namespace, lr_param)
# dsl_json = predict_pipeline.get_predict_dsl()
# conf_json = predict_pipeline.get_predict_conf()
# import json
# json.dump(dsl_json, open('./hetero-lr-normal-predict-dsl.json', 'w'), indent=4)
# json.dump(conf_json, open('./hetero-lr-normal-predict-conf.json', 'w'), indent=4)
# fit model
pipeline.fit()
# query component summary
common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())
common_tools.prettify(pipeline.get_component("evaluation_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main() | #
# http://www.apache.org/licenses/LICENSE-2.0
# | random_line_split |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc; | use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message {
self.counts.borrow_mut().update(time, data.len() as i64);
}
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn new(pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
}
} | random_line_split | |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc;
use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message |
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn new(pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
}
}
| {
self.counts.borrow_mut().update(time, data.len() as i64);
} | conditional_block |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc;
use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message {
self.counts.borrow_mut().update(time, data.len() as i64);
}
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn | (pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
}
}
| new | identifier_name |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc;
use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message {
self.counts.borrow_mut().update(time, data.len() as i64);
}
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn new(pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) |
}
| {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
} | identifier_body |
gr-change-status_test.ts | /**
* @license
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0 | *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import '../../../test/common-test-setup-karma';
import {createChange} from '../../../test/test-data-generators';
import './gr-change-status';
import {ChangeStates, GrChangeStatus, WIP_TOOLTIP} from './gr-change-status';
import {GerritNav} from '../../core/gr-navigation/gr-navigation';
import {MERGE_CONFLICT_TOOLTIP} from './gr-change-status';
const basicFixture = fixtureFromElement('gr-change-status');
const PRIVATE_TOOLTIP =
'This change is only visible to its owner and ' +
'current reviewers (or anyone with "View Private Changes" permission).';
suite('gr-change-status tests', () => {
let element: GrChangeStatus;
setup(() => {
element = basicFixture.instantiate();
});
test('WIP', () => {
element.status = ChangeStates.WIP;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Work in Progress'
);
assert.equal(element.tooltipText, WIP_TOOLTIP);
assert.isTrue(element.classList.contains('wip'));
});
test('WIP flat', () => {
element.flat = true;
element.status = ChangeStates.WIP;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'WIP'
);
assert.isDefined(element.tooltipText);
assert.isTrue(element.classList.contains('wip'));
assert.isTrue(element.hasAttribute('flat'));
});
test('merged', () => {
element.status = ChangeStates.MERGED;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Merged'
);
assert.equal(element.tooltipText, '');
assert.isTrue(element.classList.contains('merged'));
assert.isFalse(
element.showResolveIcon([{url: 'http://google.com'}], ChangeStates.MERGED)
);
});
test('abandoned', () => {
element.status = ChangeStates.ABANDONED;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Abandoned'
);
assert.equal(element.tooltipText, '');
assert.isTrue(element.classList.contains('abandoned'));
});
test('merge conflict', () => {
const status = ChangeStates.MERGE_CONFLICT;
element.status = status;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Merge Conflict'
);
assert.equal(element.tooltipText, MERGE_CONFLICT_TOOLTIP);
assert.isTrue(element.classList.contains('merge-conflict'));
assert.isFalse(element.hasStatusLink(undefined, [], status));
assert.isFalse(element.showResolveIcon([], status));
});
test('merge conflict with resolve link', () => {
const status = ChangeStates.MERGE_CONFLICT;
const url = 'http://google.com';
const weblinks = [{url}];
assert.isTrue(element.hasStatusLink(undefined, weblinks, status));
assert.equal(element.getStatusLink(undefined, weblinks, status), url);
assert.isTrue(element.showResolveIcon(weblinks, status));
});
test('reverted change', () => {
const url = 'http://google.com';
const status = ChangeStates.REVERT_SUBMITTED;
const revertedChange = createChange();
sinon.stub(GerritNav, 'getUrlForSearchQuery').returns(url);
assert.isTrue(element.hasStatusLink(revertedChange, [], status));
assert.equal(element.getStatusLink(revertedChange, [], status), url);
});
test('private', () => {
element.status = ChangeStates.PRIVATE;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Private'
);
assert.equal(element.tooltipText, PRIVATE_TOOLTIP);
assert.isTrue(element.classList.contains('private'));
});
test('active', () => {
element.status = ChangeStates.ACTIVE;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Active'
);
assert.equal(element.tooltipText, '');
assert.isTrue(element.classList.contains('active'));
});
test('ready to submit', () => {
element.status = ChangeStates.READY_TO_SUBMIT;
flush();
assert.equal(
element.shadowRoot!.querySelector<HTMLDivElement>('.chip')!.innerText,
'Ready to submit'
);
assert.equal(element.tooltipText, '');
assert.isTrue(element.classList.contains('ready-to-submit'));
});
test('updating status removes the previous class', () => {
element.status = ChangeStates.PRIVATE;
flush();
assert.isTrue(element.classList.contains('private'));
assert.isFalse(element.classList.contains('wip'));
element.status = ChangeStates.WIP;
flush();
assert.isFalse(element.classList.contains('private'));
assert.isTrue(element.classList.contains('wip'));
});
}); | random_line_split | |
users.js | import resource from 'resource-router-middleware';
import model from '../models/users';
export default ({ config, db }) => resource({
/** Property name to store preloaded entity on `request`. */
id : 'user',
mergeParams: true,
/** For requests with an `id`, you can auto-load the entity.
* Errors terminate the request, success sets `req[id] = data`.
*/
load(req, id, callback) {
let query = {
select: '-_id -__v'
};
model.findOne({id: id}, query.select, function(err, record) {
let error = record ? null : 'Not found';
callback(error, record);
});
},
/** GET / - List all entities */
index(params, res) {
let urlQuery = {};
let searchable = ['fullName', 'email', 'workPhone', 'personalPhone'];
params.query.select = {
'_id': 0,
'id': 1,
'fullName': 1,
'email': 1,
'workPhone': 1,
'personalPhone': 1
};
if(!params.query.active || params.query.active === 'false') {
urlQuery.active = false;
} else {
urlQuery.active = true;
}
if(!params.query.limit) {
params.query.limit = config.pagination;
}
if(!params.query.sort) {
params.query.sort = 'id';
}
if(params.query.sort) {
params.query.sort = params.query.sort;
}
for(var key in params.query) {
if(!isNaN(parseFloat((params.query[key])))) {
params.query[key] = parseInt(params.query[key]);
}
}
if(params.query.find) {
let parsed = JSON.parse(params.query.find);
let queryArray = [];
for ( let property in parsed ) {
if( (searchable.indexOf(property) > -1) ) {
let userSearch = new RegExp(parsed[property], 'i');
let tempObj = {};
tempObj[property] = userSearch;
queryArray.push(tempObj);
}
}
//we ensure we only search for proper active/inactive records
let activeQuery = {
'active': urlQuery.active
}
queryArray.push(activeQuery);
urlQuery = {
$and: queryArray,
};
}
model.paginate( urlQuery ,params.query, function(err, result) {
if(err) {
console.log(err);
res.sendStatus(500);
}
res.json(result);
});
},
/** POST / - Create a new entity */
create(req, res) {
console.log(req.body);
let newModel = new model(req.body);
newModel.save((err, user) => {
if(err) {
console.error(err);
if (err.name === 'MongoError' && err.code === 11000) {
// Duplicate username
return res.status(400).send({ success: false, message: 'Email is not unique' });
}
return res.status(400).send({ success: false, message: err });
} else {
return res.status(200).send({success: true});
}
});
},
/** GET /:id - Return a given entity */
read( record , res) {
res.status(200).json(record['user']);
},
/** PUT /:id - Update a given entity */
update({ facet, body }, res) {
for (let key in body) {
if (key!=='id') {
facet[key] = body[key];
}
}
res.sendStatus(204);
},
/** DELETE /:id - Delete a given entity */
| ({ facet }, res) {
facets.splice(facets.indexOf(facet), 1);
res.sendStatus(204);
},
});
| delete | identifier_name |
users.js | import resource from 'resource-router-middleware';
import model from '../models/users';
export default ({ config, db }) => resource({
/** Property name to store preloaded entity on `request`. */
id : 'user',
mergeParams: true,
/** For requests with an `id`, you can auto-load the entity.
* Errors terminate the request, success sets `req[id] = data`.
*/
load(req, id, callback) {
let query = {
select: '-_id -__v'
};
model.findOne({id: id}, query.select, function(err, record) {
let error = record ? null : 'Not found';
callback(error, record);
});
},
/** GET / - List all entities */
index(params, res) {
let urlQuery = {};
let searchable = ['fullName', 'email', 'workPhone', 'personalPhone'];
params.query.select = {
'_id': 0,
'id': 1,
'fullName': 1,
'email': 1,
'workPhone': 1,
'personalPhone': 1
};
if(!params.query.active || params.query.active === 'false') {
urlQuery.active = false;
} else {
urlQuery.active = true;
}
if(!params.query.limit) |
if(!params.query.sort) {
params.query.sort = 'id';
}
if(params.query.sort) {
params.query.sort = params.query.sort;
}
for(var key in params.query) {
if(!isNaN(parseFloat((params.query[key])))) {
params.query[key] = parseInt(params.query[key]);
}
}
if(params.query.find) {
let parsed = JSON.parse(params.query.find);
let queryArray = [];
for ( let property in parsed ) {
if( (searchable.indexOf(property) > -1) ) {
let userSearch = new RegExp(parsed[property], 'i');
let tempObj = {};
tempObj[property] = userSearch;
queryArray.push(tempObj);
}
}
//we ensure we only search for proper active/inactive records
let activeQuery = {
'active': urlQuery.active
}
queryArray.push(activeQuery);
urlQuery = {
$and: queryArray,
};
}
model.paginate( urlQuery ,params.query, function(err, result) {
if(err) {
console.log(err);
res.sendStatus(500);
}
res.json(result);
});
},
/** POST / - Create a new entity */
create(req, res) {
console.log(req.body);
let newModel = new model(req.body);
newModel.save((err, user) => {
if(err) {
console.error(err);
if (err.name === 'MongoError' && err.code === 11000) {
// Duplicate username
return res.status(400).send({ success: false, message: 'Email is not unique' });
}
return res.status(400).send({ success: false, message: err });
} else {
return res.status(200).send({success: true});
}
});
},
/** GET /:id - Return a given entity */
read( record , res) {
res.status(200).json(record['user']);
},
/** PUT /:id - Update a given entity */
update({ facet, body }, res) {
for (let key in body) {
if (key!=='id') {
facet[key] = body[key];
}
}
res.sendStatus(204);
},
/** DELETE /:id - Delete a given entity */
delete({ facet }, res) {
facets.splice(facets.indexOf(facet), 1);
res.sendStatus(204);
},
});
| {
params.query.limit = config.pagination;
} | conditional_block |
users.js | import resource from 'resource-router-middleware';
import model from '../models/users';
export default ({ config, db }) => resource({
/** Property name to store preloaded entity on `request`. */
id : 'user',
mergeParams: true,
/** For requests with an `id`, you can auto-load the entity.
* Errors terminate the request, success sets `req[id] = data`.
*/
load(req, id, callback) {
let query = {
select: '-_id -__v'
};
model.findOne({id: id}, query.select, function(err, record) {
let error = record ? null : 'Not found';
callback(error, record);
});
},
/** GET / - List all entities */
index(params, res) {
let urlQuery = {};
let searchable = ['fullName', 'email', 'workPhone', 'personalPhone'];
params.query.select = {
'_id': 0,
'id': 1,
'fullName': 1,
'email': 1,
'workPhone': 1,
'personalPhone': 1
};
if(!params.query.active || params.query.active === 'false') {
urlQuery.active = false;
} else {
urlQuery.active = true;
}
if(!params.query.limit) {
params.query.limit = config.pagination;
}
if(!params.query.sort) {
params.query.sort = 'id';
}
if(params.query.sort) {
params.query.sort = params.query.sort;
}
for(var key in params.query) {
if(!isNaN(parseFloat((params.query[key])))) {
params.query[key] = parseInt(params.query[key]);
}
}
if(params.query.find) {
let parsed = JSON.parse(params.query.find);
let queryArray = [];
for ( let property in parsed ) { | }
}
//we ensure we only search for proper active/inactive records
let activeQuery = {
'active': urlQuery.active
}
queryArray.push(activeQuery);
urlQuery = {
$and: queryArray,
};
}
model.paginate( urlQuery ,params.query, function(err, result) {
if(err) {
console.log(err);
res.sendStatus(500);
}
res.json(result);
});
},
/** POST / - Create a new entity */
create(req, res) {
console.log(req.body);
let newModel = new model(req.body);
newModel.save((err, user) => {
if(err) {
console.error(err);
if (err.name === 'MongoError' && err.code === 11000) {
// Duplicate username
return res.status(400).send({ success: false, message: 'Email is not unique' });
}
return res.status(400).send({ success: false, message: err });
} else {
return res.status(200).send({success: true});
}
});
},
/** GET /:id - Return a given entity */
read( record , res) {
res.status(200).json(record['user']);
},
/** PUT /:id - Update a given entity */
update({ facet, body }, res) {
for (let key in body) {
if (key!=='id') {
facet[key] = body[key];
}
}
res.sendStatus(204);
},
/** DELETE /:id - Delete a given entity */
delete({ facet }, res) {
facets.splice(facets.indexOf(facet), 1);
res.sendStatus(204);
},
}); | if( (searchable.indexOf(property) > -1) ) {
let userSearch = new RegExp(parsed[property], 'i');
let tempObj = {};
tempObj[property] = userSearch;
queryArray.push(tempObj); | random_line_split |
users.js | import resource from 'resource-router-middleware';
import model from '../models/users';
export default ({ config, db }) => resource({
/** Property name to store preloaded entity on `request`. */
id : 'user',
mergeParams: true,
/** For requests with an `id`, you can auto-load the entity.
* Errors terminate the request, success sets `req[id] = data`.
*/
load(req, id, callback) {
let query = {
select: '-_id -__v'
};
model.findOne({id: id}, query.select, function(err, record) {
let error = record ? null : 'Not found';
callback(error, record);
});
},
/** GET / - List all entities */
index(params, res) | ,
/** POST / - Create a new entity */
create(req, res) {
console.log(req.body);
let newModel = new model(req.body);
newModel.save((err, user) => {
if(err) {
console.error(err);
if (err.name === 'MongoError' && err.code === 11000) {
// Duplicate username
return res.status(400).send({ success: false, message: 'Email is not unique' });
}
return res.status(400).send({ success: false, message: err });
} else {
return res.status(200).send({success: true});
}
});
},
/** GET /:id - Return a given entity */
read( record , res) {
res.status(200).json(record['user']);
},
/** PUT /:id - Update a given entity */
update({ facet, body }, res) {
for (let key in body) {
if (key!=='id') {
facet[key] = body[key];
}
}
res.sendStatus(204);
},
/** DELETE /:id - Delete a given entity */
delete({ facet }, res) {
facets.splice(facets.indexOf(facet), 1);
res.sendStatus(204);
},
});
| {
let urlQuery = {};
let searchable = ['fullName', 'email', 'workPhone', 'personalPhone'];
params.query.select = {
'_id': 0,
'id': 1,
'fullName': 1,
'email': 1,
'workPhone': 1,
'personalPhone': 1
};
if(!params.query.active || params.query.active === 'false') {
urlQuery.active = false;
} else {
urlQuery.active = true;
}
if(!params.query.limit) {
params.query.limit = config.pagination;
}
if(!params.query.sort) {
params.query.sort = 'id';
}
if(params.query.sort) {
params.query.sort = params.query.sort;
}
for(var key in params.query) {
if(!isNaN(parseFloat((params.query[key])))) {
params.query[key] = parseInt(params.query[key]);
}
}
if(params.query.find) {
let parsed = JSON.parse(params.query.find);
let queryArray = [];
for ( let property in parsed ) {
if( (searchable.indexOf(property) > -1) ) {
let userSearch = new RegExp(parsed[property], 'i');
let tempObj = {};
tempObj[property] = userSearch;
queryArray.push(tempObj);
}
}
//we ensure we only search for proper active/inactive records
let activeQuery = {
'active': urlQuery.active
}
queryArray.push(activeQuery);
urlQuery = {
$and: queryArray,
};
}
model.paginate( urlQuery ,params.query, function(err, result) {
if(err) {
console.log(err);
res.sendStatus(500);
}
res.json(result);
});
} | identifier_body |
environment.py | # -*- coding:utf-8 -*-
# @version: 1.0
# @author:
# @date: '14-4-10'
import os
import logging
import threading
from ConfigParser import ConfigParser
from ConfigParser import NoSectionError, InterpolationMissingOptionError, Error
import simplejson as json
from utils.logger import Logger
_lock = threading.RLock()
class Environment():
instance = None
def __init__(self):
self._working_path = ""
self._app_name = ""
@staticmethod
def get_instance():
if not Environment.instance:
Environment.instance = Environment()
return Environment.instance
def init_by_file_name(self, start_file_path, start_file_name, start_file_depth=1):
start_file_name = os.path.join(start_file_path, start_file_name)
self.init(start_file_name, start_file_depth)
def init(self, start_file_name, start_file_depth):
"""
初始化应用环境
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
"""
self._working_path, self._app_name = self._parse_start_file_name(
start_file_name, start_file_depth)
self._set_working_path(self._working_path)
self._init_logger()
self._configure_parser = ConfigParser()
self._is_configure_loaded = False
self._load_configure()
def get_db_setting(self, db_setting_section_name):
return self.get_configure_value(db_setting_section_name, "host"), \
self.get_configure_value(db_setting_section_name, "db"), \
self.get_configure_value(db_setting_section_name, "user"), \
self.get_configure_value(db_setting_section_name, "passwd")
def get_app_name(self):
return self._app_name
def get_working_path(self):
return self._working_path
def _get_configure_value(self, section, key):
value = None
try:
value = self._configure_parser.get(section, key)
return value
except NoSectionError, e:
logging.error(e.message)
return None
except InterpolationMissingOptionError, e:
value = e.message.split("rawval : ")
if value and len(value) > 1:
value = value[1][:-1]
else:
raise Error
return value
def get_configure_value(self, section, key, default="", value_type=str):
_lock.acquire()
value = self._get_configure_value(section, key)
_lock.release()
if value_type in [str, unicode]:
pass
elif value_type in [int, long]:
value = int(value)
elif value_type in [float]:
value = float(value)
elif value_type == json:
value = json.loads(value)
else:
pass
value = default if value is None else value
return value
def set_configure_value(self, section, key, value=""):
_lock.acquire()
if not section in self._configure_parser.sections():
self._con | ection(section)
if type(value) == dict:
value = json.dumps(value)
self._configure_parser.set(section, key, value)
with file(self._config_path, "w") as fp:
self._configure_parser.write(fp)
_lock.release()
def _parse_start_file_name(self, start_file_name, start_file_depth):
"""
解析启动文件名称和该文件深度,返回程序工作目录和程序名称
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
:return:
"""
start_file_name = start_file_name.replace("\\", "/")
file_name_parts = start_file_name.split('/')
file_name_parts.remove("")
if not file_name_parts:
logging.error(u"启动文件输入参数错误,输入的不是完整的文件名: " + start_file_name)
return
app_name = file_name_parts[-1]
if "." in app_name:
app_name = app_name[:app_name.rindex(".")]
file_name_parts = file_name_parts[:(start_file_depth) * -1]
working_dir = os.sep.join(file_name_parts)
return working_dir, app_name
def _init_logger(self, logging_file_name="logging.conf"):
log_file_whole_name = os.path.join(
self._working_path, "conf", logging_file_name)
print "Load logging file:", log_file_whole_name
Logger.load_configure(log_file_whole_name)
def _load_configure(self):
configure_file_name = os.path.join(
self._working_path, "conf", self._app_name + ".conf")
print "Load configure file:", configure_file_name
if self._is_configure_loaded:
return
if not configure_file_name:
return
self._configure_parser.read(configure_file_name)
def _set_working_path(self, work_path):
work_path = os.path.abspath(work_path)
os.chdir(work_path)
print "Set working dir:", work_path
if __name__ == "__main__":
# Environment.get_instance()._load_configure()
# print Environment.get_instance().get_configure_value("zhiShiTuPu",
# "user")
print Environment.get_instance()._parse_start_file_name(
"F:\\newgit\\nluData\\query-crawler\\crawler\\query_crawler.py", 1)
| figure_parser.add_s | identifier_name |
environment.py | # -*- coding:utf-8 -*-
# @version: 1.0
# @author:
# @date: '14-4-10'
import os
import logging
import threading
from ConfigParser import ConfigParser
from ConfigParser import NoSectionError, InterpolationMissingOptionError, Error
import simplejson as json
from utils.logger import Logger
_lock = threading.RLock()
class Environment():
instance = None
def __init__(self):
self._working_path = ""
self._app_name = ""
@staticmethod
def get_instance():
if not Environment.instance:
Environment.instance = Environment()
return Environment.instance
def init_by_file_name(self, start_file_path, start_file_name, start_file_depth=1):
start_file_name = os.path.join(start_file_path, start_file_name)
self.init(start_file_name, start_file_depth)
def init(self, start_file_name, start_file_depth):
"""
初始化应用环境
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
"""
self._working_path, self._app_name = self._parse_start_file_name(
start_file_name, start_file_depth)
self._set_working_path(self._working_path)
self._init_logger()
self._configure_parser = ConfigParser()
self._is_configure_loaded = False
self._load_configure()
def get_db_setting(self, db_setting_section_name):
return self.get_configure_value(db_setting_section_name, "host"), \
self.get_configure_value(db_setting_section_name, "db"), \
self.get_configure_value(db_setting_section_name, "user"), \
self.get_configure_value(db_setting_section_name, "passwd")
def get_app_name(self):
return self._app_name
def get_working_path(self):
return self._working_path
| def _get_configure_value(self, section, key):
value = None
try:
value = self._configure_parser.get(section, key)
return value
except NoSectionError, e:
logging.error(e.message)
return None
except InterpolationMissingOptionError, e:
value = e.message.split("rawval : ")
if value and len(value) > 1:
value = value[1][:-1]
else:
raise Error
return value
def get_configure_value(self, section, key, default="", value_type=str):
_lock.acquire()
value = self._get_configure_value(section, key)
_lock.release()
if value_type in [str, unicode]:
pass
elif value_type in [int, long]:
value = int(value)
elif value_type in [float]:
value = float(value)
elif value_type == json:
value = json.loads(value)
else:
pass
value = default if value is None else value
return value
def set_configure_value(self, section, key, value=""):
_lock.acquire()
if not section in self._configure_parser.sections():
self._configure_parser.add_section(section)
if type(value) == dict:
value = json.dumps(value)
self._configure_parser.set(section, key, value)
with file(self._config_path, "w") as fp:
self._configure_parser.write(fp)
_lock.release()
def _parse_start_file_name(self, start_file_name, start_file_depth):
"""
解析启动文件名称和该文件深度,返回程序工作目录和程序名称
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
:return:
"""
start_file_name = start_file_name.replace("\\", "/")
file_name_parts = start_file_name.split('/')
file_name_parts.remove("")
if not file_name_parts:
logging.error(u"启动文件输入参数错误,输入的不是完整的文件名: " + start_file_name)
return
app_name = file_name_parts[-1]
if "." in app_name:
app_name = app_name[:app_name.rindex(".")]
file_name_parts = file_name_parts[:(start_file_depth) * -1]
working_dir = os.sep.join(file_name_parts)
return working_dir, app_name
def _init_logger(self, logging_file_name="logging.conf"):
log_file_whole_name = os.path.join(
self._working_path, "conf", logging_file_name)
print "Load logging file:", log_file_whole_name
Logger.load_configure(log_file_whole_name)
def _load_configure(self):
configure_file_name = os.path.join(
self._working_path, "conf", self._app_name + ".conf")
print "Load configure file:", configure_file_name
if self._is_configure_loaded:
return
if not configure_file_name:
return
self._configure_parser.read(configure_file_name)
def _set_working_path(self, work_path):
work_path = os.path.abspath(work_path)
os.chdir(work_path)
print "Set working dir:", work_path
if __name__ == "__main__":
# Environment.get_instance()._load_configure()
# print Environment.get_instance().get_configure_value("zhiShiTuPu",
# "user")
print Environment.get_instance()._parse_start_file_name(
"F:\\newgit\\nluData\\query-crawler\\crawler\\query_crawler.py", 1) | random_line_split | |
environment.py | # -*- coding:utf-8 -*-
# @version: 1.0
# @author:
# @date: '14-4-10'
import os
import logging
import threading
from ConfigParser import ConfigParser
from ConfigParser import NoSectionError, InterpolationMissingOptionError, Error
import simplejson as json
from utils.logger import Logger
_lock = threading.RLock()
class Environment():
instance = None
def __init__(self):
self._working_path = ""
self._app_name = ""
@staticmethod
def get_instance():
if not Environment.instance:
Environment.instance = Environment()
return Environment.instance
def init_by_file_name(self, start_file_path, start_file_name, start_file_depth=1):
start_file_name = os.path.join(start_file_path, start_file_name)
self.init(start_file_name, start_file_depth)
def init(self, start_file_name, start_file_depth):
"""
初始化应用环境
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
"""
self._working_path, self._app_name = self._parse_start_file_name(
start_file_name, start_file_depth)
self._set_working_path(self._working_path)
self._init_logger()
self._configure_parser = ConfigParser()
self._is_configure_loaded = False
self._load_configure()
def get_db_setting(self, db_setting_section_name):
return self.get_configure_value(db_setting_section_name, "host"), \
self.get_configure_value(db_setting_section_name, "db"), \
self.get_configure_value(db_setting_section_name, "user"), \
self.get_configure_value(db_setting_section_name, "passwd")
def get_app_name(self):
return self._app_name
def get_working_path(self):
return self._working_path
def _get_configure_value(self, section, key):
value = None
try:
value = self._configure_parser.get(section, key)
return value
except NoSectionError, e:
logging.error(e.message)
return None
except InterpolationMissingOptionError, e:
value = e.message.split("rawval : ")
if value and len(value) > 1:
value = value[1][:-1]
else:
raise Error
return value
def get_configure_value(self, section, key, default="", value_type=str):
_lock.acquire()
value = self._get_configure_value(section, key)
_lock.release()
if value_type in [str, unicode]:
pass
elif value_type in [int, long]:
value = int(value)
elif value_type in [float]:
value = float(value)
elif value_type == json:
value = json.loads(value)
else:
pass
value = default if value is None else value
return value
def set_configure_value(self, section, key, value=""):
_lock.acquire()
if not section in self._configure_parser.sections():
self._configure_parser.add_section(section)
if type(value) == dict:
value = json.dumps(value)
self._configure_parser.set(section, key, value)
with file(self._config_path, "w") as fp:
self._configure_parser.write(fp)
_lock.release()
def _parse_start_file_name(self, start_file_name, start_file_depth):
"""
解析启动文件名称和该文件深度,返回程序工作目录和程序名称
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
:return:
"""
start_file_name = start_file_name.replace("\\", "/")
file_name_parts = start_file_name.split('/')
file_name_parts.remove("")
if not file_name_parts:
logging.error(u"启动文件输入参数错误,输入的不是完整的文件名: " + start_file_name)
return
app_name = file_name_parts[-1]
if "." in app_name:
app_name = app_name[:app_name.rindex(".")]
file_name_parts = file_name_parts[:(start_file_depth) * -1]
working_dir = os.sep.join(file_name_parts)
return working_dir, app_name
def _init_logger(self, logging_file_name="logging.conf"):
log_file_whole_name = os.path.join(
self._working_path, "conf", logging_file_name)
print "Load logging file:", log_file_whole_name
Logger.load_configure(log_file_whole_name)
def _load_configure(self):
configure_file_name = os.path.join(
self._working_path, "conf", self._app_name + ".conf")
print "Load configure file:", configure_file_name
if self._is_configure_loaded:
return
if not configure_file_name:
return
self._configure_parser.read(configure_file_name)
def _set_working_path(self, work_path):
work_path = os.path.abspath(work_path)
os.chdir(work_path)
print "Set working dir:", work_path
if __name__ == "__main__":
# Environment.get_instance()._load_configure()
# print Environment.get_instance().get_configure_value("zhiShiTuPu",
# "user")
print Environment.get_instance()._parse_start_file_name(
"F:\\newgit\\nluData\\query-crawler\\crawler\\que | ry_crawler.py", 1)
| identifier_body | |
environment.py | # -*- coding:utf-8 -*-
# @version: 1.0
# @author:
# @date: '14-4-10'
import os
import logging
import threading
from ConfigParser import ConfigParser
from ConfigParser import NoSectionError, InterpolationMissingOptionError, Error
import simplejson as json
from utils.logger import Logger
_lock = threading.RLock()
class Environment():
instance = None
def __init__(self):
self._working_path = ""
self._app_name = ""
@staticmethod
def get_instance():
if not Environment.instance:
Environment.instance = Environment()
return Environment.instance
def init_by_file_name(self, start_file_path, start_file_name, start_file_depth=1):
start_file_name = os.path.join(start_file_path, start_file_name)
self.init(start_file_name, start_file_depth)
def init(self, start_file_name, start_file_depth):
"""
初始化应用环境
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
"""
self._working_path, self._app_name = self._parse_start_file_name(
start_file_name, start_file_depth)
self._set_working_path(self._working_path)
self._init_logger()
self._configure_parser = ConfigParser()
self._is_configure_loaded = False
self._load_configure()
def get_db_setting(self, db_setting_section_name):
return self.get_configure_value(db_setting_section_name, "host"), \
self.get_configure_value(db_setting_section_name, "db"), \
self.get_configure_value(db_setting_section_name, "user"), \
self.get_configure_value(db_setting_section_name, "passwd")
def get_app_name(self):
return self._app_name
def get_working_path(self):
return self._working_path
def _get_configure_value(self, section, key):
value = None
try:
value = self._configure_parser.get(section, key)
return value
except NoSectionError, e:
logging.error(e.message)
return None
except InterpolationMissingOptionError, e:
value = e.message.split("rawval : ")
if value and len(value) > 1:
value = value[1][:-1]
else:
raise Error
return value
def get_configure_value(self, section, key, default="", value_type=str):
_lock.acquire()
value = self._get_configure_value(section, key)
_lock.release()
if value_type in [str, unicode]:
pass
elif value_type in [int, long]:
value = int(value)
elif value_type in [float]:
value = float(value)
elif value_type == json:
value = json.loads(value)
else:
pass
value = default if value is None else value
return value
def set_configure_value(self, section, key, value=""):
_lock.acquire()
if not section in self._configure_parser.sections():
self._configure_parser.add_section(section)
if type(value) == dict:
value = json.dumps(value)
self._configure_parser.set(section, key, value)
with file(self._config_path, "w") as fp:
self._configure_parser.write(fp)
_lock.release()
def _parse_start_file_name(self, start_file_name, start_file_depth):
"""
解析启动文件名称和该文件深度,返回程序工作目录和程序名称
:param start_file_name: 调用本方法的代码文件的完整路径
:param start_file_depth: 调用本方法的代码文件距离工作目录的深度。如果在工作目录下,深度为1;如果在工作目录的一级子文件夹下,深度为2, 以此类推。
:return:
"""
start_file_name = start_file_name.replace("\\", "/")
file_name_parts = start_file_name.split('/')
file_name_parts.remove("")
if not file_name_parts:
logging.error(u"启动文件输入参数错误,输入的不是完整的文件名: " + start_file_name)
return
app_name = file_name_parts[-1]
if "." in app_name:
app_name = app_name[:app_name.rindex(".")]
file_name_parts = file_name_parts[:(start_file_depth) * -1]
working_dir = os.sep.join(file_name_parts)
return working_dir, app_name
def _init_logger(self, logging_file_name="logging.conf"):
log_file_whole_name = os.path.join(
self._working_path, "conf", logging_file_name)
print "Load logging file:", log_file_whole_name
Logger.load_configure(log_file_whole_name)
def _load_configure(self):
configure_file_name = os.path.join(
self._working_path, "conf", self._app_name + ".conf")
print "Load configure file:", configure_file_name
if self._is_configure_loaded:
return
if not configure_file_name:
return
self._configure_parser.read(configure_file_name)
def _set_working_path(self, work_path):
work_path = os.path.abspath(work_path)
os.chdir(work_path)
print "Set working dir:", work_path
if __name__ == "__main__":
# Environment.get_instance()._load_configure()
# print Environment.get_instance().get_configure_value("zhiShiTuPu",
# "user") | print Environment.get_instance()._parse_start_file_name(
"F:\\newgit\\nluData\\query-crawler\\crawler\\query_crawler.py", 1)
| conditional_block | |
_pgsql.py | # The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
# http://initd.org/psycopg/docs/
import psycopg2
import select
import logger
import adm
import re
import threading
from wh import xlt, modPath
from Crypto.PublicKey._slowmath import rsa_construct
sqlKeywords=[]
moreKeywords=['serial', 'bigserial']
colKeywords=[]
def getSqlKeywords():
global colKeywords
global sqlKeywords
if not sqlKeywords:
f=open(modPath("kwlist.h", __name__))
lines=f.read()
f.close()
for line in lines.splitlines():
if line.startswith("PG_KEYWORD("):
tokens=line.split(',')
keyword=tokens[0][12:-1].lower()
# RESERVED, UNRESERVED, TYPE_FUNC_NAME, COL_NAME
if tokens[2].lstrip().startswith('COL_NAME'):
colKeywords.append(keyword)
else:
sqlKeywords.append(keyword)
colKeywords.extend(moreKeywords)
return sqlKeywords
identMatchPattern=re.compile("^[a-z][a-z0-9_]+$")
def quoteIdent(ident):
if identMatchPattern.match(ident) and ident not in getSqlKeywords():
return ident
return '"%s"' % ident.replace('"', '""')
def quoteValue(val, conn=None):
if isinstance(val, unicode): # psycopg2 quoting has some problems with unicode
return "'%s'" % val.replace("'", "''").replace("\\", "\\\\")
adapter=psycopg2.extensions.adapt(val)
if conn and hasattr(adapter, 'prepare'):
if isinstance(conn, pgConnection):
|
elif isinstance(conn, pgCursor):
conn=conn.conn.conn
adapter.prepare(conn)
return adapter.getquoted()
class SqlException(adm.ServerException):
def __init__(self, sql, error):
logger.querylog(sql, error=error)
self.error=error
self.sql=sql
Exception.__init__(self, sql, error)
def __str__(self):
return self.error
######################################################################
class pgType:
def __init__(self, row):
self.oid=row['oid']
self.name=row['typname']
self.namespace=row['nspname']
self.category=row['typcategory']
def IsNumeric(self):
return self.category == 'N'
class pgTypeCache:
def __init__(self, rowset):
self.cache={}
self.Add(rowset)
def Add(self, rowset):
if not isinstance(rowset, pgRowset):
rowset=[rowset]
typ=None
for row in rowset:
typ=pgType(row)
self.cache[typ.oid] = typ
return typ
def Get(self, oid):
return self.cache.get(oid)
######################################################################
class pgCursorResult:
def __init__(self, cursor, colNames=None):
self.cursor=cursor
if colNames:
self.colNames=colNames
else:
self.colNames=[]
for d in cursor.GetDescription():
self.colNames.append(d.name)
class pgRow(pgCursorResult):
def __init__(self, cursor, row, colNames=None):
pgCursorResult.__init__(self, cursor, colNames)
self.row=row
def getTuple(self):
return tuple(self.getList())
def getList(self):
l=[]
for i in range(len(self.colNames)):
l.append(self.getItem(i))
return l
def getDict(self):
d={}
for i in range(len(self.colNames)):
item=self.getItem(i)
# aggregate functions deliver [None] with empty left joins; we want []
if isinstance(item, list) and len(item) == 1 and item[0] == None:
item=[]
d[self.colNames[i]] = item
return d
def __str__(self):
cols=[]
for i in range(len(self.colNames)):
val=unicode(self.getItem(i))
cols.append("%s=%s" % (self.colNames[i], val))
return "( %s )" % ",".join(cols)
def hasAttr(self, colName):
try:
self.colNames.index(colName)
return True
except:
return False
def getItem(self, i):
val=self.row[i]
if isinstance(val, str):
return val.decode('utf8')
return val
def __getitem__(self, colName):
try:
if isinstance(colName, (str, unicode)):
i=self.colNames.index(colName)
else:
i=colName
return self.getItem(i)
except Exception as _e:
logger.debug("Column %s not found" % colName)
return None
class pgRowset(pgCursorResult):
def __init__(self, cursor):
pgCursorResult.__init__(self, cursor)
self.__fetchone()
def GetRowcount(self):
return self.cursor.GetRowcount()
def __fetchone(self):
if self.cursor.GetRowcount() > 0:
row = self.cursor.FetchOne()
else:
row=None
if row:
self.curRow = pgRow(self.cursor, row, self.colNames)
else:
self.curRow=None
def HasMore(self):
return self.curRow != None
def Next(self):
row=self.curRow
if row:
self.__fetchone()
return row
def getDict(self):
d={}
for row in self:
d[row[0]] = row.getDict()
return d
def getDictList(self):
d=[]
for row in self:
d.append(row.getDict())
return d
def getList(self):
d=[]
for row in self:
d.append(row[0])
return d
def __iter__(self):
class RowsetIterator:
def __init__(self, outer):
self.outer=outer
def __iter__(self):
return self
def next(self):
row=self.outer.Next()
if row:
return row
else:
raise StopIteration()
return RowsetIterator(self)
######################################################################
class pgConnection:
def __init__(self, dsn, pool=None):
self.pool=pool
self.conn=None
self.cursor=None
self.inUse=False
self.lastError=None
self.trapSqlException=True
self.conn=psycopg2.connect(dsn, async=True)
self.wait("Connect")
self.cursor=self.conn.cursor()
def disconnect(self):
self.cursor=None
if self.conn:
self.conn.close()
self.conn=None
if self.pool:
self.pool.RemoveConnection(self)
def wait(self, spot=""):
if self.conn.async:
while self.conn.isexecuting():
try:
state = self.conn.poll()
except Exception as e:
self._handleException(e)
return False
if state == psycopg2.extensions.POLL_OK:
return True
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [self.conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([self.conn.fileno()], [], [])
else:
raise adm.ConnectionException(self.node, xlt("WAIT %s" % spot), self.lastError)
return False
def _handleException(self, exception):
if self.cursor and self.cursor.query:
cmd=self.cursor.query
else:
cmd=None
exception.message=errlines=exception.message.decode('utf8')
logger.querylog(cmd, error=errlines)
if self.trapSqlException:
self.lastError=errlines
if self.pool:
self.pool.lastError=errlines
adm.StopWaiting(adm.mainframe)
if self.conn and self.conn.closed:
self.disconnect()
if self.trapSqlException:
raise SqlException(cmd, errlines)
else:
raise exception
def isRunning(self):
return self.conn.poll() != psycopg2.extensions.POLL_OK
def GetCursor(self):
return pgCursor(self)
######################################################################
class pgCursor():
def __init__(self, conn):
conn.trapSqlException=True
self.conn=conn
self.cursor=self.conn.cursor
def __del__(self):
self.Close()
def SetThrowSqlException(self, how):
"""
SetThrowSqlException(bool)
If set to false, will throw psycopg exception instead of SqlException.
Use this to catch expected exception without GUI display
"""
self.conn.trapSqlException=how
def Close(self):
if self.conn:
# logger.trace(2, 4, "RELEASING %s", str(self.conn))
self.conn.inUse=False
self.conn=None
self.cursor=None
def GetPid(self):
return self.conn.conn.get_backend_pid()
def Quote(self, val):
return quoteValue(val, self)
def GetDescription(self):
if self.cursor.description:
return self.cursor.description
return []
def GetRowcount(self):
return self.cursor.rowcount
def FetchOne(self):
row=self.cursor.fetchone()
return row
# def Rollback(self):
# self.cursor.execute("ROLLBACK")
# self.cursor.wait("ROLLBACK")
#
# def Commit(self):
# self.cursor.execute("COMMIT")
# self.cursor.wait("COMMIT")
def execute(self, cmd, args=None):
if args:
if isinstance(args, list):
args=tuple(args)
elif isinstance(args, tuple):
pass
else:
args=(args,)
try:
self.cursor.execute(cmd, args)
except Exception as e:
print "EXcept", e, unicode(e)
self.conn._handleException(e)
def wait(self, spot=""):
return self.conn.wait(spot)
def ExecuteSet(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSet")
rowset=pgRowset(self)
logger.querylog(self.cursor.query, result="%d rows" % rowset.GetRowcount())
adm.StopWaiting(frame)
return rowset
except Exception as e:
adm.StopWaiting(frame, e.error)
raise e
def ExecuteList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getList()
return None
def ExecuteDictList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getDictList()
return None
def ExecuteRow(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteRow")
row=self.cursor.fetchone()
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
row=pgRow(self, row)
logger.querylog(self.cursor.query, result=unicode(row))
return row
return None
def Execute(self, cmd, args=None, spot=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("Execute")
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
rc=self.GetRowcount()
if spot: spot += " "
else: spot=""
logger.querylog(self.cursor.query, result=spot+ xlt("%d rows") % rc)
return rc
def ExecuteSingle(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSingle")
try:
row=self.cursor.fetchone()
except Exception as _e:
#print e
row=None
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
result=row[0]
logger.querylog(self.cursor.query, result="%s" % result)
return result
else:
logger.querylog(self.cursor.query, result=xlt("no result"))
return None
def Insert(self, cmd, returning=None):
if returning:
cmd += "\nRETURNING %s" % returning
rowset=self.ExecuteSet(cmd)
if not self.GetRowcount():
return None
result=[]
for row in rowset:
line=row.getTuple()
if len(line) > 1:
result.append(line)
else:
result.append(line[0])
if len(result) > 1:
return result
else:
return result[0]
else:
self.ExecuteSingle(cmd)
return self.cursor.lastrowid
def ExecuteDict(self, cmd, args=None):
set=self.ExecuteSet(cmd, args)
d={}
for row in set:
d[row[0]] = row[1]
return d
def ExecuteAsync(self, cmd, args=None):
worker=QueryWorker(self, cmd, args)
return worker
#############################################################################
class pgConnectionPool:
def __init__(self, node, dsn):
self.node=node
self.lastError=None
self.connections=[]
self.lock=threading.Lock()
self.dsn=dsn
# create first connection to make sure params are ok
conn=self.CreateConnection()
with self.lock:
self.connections.append(conn)
def __del__(self):
self.Disconnect()
def ServerVersion(self):
if not self.connections:
return None
v=self.connections[0].conn.server_version
return int(v/10000) + ((v%10000)/100)*0.1
def HasFailed(self):
return len(self.connections) == 0
def Disconnect(self):
for conn in self.connections:
conn.disconnect()
self.connections=[]
def RemoveConnection(self, conn):
try: self.connections.remove(conn)
except: pass
def GetCursor(self):
conn=None
with self.lock:
for c in self.connections:
if not c.inUse:
conn=c
# logger.trace(2, 4, "USING %s", str(c))
c.inUse=True
break
if not conn:
conn=self.CreateConnection()
# logger.trace(2, 4, "CREATING %s", str(c))
return conn.GetCursor()
def CreateConnection(self):
try:
conn=pgConnection(self.dsn, self)
return conn
except Exception as e:
self.lastError = unicode(e)
raise adm.ConnectionException(self.node, xlt("Connect"), self.lastError)
##########################################################
class QueryWorker(threading.Thread):
def __init__(self, cursor, cmd, args):
threading.Thread.__init__(self)
self.cursor=cursor
self.cmd=cmd
self.args=args
self.running=True
def __del__(self):
self.cancel()
self.cursor=None
def run(self):
self.cancelled=False
self.error=None
try:
self.cursor.execute(self.cmd, self.args)
self.cursor.wait("AsyncWorker")
except Exception as e:
self.error=e
self.running=False
def cancel(self):
if self.running:
self.cancelled=True
self.running=False
self.cursor.conn.conn.cancel()
def GetRowcount(self):
return self.cursor.GetRowcount()
def GetResult(self):
rs=None
try:
rs=pgRowset(self.cursor)
except:
pass
self.cursor=None
return rs
def IsRunning(self):
return self.running
def Cancel(self):
if self.running:
self.cancel()
#######################################################################
class pgQuery:
def __init__(self, tab=None, cursor=None):
self.columns=[]
self.vals=[]
self.tables=[]
self.where=[]
self.order=[]
self.group=[]
self.cursor=cursor
if tab:
self.tables.append(tab)
def quoteIdent(self, identifier):
return quoteIdent(identifier)
def SetCursor(self, cursor):
self.cursor=cursor
def AddCol(self, name, quoted=False):
if name:
if isinstance(name, list):
map(lambda x: self.AddCol(x, quoted), name)
else:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
def AddColVal(self, name, val, quoted=False):
if name:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
self.vals.append(val)
def AddJoin(self, tab):
if tab:
self.tables.append("JOIN %s" % tab)
def AddLeft(self, tab):
if tab:
self.tables.append("LEFT OUTER JOIN %s" % tab)
def AddWhere(self, where, val=None):
if where:
if val:
where="%s=%s" % (quoteIdent(where), quoteValue(val))
self.where.append(where)
def AddOrder(self, order, quoted=False):
if order:
if quoted:
order=quoteIdent(order)
self.order.append(order)
def AddGroup(self, group):
if group:
self.group.append(group)
def groupJoin(self, partList, sep=', ', breakLen=80):
result=[]
line=""
for part in partList:
if line: line += "%s%s" % (sep, part)
else: line=part
if len(line) > breakLen:
result.append(line)
line=""
if line:
result.append(line)
return ",\n ".join(result)
def SelectQueryString(self):
sql=["SELECT %s" % self.groupJoin(self.columns),
" FROM %s" % "\n ".join(self.tables) ]
if self.where:
sql.append(" WHERE %s" % "\n AND ".join(self.where))
if self.group:
sql.append(" GROUP BY %s" % ", ".join(self.group))
if self.order:
sql.append(" ORDER BY %s" % ", ".join(self.order))
return "\n".join(sql)
def Select(self):
return self.cursor.ExecuteSet(self.SelectQueryString())
def Insert(self, returning=None):
if len(self.tables) != 1:
raise Exception("pgQuery: INSERT with single table only")
sql=["INSERT INTO %s (%s)" % (self.tables[0], ",".join(self.columns))]
values=[]
for col in range(len(self.columns)):
values.append("%s" % quoteValue(self.vals[col], self.cursor))
sql.append(" VALUES (%s)" % self.groupJoin(values))
return self.cursor.Insert("\n".join(sql), returning)
def Update(self):
if len(self.tables) != 1:
raise Exception("pgQuery: UPDATE with single table only")
sql=["UPDATE %s" % self.tables[0]]
cols=[]
for col in range(len(self.columns)):
val=quoteValue(self.vals[col], self.cursor)
cols.append( "%s=%s" % ( self.columns[col], val ))
sql.append(" SET %s" % self.groupJoin(cols))
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="UPDATE")
def Delete(self):
if len(self.tables) != 1:
raise Exception("pgQuery: DELETE with single table only")
sql=["DELETE FROM %s" % self.tables[0]]
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="DELETE")
| conn=conn.conn | conditional_block |
_pgsql.py | # The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
# http://initd.org/psycopg/docs/
import psycopg2
import select
import logger
import adm
import re
import threading
from wh import xlt, modPath
from Crypto.PublicKey._slowmath import rsa_construct
sqlKeywords=[]
moreKeywords=['serial', 'bigserial']
colKeywords=[]
def getSqlKeywords():
global colKeywords
global sqlKeywords
if not sqlKeywords:
f=open(modPath("kwlist.h", __name__))
lines=f.read()
f.close()
for line in lines.splitlines():
if line.startswith("PG_KEYWORD("):
tokens=line.split(',')
keyword=tokens[0][12:-1].lower()
# RESERVED, UNRESERVED, TYPE_FUNC_NAME, COL_NAME
if tokens[2].lstrip().startswith('COL_NAME'):
colKeywords.append(keyword)
else:
sqlKeywords.append(keyword)
colKeywords.extend(moreKeywords)
return sqlKeywords
identMatchPattern=re.compile("^[a-z][a-z0-9_]+$")
def quoteIdent(ident):
if identMatchPattern.match(ident) and ident not in getSqlKeywords():
return ident
return '"%s"' % ident.replace('"', '""')
def quoteValue(val, conn=None):
if isinstance(val, unicode): # psycopg2 quoting has some problems with unicode
return "'%s'" % val.replace("'", "''").replace("\\", "\\\\")
adapter=psycopg2.extensions.adapt(val)
if conn and hasattr(adapter, 'prepare'):
if isinstance(conn, pgConnection):
conn=conn.conn
elif isinstance(conn, pgCursor):
conn=conn.conn.conn
adapter.prepare(conn)
return adapter.getquoted()
class SqlException(adm.ServerException):
def __init__(self, sql, error):
logger.querylog(sql, error=error)
self.error=error
self.sql=sql
Exception.__init__(self, sql, error)
def __str__(self):
return self.error
######################################################################
class pgType:
def __init__(self, row):
self.oid=row['oid']
self.name=row['typname']
self.namespace=row['nspname']
self.category=row['typcategory']
def IsNumeric(self):
return self.category == 'N'
class pgTypeCache:
def __init__(self, rowset):
self.cache={}
self.Add(rowset)
def Add(self, rowset):
if not isinstance(rowset, pgRowset):
rowset=[rowset]
typ=None
for row in rowset:
typ=pgType(row)
self.cache[typ.oid] = typ
return typ
def Get(self, oid):
return self.cache.get(oid)
######################################################################
class pgCursorResult:
def __init__(self, cursor, colNames=None):
self.cursor=cursor
if colNames:
self.colNames=colNames
else:
self.colNames=[]
for d in cursor.GetDescription():
self.colNames.append(d.name)
class pgRow(pgCursorResult):
def __init__(self, cursor, row, colNames=None):
pgCursorResult.__init__(self, cursor, colNames)
self.row=row
def getTuple(self):
return tuple(self.getList())
def getList(self):
l=[]
for i in range(len(self.colNames)):
l.append(self.getItem(i))
return l
def getDict(self):
d={}
for i in range(len(self.colNames)):
item=self.getItem(i)
# aggregate functions deliver [None] with empty left joins; we want []
if isinstance(item, list) and len(item) == 1 and item[0] == None:
item=[]
d[self.colNames[i]] = item
return d
def __str__(self):
cols=[]
for i in range(len(self.colNames)):
val=unicode(self.getItem(i))
cols.append("%s=%s" % (self.colNames[i], val))
return "( %s )" % ",".join(cols)
def hasAttr(self, colName):
try:
self.colNames.index(colName)
return True
except:
return False
def getItem(self, i):
val=self.row[i]
if isinstance(val, str):
return val.decode('utf8')
return val
def __getitem__(self, colName):
try:
if isinstance(colName, (str, unicode)):
i=self.colNames.index(colName)
else:
i=colName
return self.getItem(i)
except Exception as _e:
logger.debug("Column %s not found" % colName)
return None
class pgRowset(pgCursorResult):
def __init__(self, cursor):
pgCursorResult.__init__(self, cursor)
self.__fetchone()
def GetRowcount(self):
return self.cursor.GetRowcount()
def __fetchone(self):
if self.cursor.GetRowcount() > 0:
row = self.cursor.FetchOne()
else:
row=None
if row:
self.curRow = pgRow(self.cursor, row, self.colNames)
else:
self.curRow=None
def HasMore(self):
return self.curRow != None
def Next(self):
row=self.curRow
if row:
self.__fetchone()
return row
def getDict(self):
d={}
for row in self:
d[row[0]] = row.getDict()
return d
def getDictList(self):
d=[]
for row in self:
d.append(row.getDict())
return d
def getList(self):
d=[]
for row in self:
d.append(row[0])
return d
def __iter__(self):
class RowsetIterator:
def __init__(self, outer):
self.outer=outer
def __iter__(self):
return self
def next(self):
row=self.outer.Next()
if row:
return row
else:
raise StopIteration()
return RowsetIterator(self)
######################################################################
class pgConnection:
def __init__(self, dsn, pool=None):
self.pool=pool
self.conn=None
self.cursor=None
self.inUse=False
self.lastError=None
self.trapSqlException=True
self.conn=psycopg2.connect(dsn, async=True)
self.wait("Connect")
self.cursor=self.conn.cursor()
def disconnect(self):
self.cursor=None
if self.conn:
self.conn.close()
self.conn=None
if self.pool:
self.pool.RemoveConnection(self)
def wait(self, spot=""):
if self.conn.async:
while self.conn.isexecuting():
try:
state = self.conn.poll()
except Exception as e:
self._handleException(e)
return False
if state == psycopg2.extensions.POLL_OK:
return True
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [self.conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([self.conn.fileno()], [], [])
else:
raise adm.ConnectionException(self.node, xlt("WAIT %s" % spot), self.lastError)
return False
def _handleException(self, exception):
if self.cursor and self.cursor.query:
cmd=self.cursor.query
else:
cmd=None
exception.message=errlines=exception.message.decode('utf8')
logger.querylog(cmd, error=errlines)
if self.trapSqlException:
self.lastError=errlines
if self.pool:
self.pool.lastError=errlines
adm.StopWaiting(adm.mainframe)
if self.conn and self.conn.closed:
self.disconnect()
if self.trapSqlException:
raise SqlException(cmd, errlines)
else:
raise exception
def isRunning(self):
return self.conn.poll() != psycopg2.extensions.POLL_OK
def GetCursor(self):
return pgCursor(self)
######################################################################
class pgCursor():
def __init__(self, conn):
conn.trapSqlException=True
self.conn=conn
self.cursor=self.conn.cursor
def __del__(self):
self.Close()
def SetThrowSqlException(self, how):
"""
SetThrowSqlException(bool)
If set to false, will throw psycopg exception instead of SqlException.
Use this to catch expected exception without GUI display
"""
self.conn.trapSqlException=how
def Close(self):
if self.conn:
# logger.trace(2, 4, "RELEASING %s", str(self.conn))
self.conn.inUse=False
self.conn=None
self.cursor=None
def GetPid(self):
return self.conn.conn.get_backend_pid()
def Quote(self, val):
return quoteValue(val, self)
def GetDescription(self):
if self.cursor.description:
return self.cursor.description
return []
def GetRowcount(self):
return self.cursor.rowcount
def FetchOne(self):
row=self.cursor.fetchone()
return row
# def Rollback(self):
# self.cursor.execute("ROLLBACK")
# self.cursor.wait("ROLLBACK")
#
# def Commit(self):
# self.cursor.execute("COMMIT")
# self.cursor.wait("COMMIT")
def execute(self, cmd, args=None):
if args:
if isinstance(args, list):
args=tuple(args)
elif isinstance(args, tuple):
pass
else:
args=(args,)
try:
self.cursor.execute(cmd, args)
except Exception as e:
print "EXcept", e, unicode(e)
self.conn._handleException(e)
def wait(self, spot=""):
return self.conn.wait(spot)
def ExecuteSet(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSet")
rowset=pgRowset(self)
logger.querylog(self.cursor.query, result="%d rows" % rowset.GetRowcount())
adm.StopWaiting(frame)
return rowset
except Exception as e:
adm.StopWaiting(frame, e.error)
raise e
def ExecuteList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getList()
return None
def ExecuteDictList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getDictList()
return None
def ExecuteRow(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteRow")
row=self.cursor.fetchone()
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
row=pgRow(self, row)
logger.querylog(self.cursor.query, result=unicode(row))
return row
return None
def Execute(self, cmd, args=None, spot=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("Execute")
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
rc=self.GetRowcount()
if spot: spot += " "
else: spot=""
logger.querylog(self.cursor.query, result=spot+ xlt("%d rows") % rc)
return rc
def ExecuteSingle(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSingle")
try:
row=self.cursor.fetchone()
except Exception as _e:
#print e
row=None
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
result=row[0]
logger.querylog(self.cursor.query, result="%s" % result)
return result
else:
logger.querylog(self.cursor.query, result=xlt("no result"))
return None
def Insert(self, cmd, returning=None):
if returning:
cmd += "\nRETURNING %s" % returning
rowset=self.ExecuteSet(cmd)
if not self.GetRowcount():
return None
result=[]
for row in rowset:
line=row.getTuple()
if len(line) > 1:
result.append(line)
else:
result.append(line[0])
if len(result) > 1:
return result
else:
return result[0]
else:
self.ExecuteSingle(cmd)
return self.cursor.lastrowid
def ExecuteDict(self, cmd, args=None):
set=self.ExecuteSet(cmd, args)
d={}
for row in set:
d[row[0]] = row[1]
return d
def ExecuteAsync(self, cmd, args=None):
worker=QueryWorker(self, cmd, args)
return worker
#############################################################################
class pgConnectionPool:
def __init__(self, node, dsn):
self.node=node
self.lastError=None
self.connections=[]
self.lock=threading.Lock()
self.dsn=dsn
# create first connection to make sure params are ok
conn=self.CreateConnection()
with self.lock:
self.connections.append(conn)
def __del__(self):
self.Disconnect() | return int(v/10000) + ((v%10000)/100)*0.1
def HasFailed(self):
return len(self.connections) == 0
def Disconnect(self):
for conn in self.connections:
conn.disconnect()
self.connections=[]
def RemoveConnection(self, conn):
try: self.connections.remove(conn)
except: pass
def GetCursor(self):
conn=None
with self.lock:
for c in self.connections:
if not c.inUse:
conn=c
# logger.trace(2, 4, "USING %s", str(c))
c.inUse=True
break
if not conn:
conn=self.CreateConnection()
# logger.trace(2, 4, "CREATING %s", str(c))
return conn.GetCursor()
def CreateConnection(self):
try:
conn=pgConnection(self.dsn, self)
return conn
except Exception as e:
self.lastError = unicode(e)
raise adm.ConnectionException(self.node, xlt("Connect"), self.lastError)
##########################################################
class QueryWorker(threading.Thread):
def __init__(self, cursor, cmd, args):
threading.Thread.__init__(self)
self.cursor=cursor
self.cmd=cmd
self.args=args
self.running=True
def __del__(self):
self.cancel()
self.cursor=None
def run(self):
self.cancelled=False
self.error=None
try:
self.cursor.execute(self.cmd, self.args)
self.cursor.wait("AsyncWorker")
except Exception as e:
self.error=e
self.running=False
def cancel(self):
if self.running:
self.cancelled=True
self.running=False
self.cursor.conn.conn.cancel()
def GetRowcount(self):
return self.cursor.GetRowcount()
def GetResult(self):
rs=None
try:
rs=pgRowset(self.cursor)
except:
pass
self.cursor=None
return rs
def IsRunning(self):
return self.running
def Cancel(self):
if self.running:
self.cancel()
#######################################################################
class pgQuery:
def __init__(self, tab=None, cursor=None):
self.columns=[]
self.vals=[]
self.tables=[]
self.where=[]
self.order=[]
self.group=[]
self.cursor=cursor
if tab:
self.tables.append(tab)
def quoteIdent(self, identifier):
return quoteIdent(identifier)
def SetCursor(self, cursor):
self.cursor=cursor
def AddCol(self, name, quoted=False):
if name:
if isinstance(name, list):
map(lambda x: self.AddCol(x, quoted), name)
else:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
def AddColVal(self, name, val, quoted=False):
if name:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
self.vals.append(val)
def AddJoin(self, tab):
if tab:
self.tables.append("JOIN %s" % tab)
def AddLeft(self, tab):
if tab:
self.tables.append("LEFT OUTER JOIN %s" % tab)
def AddWhere(self, where, val=None):
if where:
if val:
where="%s=%s" % (quoteIdent(where), quoteValue(val))
self.where.append(where)
def AddOrder(self, order, quoted=False):
if order:
if quoted:
order=quoteIdent(order)
self.order.append(order)
def AddGroup(self, group):
if group:
self.group.append(group)
def groupJoin(self, partList, sep=', ', breakLen=80):
result=[]
line=""
for part in partList:
if line: line += "%s%s" % (sep, part)
else: line=part
if len(line) > breakLen:
result.append(line)
line=""
if line:
result.append(line)
return ",\n ".join(result)
def SelectQueryString(self):
sql=["SELECT %s" % self.groupJoin(self.columns),
" FROM %s" % "\n ".join(self.tables) ]
if self.where:
sql.append(" WHERE %s" % "\n AND ".join(self.where))
if self.group:
sql.append(" GROUP BY %s" % ", ".join(self.group))
if self.order:
sql.append(" ORDER BY %s" % ", ".join(self.order))
return "\n".join(sql)
def Select(self):
return self.cursor.ExecuteSet(self.SelectQueryString())
def Insert(self, returning=None):
if len(self.tables) != 1:
raise Exception("pgQuery: INSERT with single table only")
sql=["INSERT INTO %s (%s)" % (self.tables[0], ",".join(self.columns))]
values=[]
for col in range(len(self.columns)):
values.append("%s" % quoteValue(self.vals[col], self.cursor))
sql.append(" VALUES (%s)" % self.groupJoin(values))
return self.cursor.Insert("\n".join(sql), returning)
def Update(self):
if len(self.tables) != 1:
raise Exception("pgQuery: UPDATE with single table only")
sql=["UPDATE %s" % self.tables[0]]
cols=[]
for col in range(len(self.columns)):
val=quoteValue(self.vals[col], self.cursor)
cols.append( "%s=%s" % ( self.columns[col], val ))
sql.append(" SET %s" % self.groupJoin(cols))
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="UPDATE")
def Delete(self):
if len(self.tables) != 1:
raise Exception("pgQuery: DELETE with single table only")
sql=["DELETE FROM %s" % self.tables[0]]
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="DELETE") |
def ServerVersion(self):
if not self.connections:
return None
v=self.connections[0].conn.server_version | random_line_split |
_pgsql.py | # The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
# http://initd.org/psycopg/docs/
import psycopg2
import select
import logger
import adm
import re
import threading
from wh import xlt, modPath
from Crypto.PublicKey._slowmath import rsa_construct
sqlKeywords=[]
moreKeywords=['serial', 'bigserial']
colKeywords=[]
def getSqlKeywords():
global colKeywords
global sqlKeywords
if not sqlKeywords:
f=open(modPath("kwlist.h", __name__))
lines=f.read()
f.close()
for line in lines.splitlines():
if line.startswith("PG_KEYWORD("):
tokens=line.split(',')
keyword=tokens[0][12:-1].lower()
# RESERVED, UNRESERVED, TYPE_FUNC_NAME, COL_NAME
if tokens[2].lstrip().startswith('COL_NAME'):
colKeywords.append(keyword)
else:
sqlKeywords.append(keyword)
colKeywords.extend(moreKeywords)
return sqlKeywords
identMatchPattern=re.compile("^[a-z][a-z0-9_]+$")
def quoteIdent(ident):
if identMatchPattern.match(ident) and ident not in getSqlKeywords():
return ident
return '"%s"' % ident.replace('"', '""')
def quoteValue(val, conn=None):
if isinstance(val, unicode): # psycopg2 quoting has some problems with unicode
return "'%s'" % val.replace("'", "''").replace("\\", "\\\\")
adapter=psycopg2.extensions.adapt(val)
if conn and hasattr(adapter, 'prepare'):
if isinstance(conn, pgConnection):
conn=conn.conn
elif isinstance(conn, pgCursor):
conn=conn.conn.conn
adapter.prepare(conn)
return adapter.getquoted()
class SqlException(adm.ServerException):
def __init__(self, sql, error):
logger.querylog(sql, error=error)
self.error=error
self.sql=sql
Exception.__init__(self, sql, error)
def __str__(self):
return self.error
######################################################################
class pgType:
def __init__(self, row):
self.oid=row['oid']
self.name=row['typname']
self.namespace=row['nspname']
self.category=row['typcategory']
def IsNumeric(self):
return self.category == 'N'
class pgTypeCache:
def __init__(self, rowset):
self.cache={}
self.Add(rowset)
def Add(self, rowset):
if not isinstance(rowset, pgRowset):
rowset=[rowset]
typ=None
for row in rowset:
typ=pgType(row)
self.cache[typ.oid] = typ
return typ
def Get(self, oid):
return self.cache.get(oid)
######################################################################
class pgCursorResult:
def __init__(self, cursor, colNames=None):
self.cursor=cursor
if colNames:
self.colNames=colNames
else:
self.colNames=[]
for d in cursor.GetDescription():
self.colNames.append(d.name)
class pgRow(pgCursorResult):
def __init__(self, cursor, row, colNames=None):
pgCursorResult.__init__(self, cursor, colNames)
self.row=row
def getTuple(self):
return tuple(self.getList())
def getList(self):
l=[]
for i in range(len(self.colNames)):
l.append(self.getItem(i))
return l
def getDict(self):
d={}
for i in range(len(self.colNames)):
item=self.getItem(i)
# aggregate functions deliver [None] with empty left joins; we want []
if isinstance(item, list) and len(item) == 1 and item[0] == None:
item=[]
d[self.colNames[i]] = item
return d
def __str__(self):
cols=[]
for i in range(len(self.colNames)):
val=unicode(self.getItem(i))
cols.append("%s=%s" % (self.colNames[i], val))
return "( %s )" % ",".join(cols)
def hasAttr(self, colName):
try:
self.colNames.index(colName)
return True
except:
return False
def getItem(self, i):
val=self.row[i]
if isinstance(val, str):
return val.decode('utf8')
return val
def __getitem__(self, colName):
try:
if isinstance(colName, (str, unicode)):
i=self.colNames.index(colName)
else:
i=colName
return self.getItem(i)
except Exception as _e:
logger.debug("Column %s not found" % colName)
return None
class pgRowset(pgCursorResult):
def __init__(self, cursor):
pgCursorResult.__init__(self, cursor)
self.__fetchone()
def GetRowcount(self):
return self.cursor.GetRowcount()
def __fetchone(self):
if self.cursor.GetRowcount() > 0:
row = self.cursor.FetchOne()
else:
row=None
if row:
self.curRow = pgRow(self.cursor, row, self.colNames)
else:
self.curRow=None
def HasMore(self):
return self.curRow != None
def Next(self):
row=self.curRow
if row:
self.__fetchone()
return row
def getDict(self):
d={}
for row in self:
d[row[0]] = row.getDict()
return d
def getDictList(self):
d=[]
for row in self:
d.append(row.getDict())
return d
def getList(self):
d=[]
for row in self:
d.append(row[0])
return d
def __iter__(self):
class RowsetIterator:
def __init__(self, outer):
self.outer=outer
def __iter__(self):
return self
def next(self):
row=self.outer.Next()
if row:
return row
else:
raise StopIteration()
return RowsetIterator(self)
######################################################################
class pgConnection:
def __init__(self, dsn, pool=None):
self.pool=pool
self.conn=None
self.cursor=None
self.inUse=False
self.lastError=None
self.trapSqlException=True
self.conn=psycopg2.connect(dsn, async=True)
self.wait("Connect")
self.cursor=self.conn.cursor()
def disconnect(self):
self.cursor=None
if self.conn:
self.conn.close()
self.conn=None
if self.pool:
self.pool.RemoveConnection(self)
def wait(self, spot=""):
if self.conn.async:
while self.conn.isexecuting():
try:
state = self.conn.poll()
except Exception as e:
self._handleException(e)
return False
if state == psycopg2.extensions.POLL_OK:
return True
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [self.conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([self.conn.fileno()], [], [])
else:
raise adm.ConnectionException(self.node, xlt("WAIT %s" % spot), self.lastError)
return False
def _handleException(self, exception):
if self.cursor and self.cursor.query:
cmd=self.cursor.query
else:
cmd=None
exception.message=errlines=exception.message.decode('utf8')
logger.querylog(cmd, error=errlines)
if self.trapSqlException:
self.lastError=errlines
if self.pool:
self.pool.lastError=errlines
adm.StopWaiting(adm.mainframe)
if self.conn and self.conn.closed:
self.disconnect()
if self.trapSqlException:
raise SqlException(cmd, errlines)
else:
raise exception
def isRunning(self):
return self.conn.poll() != psycopg2.extensions.POLL_OK
def GetCursor(self):
return pgCursor(self)
######################################################################
class pgCursor():
def __init__(self, conn):
conn.trapSqlException=True
self.conn=conn
self.cursor=self.conn.cursor
def __del__(self):
self.Close()
def SetThrowSqlException(self, how):
"""
SetThrowSqlException(bool)
If set to false, will throw psycopg exception instead of SqlException.
Use this to catch expected exception without GUI display
"""
self.conn.trapSqlException=how
def Close(self):
if self.conn:
# logger.trace(2, 4, "RELEASING %s", str(self.conn))
self.conn.inUse=False
self.conn=None
self.cursor=None
def GetPid(self):
return self.conn.conn.get_backend_pid()
def Quote(self, val):
return quoteValue(val, self)
def GetDescription(self):
if self.cursor.description:
return self.cursor.description
return []
def GetRowcount(self):
return self.cursor.rowcount
def FetchOne(self):
row=self.cursor.fetchone()
return row
# def Rollback(self):
# self.cursor.execute("ROLLBACK")
# self.cursor.wait("ROLLBACK")
#
# def Commit(self):
# self.cursor.execute("COMMIT")
# self.cursor.wait("COMMIT")
def execute(self, cmd, args=None):
if args:
if isinstance(args, list):
args=tuple(args)
elif isinstance(args, tuple):
pass
else:
args=(args,)
try:
self.cursor.execute(cmd, args)
except Exception as e:
print "EXcept", e, unicode(e)
self.conn._handleException(e)
def wait(self, spot=""):
return self.conn.wait(spot)
def ExecuteSet(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSet")
rowset=pgRowset(self)
logger.querylog(self.cursor.query, result="%d rows" % rowset.GetRowcount())
adm.StopWaiting(frame)
return rowset
except Exception as e:
adm.StopWaiting(frame, e.error)
raise e
def ExecuteList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getList()
return None
def ExecuteDictList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getDictList()
return None
def ExecuteRow(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteRow")
row=self.cursor.fetchone()
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
row=pgRow(self, row)
logger.querylog(self.cursor.query, result=unicode(row))
return row
return None
def Execute(self, cmd, args=None, spot=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("Execute")
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
rc=self.GetRowcount()
if spot: spot += " "
else: spot=""
logger.querylog(self.cursor.query, result=spot+ xlt("%d rows") % rc)
return rc
def ExecuteSingle(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSingle")
try:
row=self.cursor.fetchone()
except Exception as _e:
#print e
row=None
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
result=row[0]
logger.querylog(self.cursor.query, result="%s" % result)
return result
else:
logger.querylog(self.cursor.query, result=xlt("no result"))
return None
def Insert(self, cmd, returning=None):
if returning:
cmd += "\nRETURNING %s" % returning
rowset=self.ExecuteSet(cmd)
if not self.GetRowcount():
return None
result=[]
for row in rowset:
line=row.getTuple()
if len(line) > 1:
result.append(line)
else:
result.append(line[0])
if len(result) > 1:
return result
else:
return result[0]
else:
self.ExecuteSingle(cmd)
return self.cursor.lastrowid
def ExecuteDict(self, cmd, args=None):
set=self.ExecuteSet(cmd, args)
d={}
for row in set:
d[row[0]] = row[1]
return d
def ExecuteAsync(self, cmd, args=None):
worker=QueryWorker(self, cmd, args)
return worker
#############################################################################
class pgConnectionPool:
def __init__(self, node, dsn):
self.node=node
self.lastError=None
self.connections=[]
self.lock=threading.Lock()
self.dsn=dsn
# create first connection to make sure params are ok
conn=self.CreateConnection()
with self.lock:
self.connections.append(conn)
def __del__(self):
self.Disconnect()
def ServerVersion(self):
if not self.connections:
return None
v=self.connections[0].conn.server_version
return int(v/10000) + ((v%10000)/100)*0.1
def HasFailed(self):
return len(self.connections) == 0
def Disconnect(self):
for conn in self.connections:
conn.disconnect()
self.connections=[]
def RemoveConnection(self, conn):
try: self.connections.remove(conn)
except: pass
def GetCursor(self):
conn=None
with self.lock:
for c in self.connections:
if not c.inUse:
conn=c
# logger.trace(2, 4, "USING %s", str(c))
c.inUse=True
break
if not conn:
conn=self.CreateConnection()
# logger.trace(2, 4, "CREATING %s", str(c))
return conn.GetCursor()
def CreateConnection(self):
try:
conn=pgConnection(self.dsn, self)
return conn
except Exception as e:
self.lastError = unicode(e)
raise adm.ConnectionException(self.node, xlt("Connect"), self.lastError)
##########################################################
class QueryWorker(threading.Thread):
def __init__(self, cursor, cmd, args):
threading.Thread.__init__(self)
self.cursor=cursor
self.cmd=cmd
self.args=args
self.running=True
def __del__(self):
self.cancel()
self.cursor=None
def run(self):
self.cancelled=False
self.error=None
try:
self.cursor.execute(self.cmd, self.args)
self.cursor.wait("AsyncWorker")
except Exception as e:
self.error=e
self.running=False
def cancel(self):
if self.running:
self.cancelled=True
self.running=False
self.cursor.conn.conn.cancel()
def GetRowcount(self):
return self.cursor.GetRowcount()
def GetResult(self):
rs=None
try:
rs=pgRowset(self.cursor)
except:
pass
self.cursor=None
return rs
def IsRunning(self):
return self.running
def Cancel(self):
if self.running:
self.cancel()
#######################################################################
class pgQuery:
def __init__(self, tab=None, cursor=None):
self.columns=[]
self.vals=[]
self.tables=[]
self.where=[]
self.order=[]
self.group=[]
self.cursor=cursor
if tab:
self.tables.append(tab)
def quoteIdent(self, identifier):
return quoteIdent(identifier)
def SetCursor(self, cursor):
self.cursor=cursor
def AddCol(self, name, quoted=False):
if name:
if isinstance(name, list):
map(lambda x: self.AddCol(x, quoted), name)
else:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
def AddColVal(self, name, val, quoted=False):
if name:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
self.vals.append(val)
def AddJoin(self, tab):
if tab:
self.tables.append("JOIN %s" % tab)
def AddLeft(self, tab):
if tab:
self.tables.append("LEFT OUTER JOIN %s" % tab)
def AddWhere(self, where, val=None):
|
def AddOrder(self, order, quoted=False):
if order:
if quoted:
order=quoteIdent(order)
self.order.append(order)
def AddGroup(self, group):
if group:
self.group.append(group)
def groupJoin(self, partList, sep=', ', breakLen=80):
result=[]
line=""
for part in partList:
if line: line += "%s%s" % (sep, part)
else: line=part
if len(line) > breakLen:
result.append(line)
line=""
if line:
result.append(line)
return ",\n ".join(result)
def SelectQueryString(self):
sql=["SELECT %s" % self.groupJoin(self.columns),
" FROM %s" % "\n ".join(self.tables) ]
if self.where:
sql.append(" WHERE %s" % "\n AND ".join(self.where))
if self.group:
sql.append(" GROUP BY %s" % ", ".join(self.group))
if self.order:
sql.append(" ORDER BY %s" % ", ".join(self.order))
return "\n".join(sql)
def Select(self):
return self.cursor.ExecuteSet(self.SelectQueryString())
def Insert(self, returning=None):
if len(self.tables) != 1:
raise Exception("pgQuery: INSERT with single table only")
sql=["INSERT INTO %s (%s)" % (self.tables[0], ",".join(self.columns))]
values=[]
for col in range(len(self.columns)):
values.append("%s" % quoteValue(self.vals[col], self.cursor))
sql.append(" VALUES (%s)" % self.groupJoin(values))
return self.cursor.Insert("\n".join(sql), returning)
def Update(self):
if len(self.tables) != 1:
raise Exception("pgQuery: UPDATE with single table only")
sql=["UPDATE %s" % self.tables[0]]
cols=[]
for col in range(len(self.columns)):
val=quoteValue(self.vals[col], self.cursor)
cols.append( "%s=%s" % ( self.columns[col], val ))
sql.append(" SET %s" % self.groupJoin(cols))
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="UPDATE")
def Delete(self):
if len(self.tables) != 1:
raise Exception("pgQuery: DELETE with single table only")
sql=["DELETE FROM %s" % self.tables[0]]
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="DELETE")
| if where:
if val:
where="%s=%s" % (quoteIdent(where), quoteValue(val))
self.where.append(where) | identifier_body |
_pgsql.py | # The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
# http://initd.org/psycopg/docs/
import psycopg2
import select
import logger
import adm
import re
import threading
from wh import xlt, modPath
from Crypto.PublicKey._slowmath import rsa_construct
sqlKeywords=[]
moreKeywords=['serial', 'bigserial']
colKeywords=[]
def getSqlKeywords():
global colKeywords
global sqlKeywords
if not sqlKeywords:
f=open(modPath("kwlist.h", __name__))
lines=f.read()
f.close()
for line in lines.splitlines():
if line.startswith("PG_KEYWORD("):
tokens=line.split(',')
keyword=tokens[0][12:-1].lower()
# RESERVED, UNRESERVED, TYPE_FUNC_NAME, COL_NAME
if tokens[2].lstrip().startswith('COL_NAME'):
colKeywords.append(keyword)
else:
sqlKeywords.append(keyword)
colKeywords.extend(moreKeywords)
return sqlKeywords
identMatchPattern=re.compile("^[a-z][a-z0-9_]+$")
def quoteIdent(ident):
if identMatchPattern.match(ident) and ident not in getSqlKeywords():
return ident
return '"%s"' % ident.replace('"', '""')
def quoteValue(val, conn=None):
if isinstance(val, unicode): # psycopg2 quoting has some problems with unicode
return "'%s'" % val.replace("'", "''").replace("\\", "\\\\")
adapter=psycopg2.extensions.adapt(val)
if conn and hasattr(adapter, 'prepare'):
if isinstance(conn, pgConnection):
conn=conn.conn
elif isinstance(conn, pgCursor):
conn=conn.conn.conn
adapter.prepare(conn)
return adapter.getquoted()
class SqlException(adm.ServerException):
def __init__(self, sql, error):
logger.querylog(sql, error=error)
self.error=error
self.sql=sql
Exception.__init__(self, sql, error)
def __str__(self):
return self.error
######################################################################
class pgType:
def __init__(self, row):
self.oid=row['oid']
self.name=row['typname']
self.namespace=row['nspname']
self.category=row['typcategory']
def IsNumeric(self):
return self.category == 'N'
class pgTypeCache:
def __init__(self, rowset):
self.cache={}
self.Add(rowset)
def Add(self, rowset):
if not isinstance(rowset, pgRowset):
rowset=[rowset]
typ=None
for row in rowset:
typ=pgType(row)
self.cache[typ.oid] = typ
return typ
def Get(self, oid):
return self.cache.get(oid)
######################################################################
class pgCursorResult:
def __init__(self, cursor, colNames=None):
self.cursor=cursor
if colNames:
self.colNames=colNames
else:
self.colNames=[]
for d in cursor.GetDescription():
self.colNames.append(d.name)
class pgRow(pgCursorResult):
def __init__(self, cursor, row, colNames=None):
pgCursorResult.__init__(self, cursor, colNames)
self.row=row
def getTuple(self):
return tuple(self.getList())
def getList(self):
l=[]
for i in range(len(self.colNames)):
l.append(self.getItem(i))
return l
def getDict(self):
d={}
for i in range(len(self.colNames)):
item=self.getItem(i)
# aggregate functions deliver [None] with empty left joins; we want []
if isinstance(item, list) and len(item) == 1 and item[0] == None:
item=[]
d[self.colNames[i]] = item
return d
def __str__(self):
cols=[]
for i in range(len(self.colNames)):
val=unicode(self.getItem(i))
cols.append("%s=%s" % (self.colNames[i], val))
return "( %s )" % ",".join(cols)
def hasAttr(self, colName):
try:
self.colNames.index(colName)
return True
except:
return False
def getItem(self, i):
val=self.row[i]
if isinstance(val, str):
return val.decode('utf8')
return val
def __getitem__(self, colName):
try:
if isinstance(colName, (str, unicode)):
i=self.colNames.index(colName)
else:
i=colName
return self.getItem(i)
except Exception as _e:
logger.debug("Column %s not found" % colName)
return None
class pgRowset(pgCursorResult):
def __init__(self, cursor):
pgCursorResult.__init__(self, cursor)
self.__fetchone()
def GetRowcount(self):
return self.cursor.GetRowcount()
def __fetchone(self):
if self.cursor.GetRowcount() > 0:
row = self.cursor.FetchOne()
else:
row=None
if row:
self.curRow = pgRow(self.cursor, row, self.colNames)
else:
self.curRow=None
def HasMore(self):
return self.curRow != None
def Next(self):
row=self.curRow
if row:
self.__fetchone()
return row
def getDict(self):
d={}
for row in self:
d[row[0]] = row.getDict()
return d
def getDictList(self):
d=[]
for row in self:
d.append(row.getDict())
return d
def getList(self):
d=[]
for row in self:
d.append(row[0])
return d
def __iter__(self):
class RowsetIterator:
def __init__(self, outer):
self.outer=outer
def __iter__(self):
return self
def next(self):
row=self.outer.Next()
if row:
return row
else:
raise StopIteration()
return RowsetIterator(self)
######################################################################
class pgConnection:
def __init__(self, dsn, pool=None):
self.pool=pool
self.conn=None
self.cursor=None
self.inUse=False
self.lastError=None
self.trapSqlException=True
self.conn=psycopg2.connect(dsn, async=True)
self.wait("Connect")
self.cursor=self.conn.cursor()
def disconnect(self):
self.cursor=None
if self.conn:
self.conn.close()
self.conn=None
if self.pool:
self.pool.RemoveConnection(self)
def wait(self, spot=""):
if self.conn.async:
while self.conn.isexecuting():
try:
state = self.conn.poll()
except Exception as e:
self._handleException(e)
return False
if state == psycopg2.extensions.POLL_OK:
return True
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [self.conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([self.conn.fileno()], [], [])
else:
raise adm.ConnectionException(self.node, xlt("WAIT %s" % spot), self.lastError)
return False
def _handleException(self, exception):
if self.cursor and self.cursor.query:
cmd=self.cursor.query
else:
cmd=None
exception.message=errlines=exception.message.decode('utf8')
logger.querylog(cmd, error=errlines)
if self.trapSqlException:
self.lastError=errlines
if self.pool:
self.pool.lastError=errlines
adm.StopWaiting(adm.mainframe)
if self.conn and self.conn.closed:
self.disconnect()
if self.trapSqlException:
raise SqlException(cmd, errlines)
else:
raise exception
def isRunning(self):
return self.conn.poll() != psycopg2.extensions.POLL_OK
def GetCursor(self):
return pgCursor(self)
######################################################################
class pgCursor():
def __init__(self, conn):
conn.trapSqlException=True
self.conn=conn
self.cursor=self.conn.cursor
def __del__(self):
self.Close()
def SetThrowSqlException(self, how):
"""
SetThrowSqlException(bool)
If set to false, will throw psycopg exception instead of SqlException.
Use this to catch expected exception without GUI display
"""
self.conn.trapSqlException=how
def Close(self):
if self.conn:
# logger.trace(2, 4, "RELEASING %s", str(self.conn))
self.conn.inUse=False
self.conn=None
self.cursor=None
def GetPid(self):
return self.conn.conn.get_backend_pid()
def Quote(self, val):
return quoteValue(val, self)
def GetDescription(self):
if self.cursor.description:
return self.cursor.description
return []
def GetRowcount(self):
return self.cursor.rowcount
def FetchOne(self):
row=self.cursor.fetchone()
return row
# def Rollback(self):
# self.cursor.execute("ROLLBACK")
# self.cursor.wait("ROLLBACK")
#
# def Commit(self):
# self.cursor.execute("COMMIT")
# self.cursor.wait("COMMIT")
def execute(self, cmd, args=None):
if args:
if isinstance(args, list):
args=tuple(args)
elif isinstance(args, tuple):
pass
else:
args=(args,)
try:
self.cursor.execute(cmd, args)
except Exception as e:
print "EXcept", e, unicode(e)
self.conn._handleException(e)
def wait(self, spot=""):
return self.conn.wait(spot)
def ExecuteSet(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSet")
rowset=pgRowset(self)
logger.querylog(self.cursor.query, result="%d rows" % rowset.GetRowcount())
adm.StopWaiting(frame)
return rowset
except Exception as e:
adm.StopWaiting(frame, e.error)
raise e
def ExecuteList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getList()
return None
def ExecuteDictList(self, cmd, args=None):
rowset=self.ExecuteSet(cmd, args)
if rowset:
return rowset.getDictList()
return None
def ExecuteRow(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteRow")
row=self.cursor.fetchone()
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
row=pgRow(self, row)
logger.querylog(self.cursor.query, result=unicode(row))
return row
return None
def Execute(self, cmd, args=None, spot=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("Execute")
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
rc=self.GetRowcount()
if spot: spot += " "
else: spot=""
logger.querylog(self.cursor.query, result=spot+ xlt("%d rows") % rc)
return rc
def ExecuteSingle(self, cmd, args=None):
frame=adm.StartWaiting()
try:
self.execute(cmd, args)
self.wait("ExecuteSingle")
try:
row=self.cursor.fetchone()
except Exception as _e:
#print e
row=None
adm.StopWaiting(frame)
except Exception as e:
adm.StopWaiting(frame, e)
raise e
if row:
result=row[0]
logger.querylog(self.cursor.query, result="%s" % result)
return result
else:
logger.querylog(self.cursor.query, result=xlt("no result"))
return None
def Insert(self, cmd, returning=None):
if returning:
cmd += "\nRETURNING %s" % returning
rowset=self.ExecuteSet(cmd)
if not self.GetRowcount():
return None
result=[]
for row in rowset:
line=row.getTuple()
if len(line) > 1:
result.append(line)
else:
result.append(line[0])
if len(result) > 1:
return result
else:
return result[0]
else:
self.ExecuteSingle(cmd)
return self.cursor.lastrowid
def ExecuteDict(self, cmd, args=None):
set=self.ExecuteSet(cmd, args)
d={}
for row in set:
d[row[0]] = row[1]
return d
def ExecuteAsync(self, cmd, args=None):
worker=QueryWorker(self, cmd, args)
return worker
#############################################################################
class pgConnectionPool:
def __init__(self, node, dsn):
self.node=node
self.lastError=None
self.connections=[]
self.lock=threading.Lock()
self.dsn=dsn
# create first connection to make sure params are ok
conn=self.CreateConnection()
with self.lock:
self.connections.append(conn)
def __del__(self):
self.Disconnect()
def ServerVersion(self):
if not self.connections:
return None
v=self.connections[0].conn.server_version
return int(v/10000) + ((v%10000)/100)*0.1
def HasFailed(self):
return len(self.connections) == 0
def Disconnect(self):
for conn in self.connections:
conn.disconnect()
self.connections=[]
def RemoveConnection(self, conn):
try: self.connections.remove(conn)
except: pass
def GetCursor(self):
conn=None
with self.lock:
for c in self.connections:
if not c.inUse:
conn=c
# logger.trace(2, 4, "USING %s", str(c))
c.inUse=True
break
if not conn:
conn=self.CreateConnection()
# logger.trace(2, 4, "CREATING %s", str(c))
return conn.GetCursor()
def CreateConnection(self):
try:
conn=pgConnection(self.dsn, self)
return conn
except Exception as e:
self.lastError = unicode(e)
raise adm.ConnectionException(self.node, xlt("Connect"), self.lastError)
##########################################################
class QueryWorker(threading.Thread):
def __init__(self, cursor, cmd, args):
threading.Thread.__init__(self)
self.cursor=cursor
self.cmd=cmd
self.args=args
self.running=True
def __del__(self):
self.cancel()
self.cursor=None
def run(self):
self.cancelled=False
self.error=None
try:
self.cursor.execute(self.cmd, self.args)
self.cursor.wait("AsyncWorker")
except Exception as e:
self.error=e
self.running=False
def cancel(self):
if self.running:
self.cancelled=True
self.running=False
self.cursor.conn.conn.cancel()
def GetRowcount(self):
return self.cursor.GetRowcount()
def GetResult(self):
rs=None
try:
rs=pgRowset(self.cursor)
except:
pass
self.cursor=None
return rs
def IsRunning(self):
return self.running
def Cancel(self):
if self.running:
self.cancel()
#######################################################################
class | :
def __init__(self, tab=None, cursor=None):
self.columns=[]
self.vals=[]
self.tables=[]
self.where=[]
self.order=[]
self.group=[]
self.cursor=cursor
if tab:
self.tables.append(tab)
def quoteIdent(self, identifier):
return quoteIdent(identifier)
def SetCursor(self, cursor):
self.cursor=cursor
def AddCol(self, name, quoted=False):
if name:
if isinstance(name, list):
map(lambda x: self.AddCol(x, quoted), name)
else:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
def AddColVal(self, name, val, quoted=False):
if name:
if quoted:
name=quoteIdent(name)
self.columns.append(name)
self.vals.append(val)
def AddJoin(self, tab):
if tab:
self.tables.append("JOIN %s" % tab)
def AddLeft(self, tab):
if tab:
self.tables.append("LEFT OUTER JOIN %s" % tab)
def AddWhere(self, where, val=None):
if where:
if val:
where="%s=%s" % (quoteIdent(where), quoteValue(val))
self.where.append(where)
def AddOrder(self, order, quoted=False):
if order:
if quoted:
order=quoteIdent(order)
self.order.append(order)
def AddGroup(self, group):
if group:
self.group.append(group)
def groupJoin(self, partList, sep=', ', breakLen=80):
result=[]
line=""
for part in partList:
if line: line += "%s%s" % (sep, part)
else: line=part
if len(line) > breakLen:
result.append(line)
line=""
if line:
result.append(line)
return ",\n ".join(result)
def SelectQueryString(self):
sql=["SELECT %s" % self.groupJoin(self.columns),
" FROM %s" % "\n ".join(self.tables) ]
if self.where:
sql.append(" WHERE %s" % "\n AND ".join(self.where))
if self.group:
sql.append(" GROUP BY %s" % ", ".join(self.group))
if self.order:
sql.append(" ORDER BY %s" % ", ".join(self.order))
return "\n".join(sql)
def Select(self):
return self.cursor.ExecuteSet(self.SelectQueryString())
def Insert(self, returning=None):
if len(self.tables) != 1:
raise Exception("pgQuery: INSERT with single table only")
sql=["INSERT INTO %s (%s)" % (self.tables[0], ",".join(self.columns))]
values=[]
for col in range(len(self.columns)):
values.append("%s" % quoteValue(self.vals[col], self.cursor))
sql.append(" VALUES (%s)" % self.groupJoin(values))
return self.cursor.Insert("\n".join(sql), returning)
def Update(self):
if len(self.tables) != 1:
raise Exception("pgQuery: UPDATE with single table only")
sql=["UPDATE %s" % self.tables[0]]
cols=[]
for col in range(len(self.columns)):
val=quoteValue(self.vals[col], self.cursor)
cols.append( "%s=%s" % ( self.columns[col], val ))
sql.append(" SET %s" % self.groupJoin(cols))
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="UPDATE")
def Delete(self):
if len(self.tables) != 1:
raise Exception("pgQuery: DELETE with single table only")
sql=["DELETE FROM %s" % self.tables[0]]
sql.append(" WHERE %s" % "\n AND ".join(self.where))
return self.cursor.Execute("\n".join(sql), spot="DELETE")
| pgQuery | identifier_name |
2.js | var fs = require('fs');
fs.readFile('input.txt', 'utf8', function(err, contents) {
var lines = contents.split('\n');
var packageStrings = [];
// ensure we don't get any empty lines (like a trailing newline)
lines.forEach(function(line) {
if (line.length) | ;
});
calculateWrappingPaper(packageStrings);
});
var calculateWrappingPaper = function(packageStrings) {
var packages = [];
packageStrings.forEach(function(pkgString) {
packages.push(parseDimensions(pkgString));
});
console.log('Square Feet of Wrapping Paper: ' + sumAreas(packages));
console.log('Feet of Ribbon: ' + sumRibbonLength(packages));
};
// take a package string in the form of 'lxwxh' and parse it
// into its consituent integers
var parseDimensions = function(pkg) {
var split = pkg.split('x');
var box = {};
// basic sanity check
if (split.length !== 3) {
console.error('Parsed an invalid package: ' + pkg +'. Expecting format "lxwxh"!')
}
box.l = parseInt(split[0]);
box.w = parseInt(split[1]);
box.h = parseInt(split[2]);
box.smallestSideArea = findSmallestSideArea([box.l, box.w, box.h]);
box.shortestDistanceAround = findShortestDistance([box.l, box.w, box.h])
box.wrappingArea = calculateWrappingArea(box);
box.ribbonLength = calculateRibbonLength(box);
return box;
};
// given an array of [l,w,h], calculate the area of the smallest side and return it
var findSmallestSideArea = function(dimensions) {
var area;
var max = Math.max.apply(Math, dimensions);
var maxIndex = dimensions.indexOf(max);
// remove the largest size from the dimensions array
dimensions.splice(maxIndex, 1);
// return the area by multiplying the remaining sides
return dimensions[0] * dimensions[1];
};
// given a box with l,w,h and smallestArea calculate how much paper is
// required to wrap the box
var calculateWrappingArea = function(box) {
// surface area of a box = 2*l*w + 2*w*h + 2*h*l
var surfaceArea = 2 * ((box.l * box.w) + (box.w * box.h) + (box.h * box.l));
// required wrapping paper = surface area + slack of smallest side's area
return surfaceArea + box.smallestSideArea;
};
var findShortestDistance = function(dimensions) {
var area;
var max = Math.max.apply(Math, dimensions);
var maxIndex = dimensions.indexOf(max);
// remove the largest size from the dimensions array
dimensions.splice(maxIndex, 1);
return 2 * (dimensions[0] + dimensions[1]);
};
// bow length = shortestDistanceAround + cubic volume of the box (l*w*h)
var calculateRibbonLength = function(box) {
var volume = box.l * box.w * box.h;
return box.shortestDistanceAround + volume;
};
// sum the wrappingAreas of all packages
var sumAreas = function(packages) {
var sum = 0;
packages.forEach(function(box) {
sum += box.wrappingArea;
});
return sum;
};
// sum the required ribbonLength of all packages
sumRibbonLength = function(packages) {
var sum = 0;
packages.forEach(function(box) {
sum += box.ribbonLength;
});
return sum;
}
| { packageStrings.push(line) } | conditional_block |
2.js | var fs = require('fs');
fs.readFile('input.txt', 'utf8', function(err, contents) {
var lines = contents.split('\n');
var packageStrings = [];
// ensure we don't get any empty lines (like a trailing newline)
lines.forEach(function(line) {
if (line.length) { packageStrings.push(line) };
});
calculateWrappingPaper(packageStrings);
});
var calculateWrappingPaper = function(packageStrings) {
var packages = [];
packageStrings.forEach(function(pkgString) {
packages.push(parseDimensions(pkgString));
});
console.log('Square Feet of Wrapping Paper: ' + sumAreas(packages));
console.log('Feet of Ribbon: ' + sumRibbonLength(packages));
};
// take a package string in the form of 'lxwxh' and parse it
// into its consituent integers
var parseDimensions = function(pkg) {
var split = pkg.split('x');
var box = {};
// basic sanity check
if (split.length !== 3) {
console.error('Parsed an invalid package: ' + pkg +'. Expecting format "lxwxh"!')
}
box.l = parseInt(split[0]);
box.w = parseInt(split[1]);
box.h = parseInt(split[2]);
box.smallestSideArea = findSmallestSideArea([box.l, box.w, box.h]);
box.shortestDistanceAround = findShortestDistance([box.l, box.w, box.h])
box.wrappingArea = calculateWrappingArea(box);
box.ribbonLength = calculateRibbonLength(box);
return box;
};
// given an array of [l,w,h], calculate the area of the smallest side and return it
var findSmallestSideArea = function(dimensions) {
var area;
var max = Math.max.apply(Math, dimensions);
var maxIndex = dimensions.indexOf(max);
// remove the largest size from the dimensions array
dimensions.splice(maxIndex, 1);
// return the area by multiplying the remaining sides
return dimensions[0] * dimensions[1];
};
// given a box with l,w,h and smallestArea calculate how much paper is
// required to wrap the box
var calculateWrappingArea = function(box) {
// surface area of a box = 2*l*w + 2*w*h + 2*h*l
var surfaceArea = 2 * ((box.l * box.w) + (box.w * box.h) + (box.h * box.l));
// required wrapping paper = surface area + slack of smallest side's area
return surfaceArea + box.smallestSideArea;
};
var findShortestDistance = function(dimensions) {
var area;
var max = Math.max.apply(Math, dimensions);
var maxIndex = dimensions.indexOf(max);
// remove the largest size from the dimensions array
dimensions.splice(maxIndex, 1);
return 2 * (dimensions[0] + dimensions[1]);
};
// bow length = shortestDistanceAround + cubic volume of the box (l*w*h)
var calculateRibbonLength = function(box) {
var volume = box.l * box.w * box.h;
return box.shortestDistanceAround + volume;
};
// sum the wrappingAreas of all packages
var sumAreas = function(packages) { | var sum = 0;
packages.forEach(function(box) {
sum += box.wrappingArea;
});
return sum;
};
// sum the required ribbonLength of all packages
sumRibbonLength = function(packages) {
var sum = 0;
packages.forEach(function(box) {
sum += box.ribbonLength;
});
return sum;
} | random_line_split | |
instructors_activity.py | import os
import logging
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = 'Report instructors activity.'
def add_arguments(self, parser):
parser.add_argument(
'--send-out-for-real', action='store_true', default=False,
help='Send information to the instructors.',
)
parser.add_argument(
'--no-may-contact-only', action='store_true', default=False,
help='Include instructors not willing to be contacted.',
)
parser.add_argument(
'--django-mailing', action='store_true', default=False,
help='Use Django mailing system. This requires some environmental '
'variables to be set, see `settings.py`.',
)
parser.add_argument(
'-s', '--sender', action='store',
default='workshops@carpentries.org',
help='E-mail used in "from:" field.',
)
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related('person')
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=['instructor', 'helper'])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related('airport') \
.prefetch_related('task_set', 'lessons',
'award_set', 'badges')
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles) \
.select_related('event', 'role')
record = {
'person': person,
'lessons': person.lessons.all(),
'instructor_awards': person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
'tasks': zip(tasks,
self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template('mailing/instructor_activity.txt')
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return 'Updating your Software Carpentry information'
def recipient(self, record):
return record['person'].email
def send_message(self, subject, message, sender, recipient, for_real=False,
django_mailing=False):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, 'w')
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write('-' * 40 + '\n')
self.stdout.write('To: {}\n'.format(recipient))
self.stdout.write('Subject: {}\n'.format(subject))
self.stdout.write('From: {}\n'.format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + '\n')
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
| send_for_real = options['send_out_for_real']
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options['no_may_contact_only']
# use mailing options from settings.py or the `mail` system command?
django_mailing = options['django_mailing']
# verbosity option is added by Django
self.verbosity = int(options['verbosity'])
sender = options['sender']
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(subject, message, sender, recipient,
for_real=send_for_real,
django_mailing=django_mailing)
if self.verbosity >= 1:
self.stdout.write('Sent {} emails.\n'.format(len(results))) | identifier_body | |
instructors_activity.py | import os
import logging
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = 'Report instructors activity.'
def add_arguments(self, parser):
parser.add_argument(
'--send-out-for-real', action='store_true', default=False,
help='Send information to the instructors.',
)
parser.add_argument(
'--no-may-contact-only', action='store_true', default=False,
help='Include instructors not willing to be contacted.',
)
parser.add_argument(
'--django-mailing', action='store_true', default=False,
help='Use Django mailing system. This requires some environmental '
'variables to be set, see `settings.py`.',
)
parser.add_argument(
'-s', '--sender', action='store',
default='workshops@carpentries.org',
help='E-mail used in "from:" field.',
)
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related('person')
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=['instructor', 'helper'])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related('airport') \
.prefetch_related('task_set', 'lessons',
'award_set', 'badges')
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles) \
.select_related('event', 'role')
record = {
'person': person,
'lessons': person.lessons.all(),
'instructor_awards': person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
'tasks': zip(tasks,
self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template('mailing/instructor_activity.txt')
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return 'Updating your Software Carpentry information'
def recipient(self, record):
return record['person'].email
def send_message(self, subject, message, sender, recipient, for_real=False,
django_mailing=False):
if for_real:
if django_mailing:
|
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, 'w')
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write('-' * 40 + '\n')
self.stdout.write('To: {}\n'.format(recipient))
self.stdout.write('Subject: {}\n'.format(subject))
self.stdout.write('From: {}\n'.format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + '\n')
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options['send_out_for_real']
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options['no_may_contact_only']
# use mailing options from settings.py or the `mail` system command?
django_mailing = options['django_mailing']
# verbosity option is added by Django
self.verbosity = int(options['verbosity'])
sender = options['sender']
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(subject, message, sender, recipient,
for_real=send_for_real,
django_mailing=django_mailing)
if self.verbosity >= 1:
self.stdout.write('Sent {} emails.\n'.format(len(results)))
| send_mail(subject, message, sender, [recipient]) | conditional_block |
instructors_activity.py | import os
import logging
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = 'Report instructors activity.'
def add_arguments(self, parser):
parser.add_argument(
'--send-out-for-real', action='store_true', default=False,
help='Send information to the instructors.',
)
parser.add_argument(
'--no-may-contact-only', action='store_true', default=False,
help='Include instructors not willing to be contacted.',
)
parser.add_argument(
'--django-mailing', action='store_true', default=False,
help='Use Django mailing system. This requires some environmental '
'variables to be set, see `settings.py`.',
)
parser.add_argument(
'-s', '--sender', action='store',
default='workshops@carpentries.org',
help='E-mail used in "from:" field.',
)
def | (self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related('person')
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=['instructor', 'helper'])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related('airport') \
.prefetch_related('task_set', 'lessons',
'award_set', 'badges')
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles) \
.select_related('event', 'role')
record = {
'person': person,
'lessons': person.lessons.all(),
'instructor_awards': person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
'tasks': zip(tasks,
self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template('mailing/instructor_activity.txt')
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return 'Updating your Software Carpentry information'
def recipient(self, record):
return record['person'].email
def send_message(self, subject, message, sender, recipient, for_real=False,
django_mailing=False):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, 'w')
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write('-' * 40 + '\n')
self.stdout.write('To: {}\n'.format(recipient))
self.stdout.write('Subject: {}\n'.format(subject))
self.stdout.write('From: {}\n'.format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + '\n')
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options['send_out_for_real']
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options['no_may_contact_only']
# use mailing options from settings.py or the `mail` system command?
django_mailing = options['django_mailing']
# verbosity option is added by Django
self.verbosity = int(options['verbosity'])
sender = options['sender']
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(subject, message, sender, recipient,
for_real=send_for_real,
django_mailing=django_mailing)
if self.verbosity >= 1:
self.stdout.write('Sent {} emails.\n'.format(len(results)))
| foreign_tasks | identifier_name |
instructors_activity.py | import os
import logging
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = 'Report instructors activity.'
def add_arguments(self, parser):
parser.add_argument(
'--send-out-for-real', action='store_true', default=False,
help='Send information to the instructors.',
)
parser.add_argument(
'--no-may-contact-only', action='store_true', default=False,
help='Include instructors not willing to be contacted.',
)
parser.add_argument(
'--django-mailing', action='store_true', default=False,
help='Use Django mailing system. This requires some environmental '
'variables to be set, see `settings.py`.',
)
parser.add_argument(
'-s', '--sender', action='store',
default='workshops@carpentries.org',
help='E-mail used in "from:" field.',
)
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related('person')
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=['instructor', 'helper'])
instructor_badges = Badge.objects.instructor_badges()
| instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related('airport') \
.prefetch_related('task_set', 'lessons',
'award_set', 'badges')
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles) \
.select_related('event', 'role')
record = {
'person': person,
'lessons': person.lessons.all(),
'instructor_awards': person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
'tasks': zip(tasks,
self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template('mailing/instructor_activity.txt')
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return 'Updating your Software Carpentry information'
def recipient(self, record):
return record['person'].email
def send_message(self, subject, message, sender, recipient, for_real=False,
django_mailing=False):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, 'w')
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write('-' * 40 + '\n')
self.stdout.write('To: {}\n'.format(recipient))
self.stdout.write('Subject: {}\n'.format(subject))
self.stdout.write('From: {}\n'.format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + '\n')
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options['send_out_for_real']
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options['no_may_contact_only']
# use mailing options from settings.py or the `mail` system command?
django_mailing = options['django_mailing']
# verbosity option is added by Django
self.verbosity = int(options['verbosity'])
sender = options['sender']
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(subject, message, sender, recipient,
for_real=send_for_real,
django_mailing=django_mailing)
if self.verbosity >= 1:
self.stdout.write('Sent {} emails.\n'.format(len(results))) | instructors = Person.objects.filter(badges__in=instructor_badges) | random_line_split |
UrlTypeTest.ts | import UrlType from 'tinymce/themes/inlite/core/UrlType';
import { UnitTest, assert } from '@ephox/bedrock';
UnitTest.test('atomic.themes.core.UrlTypeTest', function () {
const testIsDomainLike = function () {
const mostUsedTopLevelDomains = [
'com', 'org', 'edu', 'gov', 'uk', 'net', 'ca', 'de', 'jp',
'fr', 'au', 'us', 'ru', 'ch', 'it', 'nl', 'se', 'no', 'es', 'mil'
];
assert.eq(UrlType.isDomainLike('www.site.com'), true);
assert.eq(UrlType.isDomainLike('www.site.xyz'), true);
assert.eq(UrlType.isDomainLike(' www.site.xyz'), true);
assert.eq(UrlType.isDomainLike('site.xyz'), false);
mostUsedTopLevelDomains.forEach(function (tld) {
assert.eq(UrlType.isDomainLike('site.' + tld), true);
assert.eq(UrlType.isDomainLike(' site.' + tld), true);
assert.eq(UrlType.isDomainLike('site.' + tld + ' '), true); |
const testIsAbsoluteUrl = function () {
assert.eq(UrlType.isAbsolute('http://www.site.com'), true);
assert.eq(UrlType.isAbsolute('https://www.site.com'), true);
assert.eq(UrlType.isAbsolute('www.site.com'), false);
assert.eq(UrlType.isAbsolute('file.gif'), false);
};
testIsDomainLike();
testIsAbsoluteUrl();
}); | });
assert.eq(UrlType.isDomainLike('/a/b'), false);
}; | random_line_split |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed {
return MuxCmd::Close;
}
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLOUT | EPOLLET
}
fn with_epfd(&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
| EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
// .io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
} | ::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE, | random_line_split |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed {
return MuxCmd::Close;
}
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLOUT | EPOLLET
}
fn | (&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE,
EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
// .io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
}
| with_epfd | identifier_name |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed |
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLOUT | EPOLLET
}
fn with_epfd(&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE,
EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
// .io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
}
| {
return MuxCmd::Close;
} | conditional_block |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed {
return MuxCmd::Close;
}
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind |
fn with_epfd(&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE,
EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
// .io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
}
| {
EPOLLIN | EPOLLOUT | EPOLLET
} | identifier_body |
router-config-base-manager.ts | // Copyright (c) 2018 Göran Gustafsson. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
import extend from 'extend';
import { RouterUrlParams, RouterQueryParams, RouterStateData } from './router-types';
import { RouterConfig } from './router-types';
export abstract class RouterConfigBaseManager<UP extends RouterUrlParams, QP extends RouterQueryParams, SD extends RouterStateData, CX> {
protected root: RouterConfig<UP, QP, SD, CX>;
constructor() {
this.root = {
unrouted: true,
configs: {}
};
}
protected internalAddConfig(configPathParts: string[], config: RouterConfig<UP, QP, SD, CX>): void {
let parentConfig: RouterConfig<UP, QP, SD, CX> = this.root;
for(let n = 0; n < configPathParts.length; n++) { | }
}
|
const configPathPart = configPathParts[n];
const configs = parentConfig.configs || {};
let currentConfig = configs[configPathPart];
if(!currentConfig) {
currentConfig = {
configs: {}
};
configs[configPathPart] = currentConfig;
}
if(n === configPathParts.length - 1) {
configs[configPathPart] = extend(true, currentConfig, config);
break;
}
parentConfig = currentConfig;
}
| conditional_block |
router-config-base-manager.ts | // Copyright (c) 2018 Göran Gustafsson. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
import extend from 'extend';
import { RouterUrlParams, RouterQueryParams, RouterStateData } from './router-types';
import { RouterConfig } from './router-types';
export abstract class RouterConfigBaseManager<UP extends RouterUrlParams, QP extends RouterQueryParams, SD extends RouterStateData, CX> {
| constructor() {
this.root = {
unrouted: true,
configs: {}
};
}
protected internalAddConfig(configPathParts: string[], config: RouterConfig<UP, QP, SD, CX>): void {
let parentConfig: RouterConfig<UP, QP, SD, CX> = this.root;
for(let n = 0; n < configPathParts.length; n++) {
const configPathPart = configPathParts[n];
const configs = parentConfig.configs || {};
let currentConfig = configs[configPathPart];
if(!currentConfig) {
currentConfig = {
configs: {}
};
configs[configPathPart] = currentConfig;
}
if(n === configPathParts.length - 1) {
configs[configPathPart] = extend(true, currentConfig, config);
break;
}
parentConfig = currentConfig;
}
}
} | protected root: RouterConfig<UP, QP, SD, CX>;
| random_line_split |
router-config-base-manager.ts | // Copyright (c) 2018 Göran Gustafsson. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
import extend from 'extend';
import { RouterUrlParams, RouterQueryParams, RouterStateData } from './router-types';
import { RouterConfig } from './router-types';
export abstract class RouterConfigBaseManager<UP extends RouterUrlParams, QP extends RouterQueryParams, SD extends RouterStateData, CX> {
protected root: RouterConfig<UP, QP, SD, CX>;
c | ) {
this.root = {
unrouted: true,
configs: {}
};
}
protected internalAddConfig(configPathParts: string[], config: RouterConfig<UP, QP, SD, CX>): void {
let parentConfig: RouterConfig<UP, QP, SD, CX> = this.root;
for(let n = 0; n < configPathParts.length; n++) {
const configPathPart = configPathParts[n];
const configs = parentConfig.configs || {};
let currentConfig = configs[configPathPart];
if(!currentConfig) {
currentConfig = {
configs: {}
};
configs[configPathPart] = currentConfig;
}
if(n === configPathParts.length - 1) {
configs[configPathPart] = extend(true, currentConfig, config);
break;
}
parentConfig = currentConfig;
}
}
}
| onstructor( | identifier_name |
router-config-base-manager.ts | // Copyright (c) 2018 Göran Gustafsson. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
import extend from 'extend';
import { RouterUrlParams, RouterQueryParams, RouterStateData } from './router-types';
import { RouterConfig } from './router-types';
export abstract class RouterConfigBaseManager<UP extends RouterUrlParams, QP extends RouterQueryParams, SD extends RouterStateData, CX> {
protected root: RouterConfig<UP, QP, SD, CX>;
constructor() {
this.root = {
unrouted: true,
configs: {}
};
}
protected internalAddConfig(configPathParts: string[], config: RouterConfig<UP, QP, SD, CX>): void { |
}
|
let parentConfig: RouterConfig<UP, QP, SD, CX> = this.root;
for(let n = 0; n < configPathParts.length; n++) {
const configPathPart = configPathParts[n];
const configs = parentConfig.configs || {};
let currentConfig = configs[configPathPart];
if(!currentConfig) {
currentConfig = {
configs: {}
};
configs[configPathPart] = currentConfig;
}
if(n === configPathParts.length - 1) {
configs[configPathPart] = extend(true, currentConfig, config);
break;
}
parentConfig = currentConfig;
}
}
| identifier_body |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn chunk_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?;
let path = entry.path();
let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else {
dir_tree.insert(entry_path, self.load_file(&path)?);
}
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> |
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
}
| {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
} | identifier_body |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn chunk_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?;
let path = entry.path();
let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else |
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
}
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
}
| {
dir_tree.insert(entry_path, self.load_file(&path)?);
} | conditional_block |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn chunk_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?; | let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else {
dir_tree.insert(entry_path, self.load_file(&path)?);
}
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
}
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
} | let path = entry.path(); | random_line_split |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn | <P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?;
let path = entry.path();
let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else {
dir_tree.insert(entry_path, self.load_file(&path)?);
}
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
}
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
}
| chunk_file | identifier_name |
config.py | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron import scheduler
ml2_opts = [
cfg.ListOpt('type_drivers',
default=['local', 'flat', 'vlan'],
help=_("List of network type driver entrypoints to be loaded "
"from the neutron.ml2.type_drivers namespace.")),
cfg.ListOpt('tenant_network_types',
default=['local'], | help=_("List of networking mechanism driver entrypoints to "
"be loaded from the neutron.ml2.mechanism_drivers "
"namespace.")),
]
cfg.CONF.register_opts(ml2_opts, "ml2")
cfg.CONF.register_opts(scheduler.AGENTS_SCHEDULER_OPTS) | help=_("Ordered list of network_types to allocate as tenant "
"networks.")),
cfg.ListOpt('mechanism_drivers',
default=[], | random_line_split |
hero.ts | export class Hero {
public id:number
constructor(
public firstName:string,
public lastName?:string,
public birthdate?:Date,
public url?:string,
public rate:number = 100,
id?:number) {
this.id = id != null ? id : Hero.nextId++;
}
static clone({firstName, lastName, birthdate, url, rate, id} : Hero){
return new Hero (firstName, lastName, birthdate, url, rate, id );
}
get fullName() {return `${this.firstName} ${this.lastName}`;}
|
static MockHeroes = [
new Hero(
'Hercules',
'Son of Zeus',
new Date(1970, 1, 25),
'http://www.imdb.com/title/tt0065832/',
325),
new Hero('eenie', 'toe'),
new Hero('Meanie', 'Toe'),
new Hero('Miny', 'Toe'),
new Hero('Moe', 'Toe')
];
} | static nextId = 1; | random_line_split |
hero.ts | export class | {
public id:number
constructor(
public firstName:string,
public lastName?:string,
public birthdate?:Date,
public url?:string,
public rate:number = 100,
id?:number) {
this.id = id != null ? id : Hero.nextId++;
}
static clone({firstName, lastName, birthdate, url, rate, id} : Hero){
return new Hero (firstName, lastName, birthdate, url, rate, id );
}
get fullName() {return `${this.firstName} ${this.lastName}`;}
static nextId = 1;
static MockHeroes = [
new Hero(
'Hercules',
'Son of Zeus',
new Date(1970, 1, 25),
'http://www.imdb.com/title/tt0065832/',
325),
new Hero('eenie', 'toe'),
new Hero('Meanie', 'Toe'),
new Hero('Miny', 'Toe'),
new Hero('Moe', 'Toe')
];
} | Hero | identifier_name |
hero.ts | export class Hero {
public id:number
constructor(
public firstName:string,
public lastName?:string,
public birthdate?:Date,
public url?:string,
public rate:number = 100,
id?:number) {
this.id = id != null ? id : Hero.nextId++;
}
static clone({firstName, lastName, birthdate, url, rate, id} : Hero){
return new Hero (firstName, lastName, birthdate, url, rate, id );
}
get fullName() |
static nextId = 1;
static MockHeroes = [
new Hero(
'Hercules',
'Son of Zeus',
new Date(1970, 1, 25),
'http://www.imdb.com/title/tt0065832/',
325),
new Hero('eenie', 'toe'),
new Hero('Meanie', 'Toe'),
new Hero('Miny', 'Toe'),
new Hero('Moe', 'Toe')
];
} | {return `${this.firstName} ${this.lastName}`;} | identifier_body |
label.py | # coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import datetime
from celery import group
from urlobject import URLObject
from webhookdb import db, celery
from webhookdb.process import process_label
from webhookdb.models import IssueLabel, Repository, Mutex
from webhookdb.exceptions import NotFound, StaleData, MissingData, DatabaseError
from sqlalchemy.exc import IntegrityError
from webhookdb.tasks import logger
from webhookdb.tasks.fetch import fetch_url_from_github
LOCK_TEMPLATE = "Repository|{owner}/{repo}|labels"
@celery.task(bind=True)
def sync_label(self, owner, repo, name, children=False, requestor_id=None):
|
@celery.task(bind=True)
def sync_page_of_labels(self, owner, repo, children=False, requestor_id=None,
per_page=100, page=1):
label_page_url = (
"/repos/{owner}/{repo}/labels?"
"per_page={per_page}&page={page}"
).format(
owner=owner, repo=repo,
per_page=per_page, page=page
)
resp = fetch_url_from_github(label_page_url, requestor_id=requestor_id)
fetched_at = datetime.now()
label_data_list = resp.json()
results = []
repo_id = None
for label_data in label_data_list:
try:
label = process_label(
label_data, via="api", fetched_at=fetched_at, commit=True,
repo_id=repo_id,
)
repo_id = repo_id or label.repo_id
results.append(label.name)
except IntegrityError as exc:
self.retry(exc=exc)
return results
@celery.task()
def labels_scanned(owner, repo, requestor_id=None):
"""
Update the timestamp on the repository object,
and delete old labels that weren't updated.
"""
repo_name = repo
repo = Repository.get(owner, repo_name)
prev_scan_at = repo.labels_last_scanned_at
repo.labels_last_scanned_at = datetime.now()
db.session.add(repo)
if prev_scan_at:
# delete any labels that were not updated since the previous scan --
# they have been removed from Github
query = (
Label.query.filter_by(repo_id=repo.id)
.filter(Label.last_replicated_at < prev_scan_at)
)
query.delete()
# delete the mutex
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo_name)
Mutex.query.filter_by(name=lock_name).delete()
logger.info("Lock {name} deleted".format(name=lock_name))
db.session.commit()
@celery.task()
def spawn_page_tasks_for_labels(owner, repo, children=False,
requestor_id=None, per_page=100):
# acquire lock or fail (we're already in a transaction)
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo)
existing = Mutex.query.get(lock_name)
if existing:
return False
lock = Mutex(name=lock_name, user_id=requestor_id)
db.session.add(lock)
try:
db.session.commit()
except IntegrityError:
return False
else:
logger.info("Lock {name} set by {requestor_id}".format(
name=lock_name, requestor_id=requestor_id,
))
label_list_url = (
"/repos/{owner}/{repo}/labels?per_page={per_page}"
).format(
owner=owner, repo=repo, per_page=per_page,
)
resp = fetch_url_from_github(
label_list_url, method="HEAD", requestor_id=requestor_id,
)
last_page_url = URLObject(resp.links.get('last', {}).get('url', ""))
last_page_num = int(last_page_url.query.dict.get('page', 1))
g = group(
sync_page_of_labels.s(
owner=owner, repo=repo, requestor_id=requestor_id,
per_page=per_page, page=page
) for page in xrange(1, last_page_num+1)
)
finisher = labels_scanned.si(
owner=owner, repo=repo, requestor_id=requestor_id,
)
return (g | finisher).delay()
| label_url = "/repos/{owner}/{repo}/labels/{name}".format(
owner=owner, repo=repo, name=name,
)
try:
resp = fetch_url_from_github(label_url, requestor_id=requestor_id)
except NotFound:
# add more context
msg = "Label {name} on {owner}/{repo} not found".format(
name=name, owner=owner, repo=repo,
)
raise NotFound(msg, {
"type": "label",
"name": name,
"owner": owner,
"repo": repo,
})
label_data = resp.json()
try:
label = process_label(
label_data, via="api", fetched_at=datetime.now(), commit=True,
)
except IntegrityError as exc:
# multiple workers tried to insert the same label simulataneously. Retry!
self.retry(exc=exc)
return label.name | identifier_body |
label.py | # coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import datetime
from celery import group
from urlobject import URLObject
from webhookdb import db, celery
from webhookdb.process import process_label
from webhookdb.models import IssueLabel, Repository, Mutex
from webhookdb.exceptions import NotFound, StaleData, MissingData, DatabaseError
from sqlalchemy.exc import IntegrityError
from webhookdb.tasks import logger
from webhookdb.tasks.fetch import fetch_url_from_github
LOCK_TEMPLATE = "Repository|{owner}/{repo}|labels"
@celery.task(bind=True)
def sync_label(self, owner, repo, name, children=False, requestor_id=None):
label_url = "/repos/{owner}/{repo}/labels/{name}".format(
owner=owner, repo=repo, name=name,
)
try:
resp = fetch_url_from_github(label_url, requestor_id=requestor_id)
except NotFound:
# add more context
msg = "Label {name} on {owner}/{repo} not found".format(
name=name, owner=owner, repo=repo,
)
raise NotFound(msg, {
"type": "label",
"name": name,
"owner": owner,
"repo": repo,
})
label_data = resp.json()
try:
label = process_label(
label_data, via="api", fetched_at=datetime.now(), commit=True,
)
except IntegrityError as exc:
# multiple workers tried to insert the same label simulataneously. Retry!
self.retry(exc=exc)
return label.name
@celery.task(bind=True)
def sync_page_of_labels(self, owner, repo, children=False, requestor_id=None, | label_page_url = (
"/repos/{owner}/{repo}/labels?"
"per_page={per_page}&page={page}"
).format(
owner=owner, repo=repo,
per_page=per_page, page=page
)
resp = fetch_url_from_github(label_page_url, requestor_id=requestor_id)
fetched_at = datetime.now()
label_data_list = resp.json()
results = []
repo_id = None
for label_data in label_data_list:
try:
label = process_label(
label_data, via="api", fetched_at=fetched_at, commit=True,
repo_id=repo_id,
)
repo_id = repo_id or label.repo_id
results.append(label.name)
except IntegrityError as exc:
self.retry(exc=exc)
return results
@celery.task()
def labels_scanned(owner, repo, requestor_id=None):
"""
Update the timestamp on the repository object,
and delete old labels that weren't updated.
"""
repo_name = repo
repo = Repository.get(owner, repo_name)
prev_scan_at = repo.labels_last_scanned_at
repo.labels_last_scanned_at = datetime.now()
db.session.add(repo)
if prev_scan_at:
# delete any labels that were not updated since the previous scan --
# they have been removed from Github
query = (
Label.query.filter_by(repo_id=repo.id)
.filter(Label.last_replicated_at < prev_scan_at)
)
query.delete()
# delete the mutex
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo_name)
Mutex.query.filter_by(name=lock_name).delete()
logger.info("Lock {name} deleted".format(name=lock_name))
db.session.commit()
@celery.task()
def spawn_page_tasks_for_labels(owner, repo, children=False,
requestor_id=None, per_page=100):
# acquire lock or fail (we're already in a transaction)
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo)
existing = Mutex.query.get(lock_name)
if existing:
return False
lock = Mutex(name=lock_name, user_id=requestor_id)
db.session.add(lock)
try:
db.session.commit()
except IntegrityError:
return False
else:
logger.info("Lock {name} set by {requestor_id}".format(
name=lock_name, requestor_id=requestor_id,
))
label_list_url = (
"/repos/{owner}/{repo}/labels?per_page={per_page}"
).format(
owner=owner, repo=repo, per_page=per_page,
)
resp = fetch_url_from_github(
label_list_url, method="HEAD", requestor_id=requestor_id,
)
last_page_url = URLObject(resp.links.get('last', {}).get('url', ""))
last_page_num = int(last_page_url.query.dict.get('page', 1))
g = group(
sync_page_of_labels.s(
owner=owner, repo=repo, requestor_id=requestor_id,
per_page=per_page, page=page
) for page in xrange(1, last_page_num+1)
)
finisher = labels_scanned.si(
owner=owner, repo=repo, requestor_id=requestor_id,
)
return (g | finisher).delay() | per_page=100, page=1): | random_line_split |
label.py | # coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import datetime
from celery import group
from urlobject import URLObject
from webhookdb import db, celery
from webhookdb.process import process_label
from webhookdb.models import IssueLabel, Repository, Mutex
from webhookdb.exceptions import NotFound, StaleData, MissingData, DatabaseError
from sqlalchemy.exc import IntegrityError
from webhookdb.tasks import logger
from webhookdb.tasks.fetch import fetch_url_from_github
LOCK_TEMPLATE = "Repository|{owner}/{repo}|labels"
@celery.task(bind=True)
def sync_label(self, owner, repo, name, children=False, requestor_id=None):
label_url = "/repos/{owner}/{repo}/labels/{name}".format(
owner=owner, repo=repo, name=name,
)
try:
resp = fetch_url_from_github(label_url, requestor_id=requestor_id)
except NotFound:
# add more context
msg = "Label {name} on {owner}/{repo} not found".format(
name=name, owner=owner, repo=repo,
)
raise NotFound(msg, {
"type": "label",
"name": name,
"owner": owner,
"repo": repo,
})
label_data = resp.json()
try:
label = process_label(
label_data, via="api", fetched_at=datetime.now(), commit=True,
)
except IntegrityError as exc:
# multiple workers tried to insert the same label simulataneously. Retry!
self.retry(exc=exc)
return label.name
@celery.task(bind=True)
def | (self, owner, repo, children=False, requestor_id=None,
per_page=100, page=1):
label_page_url = (
"/repos/{owner}/{repo}/labels?"
"per_page={per_page}&page={page}"
).format(
owner=owner, repo=repo,
per_page=per_page, page=page
)
resp = fetch_url_from_github(label_page_url, requestor_id=requestor_id)
fetched_at = datetime.now()
label_data_list = resp.json()
results = []
repo_id = None
for label_data in label_data_list:
try:
label = process_label(
label_data, via="api", fetched_at=fetched_at, commit=True,
repo_id=repo_id,
)
repo_id = repo_id or label.repo_id
results.append(label.name)
except IntegrityError as exc:
self.retry(exc=exc)
return results
@celery.task()
def labels_scanned(owner, repo, requestor_id=None):
"""
Update the timestamp on the repository object,
and delete old labels that weren't updated.
"""
repo_name = repo
repo = Repository.get(owner, repo_name)
prev_scan_at = repo.labels_last_scanned_at
repo.labels_last_scanned_at = datetime.now()
db.session.add(repo)
if prev_scan_at:
# delete any labels that were not updated since the previous scan --
# they have been removed from Github
query = (
Label.query.filter_by(repo_id=repo.id)
.filter(Label.last_replicated_at < prev_scan_at)
)
query.delete()
# delete the mutex
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo_name)
Mutex.query.filter_by(name=lock_name).delete()
logger.info("Lock {name} deleted".format(name=lock_name))
db.session.commit()
@celery.task()
def spawn_page_tasks_for_labels(owner, repo, children=False,
requestor_id=None, per_page=100):
# acquire lock or fail (we're already in a transaction)
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo)
existing = Mutex.query.get(lock_name)
if existing:
return False
lock = Mutex(name=lock_name, user_id=requestor_id)
db.session.add(lock)
try:
db.session.commit()
except IntegrityError:
return False
else:
logger.info("Lock {name} set by {requestor_id}".format(
name=lock_name, requestor_id=requestor_id,
))
label_list_url = (
"/repos/{owner}/{repo}/labels?per_page={per_page}"
).format(
owner=owner, repo=repo, per_page=per_page,
)
resp = fetch_url_from_github(
label_list_url, method="HEAD", requestor_id=requestor_id,
)
last_page_url = URLObject(resp.links.get('last', {}).get('url', ""))
last_page_num = int(last_page_url.query.dict.get('page', 1))
g = group(
sync_page_of_labels.s(
owner=owner, repo=repo, requestor_id=requestor_id,
per_page=per_page, page=page
) for page in xrange(1, last_page_num+1)
)
finisher = labels_scanned.si(
owner=owner, repo=repo, requestor_id=requestor_id,
)
return (g | finisher).delay()
| sync_page_of_labels | identifier_name |
label.py | # coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import datetime
from celery import group
from urlobject import URLObject
from webhookdb import db, celery
from webhookdb.process import process_label
from webhookdb.models import IssueLabel, Repository, Mutex
from webhookdb.exceptions import NotFound, StaleData, MissingData, DatabaseError
from sqlalchemy.exc import IntegrityError
from webhookdb.tasks import logger
from webhookdb.tasks.fetch import fetch_url_from_github
LOCK_TEMPLATE = "Repository|{owner}/{repo}|labels"
@celery.task(bind=True)
def sync_label(self, owner, repo, name, children=False, requestor_id=None):
label_url = "/repos/{owner}/{repo}/labels/{name}".format(
owner=owner, repo=repo, name=name,
)
try:
resp = fetch_url_from_github(label_url, requestor_id=requestor_id)
except NotFound:
# add more context
msg = "Label {name} on {owner}/{repo} not found".format(
name=name, owner=owner, repo=repo,
)
raise NotFound(msg, {
"type": "label",
"name": name,
"owner": owner,
"repo": repo,
})
label_data = resp.json()
try:
label = process_label(
label_data, via="api", fetched_at=datetime.now(), commit=True,
)
except IntegrityError as exc:
# multiple workers tried to insert the same label simulataneously. Retry!
self.retry(exc=exc)
return label.name
@celery.task(bind=True)
def sync_page_of_labels(self, owner, repo, children=False, requestor_id=None,
per_page=100, page=1):
label_page_url = (
"/repos/{owner}/{repo}/labels?"
"per_page={per_page}&page={page}"
).format(
owner=owner, repo=repo,
per_page=per_page, page=page
)
resp = fetch_url_from_github(label_page_url, requestor_id=requestor_id)
fetched_at = datetime.now()
label_data_list = resp.json()
results = []
repo_id = None
for label_data in label_data_list:
try:
label = process_label(
label_data, via="api", fetched_at=fetched_at, commit=True,
repo_id=repo_id,
)
repo_id = repo_id or label.repo_id
results.append(label.name)
except IntegrityError as exc:
self.retry(exc=exc)
return results
@celery.task()
def labels_scanned(owner, repo, requestor_id=None):
"""
Update the timestamp on the repository object,
and delete old labels that weren't updated.
"""
repo_name = repo
repo = Repository.get(owner, repo_name)
prev_scan_at = repo.labels_last_scanned_at
repo.labels_last_scanned_at = datetime.now()
db.session.add(repo)
if prev_scan_at:
# delete any labels that were not updated since the previous scan --
# they have been removed from Github
|
# delete the mutex
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo_name)
Mutex.query.filter_by(name=lock_name).delete()
logger.info("Lock {name} deleted".format(name=lock_name))
db.session.commit()
@celery.task()
def spawn_page_tasks_for_labels(owner, repo, children=False,
requestor_id=None, per_page=100):
# acquire lock or fail (we're already in a transaction)
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo)
existing = Mutex.query.get(lock_name)
if existing:
return False
lock = Mutex(name=lock_name, user_id=requestor_id)
db.session.add(lock)
try:
db.session.commit()
except IntegrityError:
return False
else:
logger.info("Lock {name} set by {requestor_id}".format(
name=lock_name, requestor_id=requestor_id,
))
label_list_url = (
"/repos/{owner}/{repo}/labels?per_page={per_page}"
).format(
owner=owner, repo=repo, per_page=per_page,
)
resp = fetch_url_from_github(
label_list_url, method="HEAD", requestor_id=requestor_id,
)
last_page_url = URLObject(resp.links.get('last', {}).get('url', ""))
last_page_num = int(last_page_url.query.dict.get('page', 1))
g = group(
sync_page_of_labels.s(
owner=owner, repo=repo, requestor_id=requestor_id,
per_page=per_page, page=page
) for page in xrange(1, last_page_num+1)
)
finisher = labels_scanned.si(
owner=owner, repo=repo, requestor_id=requestor_id,
)
return (g | finisher).delay()
| query = (
Label.query.filter_by(repo_id=repo.id)
.filter(Label.last_replicated_at < prev_scan_at)
)
query.delete() | conditional_block |
jquery.datepick-ur.js | /* http://keith-wood.name/datepick.html
Urdu localisation for jQuery Datepicker.
Mansoor Munib -- mansoormunib@gmail.com <http://www.mansoor.co.nr/mansoor.html>
Thanks to Habib Ahmed, ObaidUllah Anwar. */
(function($) {
$.datepick.regionalOptions.ur = {
monthNames: ['جنوری','فروری','مارچ','اپریل','مئی','جون',
'جولائی','اگست','ستمبر','اکتوبر','نومبر','دسمبر'],
monthNamesShort: ['1','2','3','4','5','6',
'7','8','9','10','11','12'],
dayNames: ['اتوار','پير','منگل','بدھ','جمعرات','جمعہ','ہفتہ'],
dayNamesShort: ['اتوار','پير','منگل','بدھ','جمعرات','جمعہ','ہفتہ'],
dayNamesMin: ['اتوار','پير','منگل','بدھ','جمعرات','جمعہ','ہفتہ'],
dateFormat: 'dd/mm/yyyy', firstDay: 0,
renderer: $.datepick.defaultRenderer,
prevText: '<گذشتہ', prevStatus: 'ماه گذشتہ',
prevJumpText: '<<', prevJumpStatus: 'برس گذشتہ',
nextText: 'آئندہ>', nextStatus: 'ماه آئندہ',
nextJumpText: '>>', nextJumpStatus: 'برس آئندہ',
currentText: 'رواں', currentStatus: 'ماه رواں',
todayText: 'آج', todayStatus: 'آج',
clearText: 'حذف تاريخ', clearStatus: 'کریں حذف تاریخ',
closeText: 'کریں بند', closeStatus: 'کیلئے کرنے بند',
yearStatus: 'برس تبدیلی', monthStatus: 'ماه تبدیلی',
weekText: 'ہفتہ', weekStatus: 'ہفتہ',
dayStatus: 'انتخاب D, M d', defaultStatus: 'کریں منتخب تاريخ',
isRTL: true | })(jQuery); | };
$.datepick.setDefaults($.datepick.regionalOptions.ur); | random_line_split |
views.py | import json
import logging
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
from django_future.csrf import ensure_csrf_cookie
from track.models import TrackingLog
from pytz import UTC
log = logging.getLogger("tracking")
LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', 'page', 'time', 'host']
def | (event):
"""Write tracking event to log file, and optionally to TrackingLog model."""
event_str = json.dumps(event)
log.info(event_str[:settings.TRACK_MAX_EVENT])
if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
event['time'] = dateutil.parser.parse(event['time'])
tldat = TrackingLog(**dict((x, event[x]) for x in LOGFIELDS))
try:
tldat.save()
except Exception as err:
log.exception(err)
def user_track(request):
"""
Log when POST call to "event" URL is made by a user. Uses request.REQUEST
to allow for GET calls.
GET or POST call should provide "event_type", "event", and "page" arguments.
"""
try: # TODO: Do the same for many of the optional META parameters
username = request.user.username
except:
username = "anonymous"
try:
scookie = request.META['HTTP_COOKIE'] # Get cookies
scookie = ";".join([c.split('=')[1] for c in scookie.split(";") if "sessionid" in c]).strip() # Extract session ID
except:
scookie = ""
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"session": scookie,
"ip": request.META['REMOTE_ADDR'],
"event_source": "browser",
"event_type": request.REQUEST['event_type'],
"event": request.REQUEST['event'],
"agent": agent,
"page": request.REQUEST['page'],
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
log_event(event)
return HttpResponse('success')
def server_track(request, event_type, event, page=None):
"""Log events related to server requests."""
try:
username = request.user.username
except:
username = "anonymous"
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"ip": request.META['REMOTE_ADDR'],
"event_source": "server",
"event_type": event_type,
"event": event,
"agent": agent,
"page": page,
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
if event_type.startswith("/event_logs") and request.user.is_staff: # don't log
return
log_event(event)
def task_track(request_info, task_info, event_type, event, page=None):
"""
Logs tracking information for events occuring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
"host": request_info.get('host', 'unknown')
}
log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
username = ''
if args:
for arg in args.split('/'):
if arg.isdigit():
nlen = int(arg)
if arg.startswith('username='):
username = arg[9:]
record_instances = TrackingLog.objects.all().order_by('-time')
if username:
record_instances = record_instances.filter(username=username)
record_instances = record_instances[0:nlen]
# fix dtstamp
fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z"
for rinst in record_instances:
rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)
return render_to_response('tracking_log.html', {'records': record_instances})
| log_event | identifier_name |
views.py | import json
import logging
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
from django_future.csrf import ensure_csrf_cookie
from track.models import TrackingLog
from pytz import UTC
log = logging.getLogger("tracking")
LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', 'page', 'time', 'host']
def log_event(event):
"""Write tracking event to log file, and optionally to TrackingLog model."""
event_str = json.dumps(event)
log.info(event_str[:settings.TRACK_MAX_EVENT])
if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
event['time'] = dateutil.parser.parse(event['time'])
tldat = TrackingLog(**dict((x, event[x]) for x in LOGFIELDS))
try:
tldat.save()
except Exception as err:
log.exception(err)
def user_track(request):
"""
Log when POST call to "event" URL is made by a user. Uses request.REQUEST
to allow for GET calls.
GET or POST call should provide "event_type", "event", and "page" arguments.
"""
try: # TODO: Do the same for many of the optional META parameters
username = request.user.username
except:
username = "anonymous"
try:
scookie = request.META['HTTP_COOKIE'] # Get cookies
scookie = ";".join([c.split('=')[1] for c in scookie.split(";") if "sessionid" in c]).strip() # Extract session ID
except:
scookie = ""
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"session": scookie,
"ip": request.META['REMOTE_ADDR'], | "agent": agent,
"page": request.REQUEST['page'],
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
log_event(event)
return HttpResponse('success')
def server_track(request, event_type, event, page=None):
"""Log events related to server requests."""
try:
username = request.user.username
except:
username = "anonymous"
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"ip": request.META['REMOTE_ADDR'],
"event_source": "server",
"event_type": event_type,
"event": event,
"agent": agent,
"page": page,
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
if event_type.startswith("/event_logs") and request.user.is_staff: # don't log
return
log_event(event)
def task_track(request_info, task_info, event_type, event, page=None):
"""
Logs tracking information for events occuring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
"host": request_info.get('host', 'unknown')
}
log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
username = ''
if args:
for arg in args.split('/'):
if arg.isdigit():
nlen = int(arg)
if arg.startswith('username='):
username = arg[9:]
record_instances = TrackingLog.objects.all().order_by('-time')
if username:
record_instances = record_instances.filter(username=username)
record_instances = record_instances[0:nlen]
# fix dtstamp
fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z"
for rinst in record_instances:
rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)
return render_to_response('tracking_log.html', {'records': record_instances}) | "event_source": "browser",
"event_type": request.REQUEST['event_type'],
"event": request.REQUEST['event'], | random_line_split |
views.py | import json
import logging
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
from django_future.csrf import ensure_csrf_cookie
from track.models import TrackingLog
from pytz import UTC
log = logging.getLogger("tracking")
LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', 'page', 'time', 'host']
def log_event(event):
"""Write tracking event to log file, and optionally to TrackingLog model."""
event_str = json.dumps(event)
log.info(event_str[:settings.TRACK_MAX_EVENT])
if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
|
def user_track(request):
"""
Log when POST call to "event" URL is made by a user. Uses request.REQUEST
to allow for GET calls.
GET or POST call should provide "event_type", "event", and "page" arguments.
"""
try: # TODO: Do the same for many of the optional META parameters
username = request.user.username
except:
username = "anonymous"
try:
scookie = request.META['HTTP_COOKIE'] # Get cookies
scookie = ";".join([c.split('=')[1] for c in scookie.split(";") if "sessionid" in c]).strip() # Extract session ID
except:
scookie = ""
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"session": scookie,
"ip": request.META['REMOTE_ADDR'],
"event_source": "browser",
"event_type": request.REQUEST['event_type'],
"event": request.REQUEST['event'],
"agent": agent,
"page": request.REQUEST['page'],
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
log_event(event)
return HttpResponse('success')
def server_track(request, event_type, event, page=None):
"""Log events related to server requests."""
try:
username = request.user.username
except:
username = "anonymous"
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"ip": request.META['REMOTE_ADDR'],
"event_source": "server",
"event_type": event_type,
"event": event,
"agent": agent,
"page": page,
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
if event_type.startswith("/event_logs") and request.user.is_staff: # don't log
return
log_event(event)
def task_track(request_info, task_info, event_type, event, page=None):
"""
Logs tracking information for events occuring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
"host": request_info.get('host', 'unknown')
}
log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
username = ''
if args:
for arg in args.split('/'):
if arg.isdigit():
nlen = int(arg)
if arg.startswith('username='):
username = arg[9:]
record_instances = TrackingLog.objects.all().order_by('-time')
if username:
record_instances = record_instances.filter(username=username)
record_instances = record_instances[0:nlen]
# fix dtstamp
fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z"
for rinst in record_instances:
rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)
return render_to_response('tracking_log.html', {'records': record_instances})
| event['time'] = dateutil.parser.parse(event['time'])
tldat = TrackingLog(**dict((x, event[x]) for x in LOGFIELDS))
try:
tldat.save()
except Exception as err:
log.exception(err) | conditional_block |
views.py | import json
import logging
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
from django_future.csrf import ensure_csrf_cookie
from track.models import TrackingLog
from pytz import UTC
log = logging.getLogger("tracking")
LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', 'page', 'time', 'host']
def log_event(event):
"""Write tracking event to log file, and optionally to TrackingLog model."""
event_str = json.dumps(event)
log.info(event_str[:settings.TRACK_MAX_EVENT])
if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
event['time'] = dateutil.parser.parse(event['time'])
tldat = TrackingLog(**dict((x, event[x]) for x in LOGFIELDS))
try:
tldat.save()
except Exception as err:
log.exception(err)
def user_track(request):
|
def server_track(request, event_type, event, page=None):
"""Log events related to server requests."""
try:
username = request.user.username
except:
username = "anonymous"
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"ip": request.META['REMOTE_ADDR'],
"event_source": "server",
"event_type": event_type,
"event": event,
"agent": agent,
"page": page,
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
if event_type.startswith("/event_logs") and request.user.is_staff: # don't log
return
log_event(event)
def task_track(request_info, task_info, event_type, event, page=None):
"""
Logs tracking information for events occuring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
"host": request_info.get('host', 'unknown')
}
log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
username = ''
if args:
for arg in args.split('/'):
if arg.isdigit():
nlen = int(arg)
if arg.startswith('username='):
username = arg[9:]
record_instances = TrackingLog.objects.all().order_by('-time')
if username:
record_instances = record_instances.filter(username=username)
record_instances = record_instances[0:nlen]
# fix dtstamp
fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z"
for rinst in record_instances:
rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)
return render_to_response('tracking_log.html', {'records': record_instances})
| """
Log when POST call to "event" URL is made by a user. Uses request.REQUEST
to allow for GET calls.
GET or POST call should provide "event_type", "event", and "page" arguments.
"""
try: # TODO: Do the same for many of the optional META parameters
username = request.user.username
except:
username = "anonymous"
try:
scookie = request.META['HTTP_COOKIE'] # Get cookies
scookie = ";".join([c.split('=')[1] for c in scookie.split(";") if "sessionid" in c]).strip() # Extract session ID
except:
scookie = ""
try:
agent = request.META['HTTP_USER_AGENT']
except:
agent = ''
event = {
"username": username,
"session": scookie,
"ip": request.META['REMOTE_ADDR'],
"event_source": "browser",
"event_type": request.REQUEST['event_type'],
"event": request.REQUEST['event'],
"agent": agent,
"page": request.REQUEST['page'],
"time": datetime.datetime.now(UTC).isoformat(),
"host": request.META['SERVER_NAME'],
}
log_event(event)
return HttpResponse('success') | identifier_body |
upload_training_data.py | import json
import os
import glob
import sys
import logging
from watson_developer_cloud import WatsonException
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts'))
from discovery_setup_utils import ( # noqa
discovery,
curdir,
get_constants,
write_progress
)
|
# set the TRAINING_PATH to the location of the training data relative
# to the 'data' directory
# by default, evaluates to <DATA_TYPE>/training
TRAINING_PATH = os.path.join(DATA_TYPE, 'training')
DATA_DIRECTORY = os.path.abspath(os.path.join(curdir, '..', 'data'))
TRAINING_DIRECTORY = os.path.join(DATA_DIRECTORY, TRAINING_PATH)
LOG_FILE_PATH = os.path.join(DATA_DIRECTORY, 'training_upload.log')
logging.basicConfig(filename=LOG_FILE_PATH,
filemode='w',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
def upload_training_doc(training_json, environment_id, collection_id):
try:
r = discovery.add_training_data_query(
environment_id=environment_id,
collection_id=collection_id,
natural_language_query=training_json['natural_language_query'],
examples=training_json['examples'])
logging.info("Response:\n%s", json.dumps(r, indent=4))
except WatsonException as exception:
logging.error(exception)
def upload_training_data(training_directory):
print("Training directory: %s" % training_directory)
files = glob.glob(os.path.join(training_directory, '*.json'))
total_files = len(files)
print("Number of files to process: %d" % total_files)
training_data_uploaded = 0
done_percent = 0
write_progress(training_data_uploaded, total_files)
for file in files:
with open(file, 'rb') as file_object:
logging.info("Processing file: %s", file_object.name)
upload_training_doc(
json.loads(file_object.read()),
discovery_constants['environment_id'],
discovery_constants['collection_id']['trained']
)
training_data_uploaded += 1
done_percent = write_progress(training_data_uploaded,
total_files,
done_percent)
logging.info("Finished uploading %d files", total_files)
print("\nFinished uploading %d files" % total_files)
print('Retrieving environment and collection constants...')
"""
retrieve the following:
{
environment_id: env_id,
collection_id: {
trained: trained_id
}
}
"""
discovery_constants = get_constants(
discovery,
trained_name=os.getenv(
'DISCOVERY_TRAINED_COLLECTION_NAME',
'knowledge_base_trained'
)
)
print('Constants retrieved!')
print(discovery_constants)
print("Log file located at: %s" % LOG_FILE_PATH)
upload_training_data(TRAINING_DIRECTORY) | # set the DATA_TYPE the same to what was downloaded
DATA_TYPE = 'travel' | random_line_split |
upload_training_data.py | import json
import os
import glob
import sys
import logging
from watson_developer_cloud import WatsonException
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts'))
from discovery_setup_utils import ( # noqa
discovery,
curdir,
get_constants,
write_progress
)
# set the DATA_TYPE the same to what was downloaded
DATA_TYPE = 'travel'
# set the TRAINING_PATH to the location of the training data relative
# to the 'data' directory
# by default, evaluates to <DATA_TYPE>/training
TRAINING_PATH = os.path.join(DATA_TYPE, 'training')
DATA_DIRECTORY = os.path.abspath(os.path.join(curdir, '..', 'data'))
TRAINING_DIRECTORY = os.path.join(DATA_DIRECTORY, TRAINING_PATH)
LOG_FILE_PATH = os.path.join(DATA_DIRECTORY, 'training_upload.log')
logging.basicConfig(filename=LOG_FILE_PATH,
filemode='w',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
def upload_training_doc(training_json, environment_id, collection_id):
try:
r = discovery.add_training_data_query(
environment_id=environment_id,
collection_id=collection_id,
natural_language_query=training_json['natural_language_query'],
examples=training_json['examples'])
logging.info("Response:\n%s", json.dumps(r, indent=4))
except WatsonException as exception:
logging.error(exception)
def | (training_directory):
print("Training directory: %s" % training_directory)
files = glob.glob(os.path.join(training_directory, '*.json'))
total_files = len(files)
print("Number of files to process: %d" % total_files)
training_data_uploaded = 0
done_percent = 0
write_progress(training_data_uploaded, total_files)
for file in files:
with open(file, 'rb') as file_object:
logging.info("Processing file: %s", file_object.name)
upload_training_doc(
json.loads(file_object.read()),
discovery_constants['environment_id'],
discovery_constants['collection_id']['trained']
)
training_data_uploaded += 1
done_percent = write_progress(training_data_uploaded,
total_files,
done_percent)
logging.info("Finished uploading %d files", total_files)
print("\nFinished uploading %d files" % total_files)
print('Retrieving environment and collection constants...')
"""
retrieve the following:
{
environment_id: env_id,
collection_id: {
trained: trained_id
}
}
"""
discovery_constants = get_constants(
discovery,
trained_name=os.getenv(
'DISCOVERY_TRAINED_COLLECTION_NAME',
'knowledge_base_trained'
)
)
print('Constants retrieved!')
print(discovery_constants)
print("Log file located at: %s" % LOG_FILE_PATH)
upload_training_data(TRAINING_DIRECTORY)
| upload_training_data | identifier_name |
upload_training_data.py | import json
import os
import glob
import sys
import logging
from watson_developer_cloud import WatsonException
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
|
from discovery_setup_utils import ( # noqa
discovery,
curdir,
get_constants,
write_progress
)
# set the DATA_TYPE the same to what was downloaded
DATA_TYPE = 'travel'
# set the TRAINING_PATH to the location of the training data relative
# to the 'data' directory
# by default, evaluates to <DATA_TYPE>/training
TRAINING_PATH = os.path.join(DATA_TYPE, 'training')
DATA_DIRECTORY = os.path.abspath(os.path.join(curdir, '..', 'data'))
TRAINING_DIRECTORY = os.path.join(DATA_DIRECTORY, TRAINING_PATH)
LOG_FILE_PATH = os.path.join(DATA_DIRECTORY, 'training_upload.log')
logging.basicConfig(filename=LOG_FILE_PATH,
filemode='w',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
def upload_training_doc(training_json, environment_id, collection_id):
try:
r = discovery.add_training_data_query(
environment_id=environment_id,
collection_id=collection_id,
natural_language_query=training_json['natural_language_query'],
examples=training_json['examples'])
logging.info("Response:\n%s", json.dumps(r, indent=4))
except WatsonException as exception:
logging.error(exception)
def upload_training_data(training_directory):
print("Training directory: %s" % training_directory)
files = glob.glob(os.path.join(training_directory, '*.json'))
total_files = len(files)
print("Number of files to process: %d" % total_files)
training_data_uploaded = 0
done_percent = 0
write_progress(training_data_uploaded, total_files)
for file in files:
with open(file, 'rb') as file_object:
logging.info("Processing file: %s", file_object.name)
upload_training_doc(
json.loads(file_object.read()),
discovery_constants['environment_id'],
discovery_constants['collection_id']['trained']
)
training_data_uploaded += 1
done_percent = write_progress(training_data_uploaded,
total_files,
done_percent)
logging.info("Finished uploading %d files", total_files)
print("\nFinished uploading %d files" % total_files)
print('Retrieving environment and collection constants...')
"""
retrieve the following:
{
environment_id: env_id,
collection_id: {
trained: trained_id
}
}
"""
discovery_constants = get_constants(
discovery,
trained_name=os.getenv(
'DISCOVERY_TRAINED_COLLECTION_NAME',
'knowledge_base_trained'
)
)
print('Constants retrieved!')
print(discovery_constants)
print("Log file located at: %s" % LOG_FILE_PATH)
upload_training_data(TRAINING_DIRECTORY)
| sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts')) | conditional_block |
upload_training_data.py | import json
import os
import glob
import sys
import logging
from watson_developer_cloud import WatsonException
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts'))
from discovery_setup_utils import ( # noqa
discovery,
curdir,
get_constants,
write_progress
)
# set the DATA_TYPE the same to what was downloaded
DATA_TYPE = 'travel'
# set the TRAINING_PATH to the location of the training data relative
# to the 'data' directory
# by default, evaluates to <DATA_TYPE>/training
TRAINING_PATH = os.path.join(DATA_TYPE, 'training')
DATA_DIRECTORY = os.path.abspath(os.path.join(curdir, '..', 'data'))
TRAINING_DIRECTORY = os.path.join(DATA_DIRECTORY, TRAINING_PATH)
LOG_FILE_PATH = os.path.join(DATA_DIRECTORY, 'training_upload.log')
logging.basicConfig(filename=LOG_FILE_PATH,
filemode='w',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
def upload_training_doc(training_json, environment_id, collection_id):
try:
r = discovery.add_training_data_query(
environment_id=environment_id,
collection_id=collection_id,
natural_language_query=training_json['natural_language_query'],
examples=training_json['examples'])
logging.info("Response:\n%s", json.dumps(r, indent=4))
except WatsonException as exception:
logging.error(exception)
def upload_training_data(training_directory):
|
print('Retrieving environment and collection constants...')
"""
retrieve the following:
{
environment_id: env_id,
collection_id: {
trained: trained_id
}
}
"""
discovery_constants = get_constants(
discovery,
trained_name=os.getenv(
'DISCOVERY_TRAINED_COLLECTION_NAME',
'knowledge_base_trained'
)
)
print('Constants retrieved!')
print(discovery_constants)
print("Log file located at: %s" % LOG_FILE_PATH)
upload_training_data(TRAINING_DIRECTORY)
| print("Training directory: %s" % training_directory)
files = glob.glob(os.path.join(training_directory, '*.json'))
total_files = len(files)
print("Number of files to process: %d" % total_files)
training_data_uploaded = 0
done_percent = 0
write_progress(training_data_uploaded, total_files)
for file in files:
with open(file, 'rb') as file_object:
logging.info("Processing file: %s", file_object.name)
upload_training_doc(
json.loads(file_object.read()),
discovery_constants['environment_id'],
discovery_constants['collection_id']['trained']
)
training_data_uploaded += 1
done_percent = write_progress(training_data_uploaded,
total_files,
done_percent)
logging.info("Finished uploading %d files", total_files)
print("\nFinished uploading %d files" % total_files) | identifier_body |
test_compute_code.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from bs4 import BeautifulSoup
from compute.code import CodeExtractor
logging.basicConfig(level=logging.INFO, format="%(message)s")
class ExtractCodeTest(unittest.TestCase):
def setUp(self):
self.code_extractor = CodeExtractor()
def _extract_code(self, document):
return self.code_extractor.extract(document)
def _make_document_with_body(self, body):
return BeautifulSoup('\n'.join([
"<html>",
" <body>",
body,
" </body>",
"</html>",
]), 'html.parser')
def test_extract_valid_javascript(self):
document = self._make_document_with_body("<code>var i = 0;</code")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0;")
def test_extract_valid_javascript_with_padding(self):
# In the past, some parsers I have used have had trouble parsing with whitespace
# surrounding the parsed content. This is a sanity test to make sure that the
# backend parser will still detect JavaScript padded with whitespace.
document = self._make_document_with_body("<code>\n\n\t var i = 0;\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], "\n\n\t var i = 0;\t \n")
def test_extract_valid_multiline_javascript(self):
document = self._make_document_with_body('\n'.join([
"<code>for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], '\n'.join([
"for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}",
]))
def test_extract_multiple_blocks(self):
document = self._make_document_with_body('\n'.join([
"<code>var i = 0;</code>",
"<code>i = i + 1;</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 2)
self.assertIn("var i = 0;", snippets)
self.assertIn("i = i + 1;", snippets)
def test_fail_to_detect_text_in_code_block(self):
document = self._make_document_with_body("<code>This is a plain English sentence.</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_fail_to_detect_command_line(self):
|
def test_skip_whitespace_only(self):
document = self._make_document_with_body("<code>\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
# In practice I don't expect the next two scenarios to come up. But the expected behavior of
# the code extractor is to scan children of all nodes that are marked as invalid. This
# test makes sure that functionality is correct.
def test_skip_child_of_code_block_parent(self):
document = self._make_document_with_body('\n'.join([
"<code>",
"var outer = 0;",
"<code>var inner = 1;</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], '\n'.join([
"",
"var outer = 0;",
"var inner = 1;",
"",
]))
def test_detect_code_block_nested_inside_invalid_code_block(self):
document = self._make_document_with_body('\n'.join([
"<code>",
" This plaintext invalidates this block as a whole.",
" <code>var i = 0; // But this child will be valid</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0; // But this child will be valid")
| document = self._make_document_with_body("<code>npm install package</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0) | identifier_body |
test_compute_code.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from bs4 import BeautifulSoup
from compute.code import CodeExtractor
logging.basicConfig(level=logging.INFO, format="%(message)s")
class ExtractCodeTest(unittest.TestCase):
def setUp(self):
self.code_extractor = CodeExtractor()
def _extract_code(self, document):
return self.code_extractor.extract(document)
def _make_document_with_body(self, body):
return BeautifulSoup('\n'.join([
"<html>",
" <body>",
body,
" </body>",
"</html>",
]), 'html.parser')
def test_extract_valid_javascript(self):
document = self._make_document_with_body("<code>var i = 0;</code")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0;")
def test_extract_valid_javascript_with_padding(self):
# In the past, some parsers I have used have had trouble parsing with whitespace
# surrounding the parsed content. This is a sanity test to make sure that the
# backend parser will still detect JavaScript padded with whitespace.
document = self._make_document_with_body("<code>\n\n\t var i = 0;\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], "\n\n\t var i = 0;\t \n")
def test_extract_valid_multiline_javascript(self):
document = self._make_document_with_body('\n'.join([
"<code>for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], '\n'.join([
"for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}", | ]))
def test_extract_multiple_blocks(self):
document = self._make_document_with_body('\n'.join([
"<code>var i = 0;</code>",
"<code>i = i + 1;</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 2)
self.assertIn("var i = 0;", snippets)
self.assertIn("i = i + 1;", snippets)
def test_fail_to_detect_text_in_code_block(self):
document = self._make_document_with_body("<code>This is a plain English sentence.</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_fail_to_detect_command_line(self):
document = self._make_document_with_body("<code>npm install package</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_skip_whitespace_only(self):
document = self._make_document_with_body("<code>\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
# In practice I don't expect the next two scenarios to come up. But the expected behavior of
# the code extractor is to scan children of all nodes that are marked as invalid. This
# test makes sure that functionality is correct.
def test_skip_child_of_code_block_parent(self):
document = self._make_document_with_body('\n'.join([
"<code>",
"var outer = 0;",
"<code>var inner = 1;</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], '\n'.join([
"",
"var outer = 0;",
"var inner = 1;",
"",
]))
def test_detect_code_block_nested_inside_invalid_code_block(self):
document = self._make_document_with_body('\n'.join([
"<code>",
" This plaintext invalidates this block as a whole.",
" <code>var i = 0; // But this child will be valid</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0; // But this child will be valid") | random_line_split | |
test_compute_code.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from bs4 import BeautifulSoup
from compute.code import CodeExtractor
logging.basicConfig(level=logging.INFO, format="%(message)s")
class ExtractCodeTest(unittest.TestCase):
def setUp(self):
self.code_extractor = CodeExtractor()
def _extract_code(self, document):
return self.code_extractor.extract(document)
def _make_document_with_body(self, body):
return BeautifulSoup('\n'.join([
"<html>",
" <body>",
body,
" </body>",
"</html>",
]), 'html.parser')
def test_extract_valid_javascript(self):
document = self._make_document_with_body("<code>var i = 0;</code")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0;")
def | (self):
# In the past, some parsers I have used have had trouble parsing with whitespace
# surrounding the parsed content. This is a sanity test to make sure that the
# backend parser will still detect JavaScript padded with whitespace.
document = self._make_document_with_body("<code>\n\n\t var i = 0;\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], "\n\n\t var i = 0;\t \n")
def test_extract_valid_multiline_javascript(self):
document = self._make_document_with_body('\n'.join([
"<code>for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], '\n'.join([
"for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}",
]))
def test_extract_multiple_blocks(self):
document = self._make_document_with_body('\n'.join([
"<code>var i = 0;</code>",
"<code>i = i + 1;</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 2)
self.assertIn("var i = 0;", snippets)
self.assertIn("i = i + 1;", snippets)
def test_fail_to_detect_text_in_code_block(self):
document = self._make_document_with_body("<code>This is a plain English sentence.</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_fail_to_detect_command_line(self):
document = self._make_document_with_body("<code>npm install package</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_skip_whitespace_only(self):
document = self._make_document_with_body("<code>\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
# In practice I don't expect the next two scenarios to come up. But the expected behavior of
# the code extractor is to scan children of all nodes that are marked as invalid. This
# test makes sure that functionality is correct.
def test_skip_child_of_code_block_parent(self):
document = self._make_document_with_body('\n'.join([
"<code>",
"var outer = 0;",
"<code>var inner = 1;</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], '\n'.join([
"",
"var outer = 0;",
"var inner = 1;",
"",
]))
def test_detect_code_block_nested_inside_invalid_code_block(self):
document = self._make_document_with_body('\n'.join([
"<code>",
" This plaintext invalidates this block as a whole.",
" <code>var i = 0; // But this child will be valid</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0; // But this child will be valid")
| test_extract_valid_javascript_with_padding | identifier_name |
minimatch.js | module.exports = minimatch
minimatch.Minimatch = Minimatch
var path = { sep: '/' }
try {
path = require('path')
} catch (er) {}
var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {}
var expand = require('brace-expansion')
// any single thing other than /
// don't need to escape / when using new RegExp()
var qmark = '[^/]'
// * => any number of characters
var star = qmark + '*?'
// ** when dots are allowed. Anything goes, except .. and .
// not (^ or / followed by one or two dots followed by $ or /),
// followed by anything, any number of times.
var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?'
// not a ^ or / followed by a dot,
// followed by anything, any number of times.
var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?'
// characters that need to be escaped in RegExp.
var reSpecials = charSet('().*{}+?[]^$\\!')
// "abc" -> { a:true, b:true, c:true }
function charSet (s) |
// normalizes slashes.
var slashSplit = /\/+/
minimatch.filter = filter
function filter (pattern, options) {
options = options || {}
return function (p, i, list) {
return minimatch(p, pattern, options)
}
}
function ext (a, b) {
a = a || {}
b = b || {}
var t = {}
Object.keys(b).forEach(function (k) {
t[k] = b[k]
})
Object.keys(a).forEach(function (k) {
t[k] = a[k]
})
return t
}
minimatch.defaults = function (def) {
if (!def || !Object.keys(def).length) return minimatch
var orig = minimatch
var m = function minimatch (p, pattern, options) {
return orig.minimatch(p, pattern, ext(def, options))
}
m.Minimatch = function Minimatch (pattern, options) {
return new orig.Minimatch(pattern, ext(def, options))
}
return m
}
Minimatch.defaults = function (def) {
if (!def || !Object.keys(def).length) return Minimatch
return minimatch.defaults(def).Minimatch
}
function minimatch (p, pattern, options) {
if (typeof pattern !== 'string') {
throw new TypeError('glob pattern string required')
}
if (!options) options = {}
// shortcut: comments match nothing.
if (!options.nocomment && pattern.charAt(0) === '#') {
return false
}
// "" only matches ""
if (pattern.trim() === '') return p === ''
return new Minimatch(pattern, options).match(p)
}
function Minimatch (pattern, options) {
if (!(this instanceof Minimatch)) {
return new Minimatch(pattern, options)
}
if (typeof pattern !== 'string') {
throw new TypeError('glob pattern string required')
}
if (!options) options = {}
pattern = pattern.trim()
// windows support: need to use /, not \
if (path.sep !== '/') {
pattern = pattern.split(path.sep).join('/')
}
this.options = options
this.set = []
this.pattern = pattern
this.regexp = null
this.negate = false
this.comment = false
this.empty = false
// make the set of regexps etc.
this.make()
}
Minimatch.prototype.debug = function () {}
Minimatch.prototype.make = make
function make () {
// don't do it more than once.
if (this._made) return
var pattern = this.pattern
var options = this.options
// empty patterns and comments match nothing.
if (!options.nocomment && pattern.charAt(0) === '#') {
this.comment = true
return
}
if (!pattern) {
this.empty = true
return
}
// step 1: figure out negation, etc.
this.parseNegate()
// step 2: expand braces
var set = this.globSet = this.braceExpand()
if (options.debug) this.debug = console.error
this.debug(this.pattern, set)
// step 3: now we have a set, so turn each one into a series of path-portion
// matching patterns.
// These will be regexps, except in the case of "**", which is
// set to the GLOBSTAR object for globstar behavior,
// and will not contain any / characters
set = this.globParts = set.map(function (s) {
return s.split(slashSplit)
})
this.debug(this.pattern, set)
// glob --> regexps
set = set.map(function (s, si, set) {
return s.map(this.parse, this)
}, this)
this.debug(this.pattern, set)
// filter out everything that didn't compile properly.
set = set.filter(function (s) {
return s.indexOf(false) === -1
})
this.debug(this.pattern, set)
this.set = set
}
Minimatch.prototype.parseNegate = parseNegate
function parseNegate () {
var pattern = this.pattern
var negate = false
var options = this.options
var negateOffset = 0
if (options.nonegate) return
for (var i = 0, l = pattern.length
; i < l && pattern.charAt(i) === '!'
; i++) {
negate = !negate
negateOffset++
}
if (negateOffset) this.pattern = pattern.substr(negateOffset)
this.negate = negate
}
// Brace expansion:
// a{b,c}d -> abd acd
// a{b,}c -> abc ac
// a{0..3}d -> a0d a1d a2d a3d
// a{b,c{d,e}f}g -> abg acdfg acefg
// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg
//
// Invalid sets are not expanded.
// a{2..}b -> a{2..}b
// a{b}c -> a{b}c
minimatch.braceExpand = function (pattern, options) {
return braceExpand(pattern, options)
}
Minimatch.prototype.braceExpand = braceExpand
function braceExpand (pattern, options) {
if (!options) {
if (this instanceof Minimatch) {
options = this.options
} else {
options = {}
}
}
pattern = typeof pattern === 'undefined'
? this.pattern : pattern
if (typeof pattern === 'undefined') {
throw new Error('undefined pattern')
}
if (options.nobrace ||
!pattern.match(/\{.*\}/)) {
// shortcut. no need to expand.
return [pattern]
}
return expand(pattern)
}
// parse a component of the expanded set.
// At this point, no pattern may contain "/" in it
// so we're going to return a 2d array, where each entry is the full
// pattern, split on '/', and then turned into a regular expression.
// A regexp is made at the end which joins each array with an
// escaped /, and another full one which joins each regexp with |.
//
// Following the lead of Bash 4.1, note that "**" only has special meaning
// when it is the *only* thing in a path portion. Otherwise, any series
// of * is equivalent to a single *. Globstar behavior is enabled by
// default, and can be disabled by setting options.noglobstar.
Minimatch.prototype.parse = parse
var SUBPARSE = {}
function parse (pattern, isSub) {
var options = this.options
// shortcuts
if (!options.noglobstar && pattern === '**') return GLOBSTAR
if (pattern === '') return ''
var re = ''
var hasMagic = !!options.nocase
var escaping = false
// ? => one single character
var patternListStack = []
var negativeLists = []
var plType
var stateChar
var inClass = false
var reClassStart = -1
var classStart = -1
// . and .. never match anything that doesn't start with .,
// even when options.dot is set.
var patternStart = pattern.charAt(0) === '.' ? '' // anything
// not (start or / followed by . or .. followed by / or end)
: options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))'
: '(?!\\.)'
var self = this
function clearStateChar () {
if (stateChar) {
// we had some state-tracking character
// that wasn't consumed by this pass.
switch (stateChar) {
case '*':
re += star
hasMagic = true
break
case '?':
re += qmark
hasMagic = true
break
default:
re += '\\' + stateChar
break
}
self.debug('clearStateChar %j %j', stateChar, re)
stateChar = false
}
}
for (var i = 0, len = pattern.length, c
; (i < len) && (c = pattern.charAt(i))
; i++) {
this.debug('%s\t%s %s %j', pattern, i, re, c)
// skip over any that are escaped.
if (escaping && reSpecials[c]) {
re += '\\' + c
escaping = false
continue
}
switch (c) {
case '/':
// completely not allowed, even escaped.
// Should already be path-split by now.
return false
case '\\':
clearStateChar()
escaping = true
continue
// the various stateChar values
// for the "extglob" stuff.
case '?':
case '*':
case '+':
case '@':
case '!':
this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c)
// all of those are literals inside a class, except that
// the glob [!a] means [^a] in regexp
if (inClass) {
this.debug(' in class')
if (c === '!' && i === classStart + 1) c = '^'
re += c
continue
}
// if we already have a stateChar, then it means
// that there was something like ** or +? in there.
// Handle the stateChar, then proceed with this one.
self.debug('call clearStateChar %j', stateChar)
clearStateChar()
stateChar = c
// if extglob is disabled, then +(asdf|foo) isn't a thing.
// just clear the statechar *now*, rather than even diving into
// the patternList stuff.
if (options.noext) clearStateChar()
continue
case '(':
if (inClass) {
re += '('
continue
}
if (!stateChar) {
re += '\\('
continue
}
plType = stateChar
patternListStack.push({
type: plType,
start: i - 1,
reStart: re.length
})
// negation is (?:(?!js)[^/]*)
re += stateChar === '!' ? '(?:(?!(?:' : '(?:'
this.debug('plType %j %j', stateChar, re)
stateChar = false
continue
case ')':
if (inClass || !patternListStack.length) {
re += '\\)'
continue
}
clearStateChar()
hasMagic = true
re += ')'
var pl = patternListStack.pop()
plType = pl.type
// negation is (?:(?!js)[^/]*)
// The others are (?:<pattern>)<type>
switch (plType) {
case '!':
negativeLists.push(pl)
re += ')[^/]*?)'
pl.reEnd = re.length
break
case '?':
case '+':
case '*':
re += plType
break
case '@': break // the default anyway
}
continue
case '|':
if (inClass || !patternListStack.length || escaping) {
re += '\\|'
escaping = false
continue
}
clearStateChar()
re += '|'
continue
// these are mostly the same in regexp and glob
case '[':
// swallow any state-tracking char before the [
clearStateChar()
if (inClass) {
re += '\\' + c
continue
}
inClass = true
classStart = i
reClassStart = re.length
re += c
continue
case ']':
// a right bracket shall lose its special
// meaning and represent itself in
// a bracket expression if it occurs
// first in the list. -- POSIX.2 2.8.3.2
if (i === classStart + 1 || !inClass) {
re += '\\' + c
escaping = false
continue
}
// handle the case where we left a class open.
// "[z-a]" is valid, equivalent to "\[z-a\]"
if (inClass) {
// split where the last [ was, make sure we don't have
// an invalid re. if so, re-walk the contents of the
// would-be class to re-translate any characters that
// were passed through as-is
// TODO: It would probably be faster to determine this
// without a try/catch and a new RegExp, but it's tricky
// to do safely. For now, this is safe and works.
var cs = pattern.substring(classStart + 1, i)
try {
RegExp('[' + cs + ']')
} catch (er) {
// not a valid class!
var sp = this.parse(cs, SUBPARSE)
re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]'
hasMagic = hasMagic || sp[1]
inClass = false
continue
}
}
// finish up the class.
hasMagic = true
inClass = false
re += c
continue
default:
// swallow any state char that wasn't consumed
clearStateChar()
if (escaping) {
// no need
escaping = false
} else if (reSpecials[c]
&& !(c === '^' && inClass)) {
re += '\\'
}
re += c
} // switch
} // for
// handle the case where we left a class open.
// "[abc" is valid, equivalent to "\[abc"
if (inClass) {
// split where the last [ was, and escape it
// this is a huge pita. We now have to re-walk
// the contents of the would-be class to re-translate
// any characters that were passed through as-is
cs = pattern.substr(classStart + 1)
sp = this.parse(cs, SUBPARSE)
re = re.substr(0, reClassStart) + '\\[' + sp[0]
hasMagic = hasMagic || sp[1]
}
// handle the case where we had a +( thing at the *end*
// of the pattern.
// each pattern list stack adds 3 chars, and we need to go through
// and escape any | chars that were passed through as-is for the regexp.
// Go through and escape them, taking care not to double-escape any
// | chars that were already escaped.
for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) {
var tail = re.slice(pl.reStart + 3)
// maybe some even number of \, then maybe 1 \, followed by a |
tail = tail.replace(/((?:\\{2})*)(\\?)\|/g, function (_, $1, $2) {
if (!$2) {
// the | isn't already escaped, so escape it.
$2 = '\\'
}
// need to escape all those slashes *again*, without escaping the
// one that we need for escaping the | character. As it works out,
// escaping an even number of slashes can be done by simply repeating
// it exactly after itself. That's why this trick works.
//
// I am sorry that you have to see this.
return $1 + $1 + $2 + '|'
})
this.debug('tail=%j\n %s', tail, tail)
var t = pl.type === '*' ? star
: pl.type === '?' ? qmark
: '\\' + pl.type
hasMagic = true
re = re.slice(0, pl.reStart) + t + '\\(' + tail
}
// handle trailing things that only matter at the very end.
clearStateChar()
if (escaping) {
// trailing \\
re += '\\\\'
}
// only need to apply the nodot start if the re starts with
// something that could conceivably capture a dot
var addPatternStart = false
switch (re.charAt(0)) {
case '.':
case '[':
case '(': addPatternStart = true
}
// Hack to work around lack of negative lookbehind in JS
// A pattern like: *.!(x).!(y|z) needs to ensure that a name
// like 'a.xyz.yz' doesn't match. So, the first negative
// lookahead, has to look ALL the way ahead, to the end of
// the pattern.
for (var n = negativeLists.length - 1; n > -1; n--) {
var nl = negativeLists[n]
var nlBefore = re.slice(0, nl.reStart)
var nlFirst = re.slice(nl.reStart, nl.reEnd - 8)
var nlLast = re.slice(nl.reEnd - 8, nl.reEnd)
var nlAfter = re.slice(nl.reEnd)
nlLast += nlAfter
// Handle nested stuff like *(*.js|!(*.json)), where open parens
// mean that we should *not* include the ) in the bit that is considered
// "after" the negated section.
var openParensBefore = nlBefore.split('(').length - 1
var cleanAfter = nlAfter
for (i = 0; i < openParensBefore; i++) {
cleanAfter = cleanAfter.replace(/\)[+*?]?/, '')
}
nlAfter = cleanAfter
var dollar = ''
if (nlAfter === '' && isSub !== SUBPARSE) {
dollar = '$'
}
var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast
re = newRe
}
// if the re is not "" at this point, then we need to make sure
// it doesn't match against an empty path part.
// Otherwise a/* will match a/, which it should not.
if (re !== '' && hasMagic) {
re = '(?=.)' + re
}
if (addPatternStart) {
re = patternStart + re
}
// parsing just a piece of a larger pattern.
if (isSub === SUBPARSE) {
return [re, hasMagic]
}
// skip the regexp for non-magical patterns
// unescape anything in it, though, so that it'll be
// an exact match against a file etc.
if (!hasMagic) {
return globUnescape(pattern)
}
var flags = options.nocase ? 'i' : ''
var regExp = new RegExp('^' + re + '$', flags)
regExp._glob = pattern
regExp._src = re
return regExp
}
minimatch.makeRe = function (pattern, options) {
return new Minimatch(pattern, options || {}).makeRe()
}
Minimatch.prototype.makeRe = makeRe
function makeRe () {
if (this.regexp || this.regexp === false) return this.regexp
// at this point, this.set is a 2d array of partial
// pattern strings, or "**".
//
// It's better to use .match(). This function shouldn't
// be used, really, but it's pretty convenient sometimes,
// when you just want to work with a regex.
var set = this.set
if (!set.length) {
this.regexp = false
return this.regexp
}
var options = this.options
var twoStar = options.noglobstar ? star
: options.dot ? twoStarDot
: twoStarNoDot
var flags = options.nocase ? 'i' : ''
var re = set.map(function (pattern) {
return pattern.map(function (p) {
return (p === GLOBSTAR) ? twoStar
: (typeof p === 'string') ? regExpEscape(p)
: p._src
}).join('\\\/')
}).join('|')
// must match entire pattern
// ending in a * or ** will make it less strict.
re = '^(?:' + re + ')$'
// can match anything, as long as it's not this.
if (this.negate) re = '^(?!' + re + ').*$'
try {
this.regexp = new RegExp(re, flags)
} catch (ex) {
this.regexp = false
}
return this.regexp
}
minimatch.match = function (list, pattern, options) {
options = options || {}
var mm = new Minimatch(pattern, options)
list = list.filter(function (f) {
return mm.match(f)
})
if (mm.options.nonull && !list.length) {
list.push(pattern)
}
return list
}
Minimatch.prototype.match = match
function match (f, partial) {
this.debug('match', f, this.pattern)
// short-circuit in the case of busted things.
// comments, etc.
if (this.comment) return false
if (this.empty) return f === ''
if (f === '/' && partial) return true
var options = this.options
// windows: need to use /, not \
if (path.sep !== '/') {
f = f.split(path.sep).join('/')
}
// treat the test path as a set of pathparts.
f = f.split(slashSplit)
this.debug(this.pattern, 'split', f)
// just ONE of the pattern sets in this.set needs to match
// in order for it to be valid. If negating, then just one
// match means that we have failed.
// Either way, return on the first hit.
var set = this.set
this.debug(this.pattern, 'set', set)
// Find the basename of the path by looking for the last non-empty segment
var filename
var i
for (i = f.length - 1; i >= 0; i--) {
filename = f[i]
if (filename) break
}
for (i = 0; i < set.length; i++) {
var pattern = set[i]
var file = f
if (options.matchBase && pattern.length === 1) {
file = [filename]
}
var hit = this.matchOne(file, pattern, partial)
if (hit) {
if (options.flipNegate) return true
return !this.negate
}
}
// didn't get any hits. this is success if it's a negative
// pattern, failure otherwise.
if (options.flipNegate) return false
return this.negate
}
// set partial to true to test if, for example,
// "/a/b" matches the start of "/*/b/*/d"
// Partial means, if you run out of file before you run
// out of pattern, then that's fine, as long as all
// the parts match.
Minimatch.prototype.matchOne = function (file, pattern, partial) {
var options = this.options
this.debug('matchOne',
{ 'this': this, file: file, pattern: pattern })
this.debug('matchOne', file.length, pattern.length)
for (var fi = 0,
pi = 0,
fl = file.length,
pl = pattern.length
; (fi < fl) && (pi < pl)
; fi++, pi++) {
this.debug('matchOne loop')
var p = pattern[pi]
var f = file[fi]
this.debug(pattern, p, f)
// should be impossible.
// some invalid regexp stuff in the set.
if (p === false) return false
if (p === GLOBSTAR) {
this.debug('GLOBSTAR', [pattern, p, f])
// "**"
// a/**/b/**/c would match the following:
// a/b/x/y/z/c
// a/x/y/z/b/c
// a/b/x/b/x/c
// a/b/c
// To do this, take the rest of the pattern after
// the **, and see if it would match the file remainder.
// If so, return success.
// If not, the ** "swallows" a segment, and try again.
// This is recursively awful.
//
// a/**/b/**/c matching a/b/x/y/z/c
// - a matches a
// - doublestar
// - matchOne(b/x/y/z/c, b/**/c)
// - b matches b
// - doublestar
// - matchOne(x/y/z/c, c) -> no
// - matchOne(y/z/c, c) -> no
// - matchOne(z/c, c) -> no
// - matchOne(c, c) yes, hit
var fr = fi
var pr = pi + 1
if (pr === pl) {
this.debug('** at the end')
// a ** at the end will just swallow the rest.
// We have found a match.
// however, it will not swallow /.x, unless
// options.dot is set.
// . and .. are *never* matched by **, for explosively
// exponential reasons.
for (; fi < fl; fi++) {
if (file[fi] === '.' || file[fi] === '..' ||
(!options.dot && file[fi].charAt(0) === '.')) return false
}
return true
}
// ok, let's see if we can swallow whatever we can.
while (fr < fl) {
var swallowee = file[fr]
this.debug('\nglobstar while', file, fr, pattern, pr, swallowee)
// XXX remove this slice. Just pass the start index.
if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) {
this.debug('globstar found match!', fr, fl, swallowee)
// found a match.
return true
} else {
// can't swallow "." or ".." ever.
// can only swallow ".foo" when explicitly asked.
if (swallowee === '.' || swallowee === '..' ||
(!options.dot && swallowee.charAt(0) === '.')) {
this.debug('dot detected!', file, fr, pattern, pr)
break
}
// ** swallows a segment, and continue.
this.debug('globstar swallow a segment, and continue')
fr++
}
}
// no match was found.
// However, in partial mode, we can't say this is necessarily over.
// If there's more *pattern* left, then
if (partial) {
// ran out of file
this.debug('\n>>> no match, partial?', file, fr, pattern, pr)
if (fr === fl) return true
}
return false
}
// something other than **
// non-magic patterns just have to match exactly
// patterns with magic have been turned into regexps.
var hit
if (typeof p === 'string') {
if (options.nocase) {
hit = f.toLowerCase() === p.toLowerCase()
} else {
hit = f === p
}
this.debug('string match', p, f, hit)
} else {
hit = f.match(p)
this.debug('pattern match', p, f, hit)
}
if (!hit) return false
}
// Note: ending in / means that we'll get a final ""
// at the end of the pattern. This can only match a
// corresponding "" at the end of the file.
// If the file ends in /, then it can only match a
// a pattern that ends in /, unless the pattern just
// doesn't have any more for it. But, a/b/ should *not*
// match "a/b/*", even though "" matches against the
// [^/]*? pattern, except in partial mode, where it might
// simply not be reached yet.
// However, a/b/ should still satisfy a/*
// now either we fell off the end of the pattern, or we're done.
if (fi === fl && pi === pl) {
// ran out of pattern and filename at the same time.
// an exact hit!
return true
} else if (fi === fl) {
// ran out of file, but still had pattern left.
// this is ok if we're doing the match as part of
// a glob fs traversal.
return partial
} else if (pi === pl) {
// ran out of pattern, still have file left.
// this is only acceptable if we're on the very last
// empty segment of a file with a trailing slash.
// a/* should match a/b/
var emptyFileEnd = (fi === fl - 1) && (file[fi] === '')
return emptyFileEnd
}
// should be unreachable.
throw new Error('wtf?')
}
// replace stuff like \* with *
function globUnescape (s) {
return s.replace(/\\(.)/g, '$1')
}
function regExpEscape (s) {
return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&')
}
| {
return s.split('').reduce(function (set, c) {
set[c] = true
return set
}, {})
} | identifier_body |
minimatch.js | module.exports = minimatch
minimatch.Minimatch = Minimatch
var path = { sep: '/' }
try {
path = require('path')
} catch (er) {}
var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {}
var expand = require('brace-expansion')
// any single thing other than /
// don't need to escape / when using new RegExp()
var qmark = '[^/]'
// * => any number of characters
var star = qmark + '*?'
// ** when dots are allowed. Anything goes, except .. and .
// not (^ or / followed by one or two dots followed by $ or /),
// followed by anything, any number of times.
var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?'
// not a ^ or / followed by a dot,
// followed by anything, any number of times.
var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?'
// characters that need to be escaped in RegExp.
var reSpecials = charSet('().*{}+?[]^$\\!')
// "abc" -> { a:true, b:true, c:true }
function charSet (s) {
return s.split('').reduce(function (set, c) {
set[c] = true
return set
}, {})
}
// normalizes slashes.
var slashSplit = /\/+/
minimatch.filter = filter
function filter (pattern, options) {
options = options || {}
return function (p, i, list) {
return minimatch(p, pattern, options)
}
}
function ext (a, b) {
a = a || {}
b = b || {}
var t = {}
Object.keys(b).forEach(function (k) {
t[k] = b[k]
})
Object.keys(a).forEach(function (k) {
t[k] = a[k]
})
return t
}
minimatch.defaults = function (def) {
if (!def || !Object.keys(def).length) return minimatch
var orig = minimatch
var m = function minimatch (p, pattern, options) {
return orig.minimatch(p, pattern, ext(def, options))
}
m.Minimatch = function Minimatch (pattern, options) {
return new orig.Minimatch(pattern, ext(def, options))
}
return m
}
Minimatch.defaults = function (def) {
if (!def || !Object.keys(def).length) return Minimatch
return minimatch.defaults(def).Minimatch
}
function minimatch (p, pattern, options) {
if (typeof pattern !== 'string') {
throw new TypeError('glob pattern string required')
}
| return false
}
// "" only matches ""
if (pattern.trim() === '') return p === ''
return new Minimatch(pattern, options).match(p)
}
function Minimatch (pattern, options) {
if (!(this instanceof Minimatch)) {
return new Minimatch(pattern, options)
}
if (typeof pattern !== 'string') {
throw new TypeError('glob pattern string required')
}
if (!options) options = {}
pattern = pattern.trim()
// windows support: need to use /, not \
if (path.sep !== '/') {
pattern = pattern.split(path.sep).join('/')
}
this.options = options
this.set = []
this.pattern = pattern
this.regexp = null
this.negate = false
this.comment = false
this.empty = false
// make the set of regexps etc.
this.make()
}
Minimatch.prototype.debug = function () {}
Minimatch.prototype.make = make
function make () {
// don't do it more than once.
if (this._made) return
var pattern = this.pattern
var options = this.options
// empty patterns and comments match nothing.
if (!options.nocomment && pattern.charAt(0) === '#') {
this.comment = true
return
}
if (!pattern) {
this.empty = true
return
}
// step 1: figure out negation, etc.
this.parseNegate()
// step 2: expand braces
var set = this.globSet = this.braceExpand()
if (options.debug) this.debug = console.error
this.debug(this.pattern, set)
// step 3: now we have a set, so turn each one into a series of path-portion
// matching patterns.
// These will be regexps, except in the case of "**", which is
// set to the GLOBSTAR object for globstar behavior,
// and will not contain any / characters
set = this.globParts = set.map(function (s) {
return s.split(slashSplit)
})
this.debug(this.pattern, set)
// glob --> regexps
set = set.map(function (s, si, set) {
return s.map(this.parse, this)
}, this)
this.debug(this.pattern, set)
// filter out everything that didn't compile properly.
set = set.filter(function (s) {
return s.indexOf(false) === -1
})
this.debug(this.pattern, set)
this.set = set
}
Minimatch.prototype.parseNegate = parseNegate
function parseNegate () {
var pattern = this.pattern
var negate = false
var options = this.options
var negateOffset = 0
if (options.nonegate) return
for (var i = 0, l = pattern.length
; i < l && pattern.charAt(i) === '!'
; i++) {
negate = !negate
negateOffset++
}
if (negateOffset) this.pattern = pattern.substr(negateOffset)
this.negate = negate
}
// Brace expansion:
// a{b,c}d -> abd acd
// a{b,}c -> abc ac
// a{0..3}d -> a0d a1d a2d a3d
// a{b,c{d,e}f}g -> abg acdfg acefg
// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg
//
// Invalid sets are not expanded.
// a{2..}b -> a{2..}b
// a{b}c -> a{b}c
minimatch.braceExpand = function (pattern, options) {
return braceExpand(pattern, options)
}
Minimatch.prototype.braceExpand = braceExpand
function braceExpand (pattern, options) {
if (!options) {
if (this instanceof Minimatch) {
options = this.options
} else {
options = {}
}
}
pattern = typeof pattern === 'undefined'
? this.pattern : pattern
if (typeof pattern === 'undefined') {
throw new Error('undefined pattern')
}
if (options.nobrace ||
!pattern.match(/\{.*\}/)) {
// shortcut. no need to expand.
return [pattern]
}
return expand(pattern)
}
// parse a component of the expanded set.
// At this point, no pattern may contain "/" in it
// so we're going to return a 2d array, where each entry is the full
// pattern, split on '/', and then turned into a regular expression.
// A regexp is made at the end which joins each array with an
// escaped /, and another full one which joins each regexp with |.
//
// Following the lead of Bash 4.1, note that "**" only has special meaning
// when it is the *only* thing in a path portion. Otherwise, any series
// of * is equivalent to a single *. Globstar behavior is enabled by
// default, and can be disabled by setting options.noglobstar.
Minimatch.prototype.parse = parse
var SUBPARSE = {}
function parse (pattern, isSub) {
var options = this.options
// shortcuts
if (!options.noglobstar && pattern === '**') return GLOBSTAR
if (pattern === '') return ''
var re = ''
var hasMagic = !!options.nocase
var escaping = false
// ? => one single character
var patternListStack = []
var negativeLists = []
var plType
var stateChar
var inClass = false
var reClassStart = -1
var classStart = -1
// . and .. never match anything that doesn't start with .,
// even when options.dot is set.
var patternStart = pattern.charAt(0) === '.' ? '' // anything
// not (start or / followed by . or .. followed by / or end)
: options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))'
: '(?!\\.)'
var self = this
function clearStateChar () {
if (stateChar) {
// we had some state-tracking character
// that wasn't consumed by this pass.
switch (stateChar) {
case '*':
re += star
hasMagic = true
break
case '?':
re += qmark
hasMagic = true
break
default:
re += '\\' + stateChar
break
}
self.debug('clearStateChar %j %j', stateChar, re)
stateChar = false
}
}
for (var i = 0, len = pattern.length, c
; (i < len) && (c = pattern.charAt(i))
; i++) {
this.debug('%s\t%s %s %j', pattern, i, re, c)
// skip over any that are escaped.
if (escaping && reSpecials[c]) {
re += '\\' + c
escaping = false
continue
}
switch (c) {
case '/':
// completely not allowed, even escaped.
// Should already be path-split by now.
return false
case '\\':
clearStateChar()
escaping = true
continue
// the various stateChar values
// for the "extglob" stuff.
case '?':
case '*':
case '+':
case '@':
case '!':
this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c)
// all of those are literals inside a class, except that
// the glob [!a] means [^a] in regexp
if (inClass) {
this.debug(' in class')
if (c === '!' && i === classStart + 1) c = '^'
re += c
continue
}
// if we already have a stateChar, then it means
// that there was something like ** or +? in there.
// Handle the stateChar, then proceed with this one.
self.debug('call clearStateChar %j', stateChar)
clearStateChar()
stateChar = c
// if extglob is disabled, then +(asdf|foo) isn't a thing.
// just clear the statechar *now*, rather than even diving into
// the patternList stuff.
if (options.noext) clearStateChar()
continue
case '(':
if (inClass) {
re += '('
continue
}
if (!stateChar) {
re += '\\('
continue
}
plType = stateChar
patternListStack.push({
type: plType,
start: i - 1,
reStart: re.length
})
// negation is (?:(?!js)[^/]*)
re += stateChar === '!' ? '(?:(?!(?:' : '(?:'
this.debug('plType %j %j', stateChar, re)
stateChar = false
continue
case ')':
if (inClass || !patternListStack.length) {
re += '\\)'
continue
}
clearStateChar()
hasMagic = true
re += ')'
var pl = patternListStack.pop()
plType = pl.type
// negation is (?:(?!js)[^/]*)
// The others are (?:<pattern>)<type>
switch (plType) {
case '!':
negativeLists.push(pl)
re += ')[^/]*?)'
pl.reEnd = re.length
break
case '?':
case '+':
case '*':
re += plType
break
case '@': break // the default anyway
}
continue
case '|':
if (inClass || !patternListStack.length || escaping) {
re += '\\|'
escaping = false
continue
}
clearStateChar()
re += '|'
continue
// these are mostly the same in regexp and glob
case '[':
// swallow any state-tracking char before the [
clearStateChar()
if (inClass) {
re += '\\' + c
continue
}
inClass = true
classStart = i
reClassStart = re.length
re += c
continue
case ']':
// a right bracket shall lose its special
// meaning and represent itself in
// a bracket expression if it occurs
// first in the list. -- POSIX.2 2.8.3.2
if (i === classStart + 1 || !inClass) {
re += '\\' + c
escaping = false
continue
}
// handle the case where we left a class open.
// "[z-a]" is valid, equivalent to "\[z-a\]"
if (inClass) {
// split where the last [ was, make sure we don't have
// an invalid re. if so, re-walk the contents of the
// would-be class to re-translate any characters that
// were passed through as-is
// TODO: It would probably be faster to determine this
// without a try/catch and a new RegExp, but it's tricky
// to do safely. For now, this is safe and works.
var cs = pattern.substring(classStart + 1, i)
try {
RegExp('[' + cs + ']')
} catch (er) {
// not a valid class!
var sp = this.parse(cs, SUBPARSE)
re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]'
hasMagic = hasMagic || sp[1]
inClass = false
continue
}
}
// finish up the class.
hasMagic = true
inClass = false
re += c
continue
default:
// swallow any state char that wasn't consumed
clearStateChar()
if (escaping) {
// no need
escaping = false
} else if (reSpecials[c]
&& !(c === '^' && inClass)) {
re += '\\'
}
re += c
} // switch
} // for
// handle the case where we left a class open.
// "[abc" is valid, equivalent to "\[abc"
if (inClass) {
// split where the last [ was, and escape it
// this is a huge pita. We now have to re-walk
// the contents of the would-be class to re-translate
// any characters that were passed through as-is
cs = pattern.substr(classStart + 1)
sp = this.parse(cs, SUBPARSE)
re = re.substr(0, reClassStart) + '\\[' + sp[0]
hasMagic = hasMagic || sp[1]
}
// handle the case where we had a +( thing at the *end*
// of the pattern.
// each pattern list stack adds 3 chars, and we need to go through
// and escape any | chars that were passed through as-is for the regexp.
// Go through and escape them, taking care not to double-escape any
// | chars that were already escaped.
for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) {
var tail = re.slice(pl.reStart + 3)
// maybe some even number of \, then maybe 1 \, followed by a |
tail = tail.replace(/((?:\\{2})*)(\\?)\|/g, function (_, $1, $2) {
if (!$2) {
// the | isn't already escaped, so escape it.
$2 = '\\'
}
// need to escape all those slashes *again*, without escaping the
// one that we need for escaping the | character. As it works out,
// escaping an even number of slashes can be done by simply repeating
// it exactly after itself. That's why this trick works.
//
// I am sorry that you have to see this.
return $1 + $1 + $2 + '|'
})
this.debug('tail=%j\n %s', tail, tail)
var t = pl.type === '*' ? star
: pl.type === '?' ? qmark
: '\\' + pl.type
hasMagic = true
re = re.slice(0, pl.reStart) + t + '\\(' + tail
}
// handle trailing things that only matter at the very end.
clearStateChar()
if (escaping) {
// trailing \\
re += '\\\\'
}
// only need to apply the nodot start if the re starts with
// something that could conceivably capture a dot
var addPatternStart = false
switch (re.charAt(0)) {
case '.':
case '[':
case '(': addPatternStart = true
}
// Hack to work around lack of negative lookbehind in JS
// A pattern like: *.!(x).!(y|z) needs to ensure that a name
// like 'a.xyz.yz' doesn't match. So, the first negative
// lookahead, has to look ALL the way ahead, to the end of
// the pattern.
for (var n = negativeLists.length - 1; n > -1; n--) {
var nl = negativeLists[n]
var nlBefore = re.slice(0, nl.reStart)
var nlFirst = re.slice(nl.reStart, nl.reEnd - 8)
var nlLast = re.slice(nl.reEnd - 8, nl.reEnd)
var nlAfter = re.slice(nl.reEnd)
nlLast += nlAfter
// Handle nested stuff like *(*.js|!(*.json)), where open parens
// mean that we should *not* include the ) in the bit that is considered
// "after" the negated section.
var openParensBefore = nlBefore.split('(').length - 1
var cleanAfter = nlAfter
for (i = 0; i < openParensBefore; i++) {
cleanAfter = cleanAfter.replace(/\)[+*?]?/, '')
}
nlAfter = cleanAfter
var dollar = ''
if (nlAfter === '' && isSub !== SUBPARSE) {
dollar = '$'
}
var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast
re = newRe
}
// if the re is not "" at this point, then we need to make sure
// it doesn't match against an empty path part.
// Otherwise a/* will match a/, which it should not.
if (re !== '' && hasMagic) {
re = '(?=.)' + re
}
if (addPatternStart) {
re = patternStart + re
}
// parsing just a piece of a larger pattern.
if (isSub === SUBPARSE) {
return [re, hasMagic]
}
// skip the regexp for non-magical patterns
// unescape anything in it, though, so that it'll be
// an exact match against a file etc.
if (!hasMagic) {
return globUnescape(pattern)
}
var flags = options.nocase ? 'i' : ''
var regExp = new RegExp('^' + re + '$', flags)
regExp._glob = pattern
regExp._src = re
return regExp
}
minimatch.makeRe = function (pattern, options) {
return new Minimatch(pattern, options || {}).makeRe()
}
Minimatch.prototype.makeRe = makeRe
function makeRe () {
if (this.regexp || this.regexp === false) return this.regexp
// at this point, this.set is a 2d array of partial
// pattern strings, or "**".
//
// It's better to use .match(). This function shouldn't
// be used, really, but it's pretty convenient sometimes,
// when you just want to work with a regex.
var set = this.set
if (!set.length) {
this.regexp = false
return this.regexp
}
var options = this.options
var twoStar = options.noglobstar ? star
: options.dot ? twoStarDot
: twoStarNoDot
var flags = options.nocase ? 'i' : ''
var re = set.map(function (pattern) {
return pattern.map(function (p) {
return (p === GLOBSTAR) ? twoStar
: (typeof p === 'string') ? regExpEscape(p)
: p._src
}).join('\\\/')
}).join('|')
// must match entire pattern
// ending in a * or ** will make it less strict.
re = '^(?:' + re + ')$'
// can match anything, as long as it's not this.
if (this.negate) re = '^(?!' + re + ').*$'
try {
this.regexp = new RegExp(re, flags)
} catch (ex) {
this.regexp = false
}
return this.regexp
}
minimatch.match = function (list, pattern, options) {
options = options || {}
var mm = new Minimatch(pattern, options)
list = list.filter(function (f) {
return mm.match(f)
})
if (mm.options.nonull && !list.length) {
list.push(pattern)
}
return list
}
Minimatch.prototype.match = match
function match (f, partial) {
this.debug('match', f, this.pattern)
// short-circuit in the case of busted things.
// comments, etc.
if (this.comment) return false
if (this.empty) return f === ''
if (f === '/' && partial) return true
var options = this.options
// windows: need to use /, not \
if (path.sep !== '/') {
f = f.split(path.sep).join('/')
}
// treat the test path as a set of pathparts.
f = f.split(slashSplit)
this.debug(this.pattern, 'split', f)
// just ONE of the pattern sets in this.set needs to match
// in order for it to be valid. If negating, then just one
// match means that we have failed.
// Either way, return on the first hit.
var set = this.set
this.debug(this.pattern, 'set', set)
// Find the basename of the path by looking for the last non-empty segment
var filename
var i
for (i = f.length - 1; i >= 0; i--) {
filename = f[i]
if (filename) break
}
for (i = 0; i < set.length; i++) {
var pattern = set[i]
var file = f
if (options.matchBase && pattern.length === 1) {
file = [filename]
}
var hit = this.matchOne(file, pattern, partial)
if (hit) {
if (options.flipNegate) return true
return !this.negate
}
}
// didn't get any hits. this is success if it's a negative
// pattern, failure otherwise.
if (options.flipNegate) return false
return this.negate
}
// set partial to true to test if, for example,
// "/a/b" matches the start of "/*/b/*/d"
// Partial means, if you run out of file before you run
// out of pattern, then that's fine, as long as all
// the parts match.
Minimatch.prototype.matchOne = function (file, pattern, partial) {
var options = this.options
this.debug('matchOne',
{ 'this': this, file: file, pattern: pattern })
this.debug('matchOne', file.length, pattern.length)
for (var fi = 0,
pi = 0,
fl = file.length,
pl = pattern.length
; (fi < fl) && (pi < pl)
; fi++, pi++) {
this.debug('matchOne loop')
var p = pattern[pi]
var f = file[fi]
this.debug(pattern, p, f)
// should be impossible.
// some invalid regexp stuff in the set.
if (p === false) return false
if (p === GLOBSTAR) {
this.debug('GLOBSTAR', [pattern, p, f])
// "**"
// a/**/b/**/c would match the following:
// a/b/x/y/z/c
// a/x/y/z/b/c
// a/b/x/b/x/c
// a/b/c
// To do this, take the rest of the pattern after
// the **, and see if it would match the file remainder.
// If so, return success.
// If not, the ** "swallows" a segment, and try again.
// This is recursively awful.
//
// a/**/b/**/c matching a/b/x/y/z/c
// - a matches a
// - doublestar
// - matchOne(b/x/y/z/c, b/**/c)
// - b matches b
// - doublestar
// - matchOne(x/y/z/c, c) -> no
// - matchOne(y/z/c, c) -> no
// - matchOne(z/c, c) -> no
// - matchOne(c, c) yes, hit
var fr = fi
var pr = pi + 1
if (pr === pl) {
this.debug('** at the end')
// a ** at the end will just swallow the rest.
// We have found a match.
// however, it will not swallow /.x, unless
// options.dot is set.
// . and .. are *never* matched by **, for explosively
// exponential reasons.
for (; fi < fl; fi++) {
if (file[fi] === '.' || file[fi] === '..' ||
(!options.dot && file[fi].charAt(0) === '.')) return false
}
return true
}
// ok, let's see if we can swallow whatever we can.
while (fr < fl) {
var swallowee = file[fr]
this.debug('\nglobstar while', file, fr, pattern, pr, swallowee)
// XXX remove this slice. Just pass the start index.
if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) {
this.debug('globstar found match!', fr, fl, swallowee)
// found a match.
return true
} else {
// can't swallow "." or ".." ever.
// can only swallow ".foo" when explicitly asked.
if (swallowee === '.' || swallowee === '..' ||
(!options.dot && swallowee.charAt(0) === '.')) {
this.debug('dot detected!', file, fr, pattern, pr)
break
}
// ** swallows a segment, and continue.
this.debug('globstar swallow a segment, and continue')
fr++
}
}
// no match was found.
// However, in partial mode, we can't say this is necessarily over.
// If there's more *pattern* left, then
if (partial) {
// ran out of file
this.debug('\n>>> no match, partial?', file, fr, pattern, pr)
if (fr === fl) return true
}
return false
}
// something other than **
// non-magic patterns just have to match exactly
// patterns with magic have been turned into regexps.
var hit
if (typeof p === 'string') {
if (options.nocase) {
hit = f.toLowerCase() === p.toLowerCase()
} else {
hit = f === p
}
this.debug('string match', p, f, hit)
} else {
hit = f.match(p)
this.debug('pattern match', p, f, hit)
}
if (!hit) return false
}
// Note: ending in / means that we'll get a final ""
// at the end of the pattern. This can only match a
// corresponding "" at the end of the file.
// If the file ends in /, then it can only match a
// a pattern that ends in /, unless the pattern just
// doesn't have any more for it. But, a/b/ should *not*
// match "a/b/*", even though "" matches against the
// [^/]*? pattern, except in partial mode, where it might
// simply not be reached yet.
// However, a/b/ should still satisfy a/*
// now either we fell off the end of the pattern, or we're done.
if (fi === fl && pi === pl) {
// ran out of pattern and filename at the same time.
// an exact hit!
return true
} else if (fi === fl) {
// ran out of file, but still had pattern left.
// this is ok if we're doing the match as part of
// a glob fs traversal.
return partial
} else if (pi === pl) {
// ran out of pattern, still have file left.
// this is only acceptable if we're on the very last
// empty segment of a file with a trailing slash.
// a/* should match a/b/
var emptyFileEnd = (fi === fl - 1) && (file[fi] === '')
return emptyFileEnd
}
// should be unreachable.
throw new Error('wtf?')
}
// replace stuff like \* with *
function globUnescape (s) {
return s.replace(/\\(.)/g, '$1')
}
function regExpEscape (s) {
return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&')
} |
if (!options) options = {}
// shortcut: comments match nothing.
if (!options.nocomment && pattern.charAt(0) === '#') {
| random_line_split |
minimatch.js | module.exports = minimatch
minimatch.Minimatch = Minimatch
var path = { sep: '/' }
try {
path = require('path')
} catch (er) {}
var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {}
var expand = require('brace-expansion')
// any single thing other than /
// don't need to escape / when using new RegExp()
var qmark = '[^/]'
// * => any number of characters
var star = qmark + '*?'
// ** when dots are allowed. Anything goes, except .. and .
// not (^ or / followed by one or two dots followed by $ or /),
// followed by anything, any number of times.
var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?'
// not a ^ or / followed by a dot,
// followed by anything, any number of times.
var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?'
// characters that need to be escaped in RegExp.
var reSpecials = charSet('().*{}+?[]^$\\!')
// "abc" -> { a:true, b:true, c:true }
function charSet (s) {
return s.split('').reduce(function (set, c) {
set[c] = true
return set
}, {})
}
// normalizes slashes.
var slashSplit = /\/+/
minimatch.filter = filter
function filter (pattern, options) {
options = options || {}
return function (p, i, list) {
return minimatch(p, pattern, options)
}
}
function ext (a, b) {
a = a || {}
b = b || {}
var t = {}
Object.keys(b).forEach(function (k) {
t[k] = b[k]
})
Object.keys(a).forEach(function (k) {
t[k] = a[k]
})
return t
}
minimatch.defaults = function (def) {
if (!def || !Object.keys(def).length) return minimatch
var orig = minimatch
var m = function minimatch (p, pattern, options) {
return orig.minimatch(p, pattern, ext(def, options))
}
m.Minimatch = function Minimatch (pattern, options) {
return new orig.Minimatch(pattern, ext(def, options))
}
return m
}
Minimatch.defaults = function (def) {
if (!def || !Object.keys(def).length) return Minimatch
return minimatch.defaults(def).Minimatch
}
function minimatch (p, pattern, options) {
if (typeof pattern !== 'string') {
throw new TypeError('glob pattern string required')
}
if (!options) options = {}
// shortcut: comments match nothing.
if (!options.nocomment && pattern.charAt(0) === '#') {
return false
}
// "" only matches ""
if (pattern.trim() === '') return p === ''
return new Minimatch(pattern, options).match(p)
}
function Minimatch (pattern, options) {
if (!(this instanceof Minimatch)) {
return new Minimatch(pattern, options)
}
if (typeof pattern !== 'string') {
throw new TypeError('glob pattern string required')
}
if (!options) options = {}
pattern = pattern.trim()
// windows support: need to use /, not \
if (path.sep !== '/') {
pattern = pattern.split(path.sep).join('/')
}
this.options = options
this.set = []
this.pattern = pattern
this.regexp = null
this.negate = false
this.comment = false
this.empty = false
// make the set of regexps etc.
this.make()
}
Minimatch.prototype.debug = function () {}
Minimatch.prototype.make = make
function make () {
// don't do it more than once.
if (this._made) return
var pattern = this.pattern
var options = this.options
// empty patterns and comments match nothing.
if (!options.nocomment && pattern.charAt(0) === '#') {
this.comment = true
return
}
if (!pattern) {
this.empty = true
return
}
// step 1: figure out negation, etc.
this.parseNegate()
// step 2: expand braces
var set = this.globSet = this.braceExpand()
if (options.debug) this.debug = console.error
this.debug(this.pattern, set)
// step 3: now we have a set, so turn each one into a series of path-portion
// matching patterns.
// These will be regexps, except in the case of "**", which is
// set to the GLOBSTAR object for globstar behavior,
// and will not contain any / characters
set = this.globParts = set.map(function (s) {
return s.split(slashSplit)
})
this.debug(this.pattern, set)
// glob --> regexps
set = set.map(function (s, si, set) {
return s.map(this.parse, this)
}, this)
this.debug(this.pattern, set)
// filter out everything that didn't compile properly.
set = set.filter(function (s) {
return s.indexOf(false) === -1
})
this.debug(this.pattern, set)
this.set = set
}
Minimatch.prototype.parseNegate = parseNegate
function parseNegate () {
var pattern = this.pattern
var negate = false
var options = this.options
var negateOffset = 0
if (options.nonegate) return
for (var i = 0, l = pattern.length
; i < l && pattern.charAt(i) === '!'
; i++) {
negate = !negate
negateOffset++
}
if (negateOffset) this.pattern = pattern.substr(negateOffset)
this.negate = negate
}
// Brace expansion:
// a{b,c}d -> abd acd
// a{b,}c -> abc ac
// a{0..3}d -> a0d a1d a2d a3d
// a{b,c{d,e}f}g -> abg acdfg acefg
// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg
//
// Invalid sets are not expanded.
// a{2..}b -> a{2..}b
// a{b}c -> a{b}c
minimatch.braceExpand = function (pattern, options) {
return braceExpand(pattern, options)
}
Minimatch.prototype.braceExpand = braceExpand
function braceExpand (pattern, options) {
if (!options) {
if (this instanceof Minimatch) {
options = this.options
} else {
options = {}
}
}
pattern = typeof pattern === 'undefined'
? this.pattern : pattern
if (typeof pattern === 'undefined') {
throw new Error('undefined pattern')
}
if (options.nobrace ||
!pattern.match(/\{.*\}/)) {
// shortcut. no need to expand.
return [pattern]
}
return expand(pattern)
}
// parse a component of the expanded set.
// At this point, no pattern may contain "/" in it
// so we're going to return a 2d array, where each entry is the full
// pattern, split on '/', and then turned into a regular expression.
// A regexp is made at the end which joins each array with an
// escaped /, and another full one which joins each regexp with |.
//
// Following the lead of Bash 4.1, note that "**" only has special meaning
// when it is the *only* thing in a path portion. Otherwise, any series
// of * is equivalent to a single *. Globstar behavior is enabled by
// default, and can be disabled by setting options.noglobstar.
Minimatch.prototype.parse = parse
var SUBPARSE = {}
function parse (pattern, isSub) {
var options = this.options
// shortcuts
if (!options.noglobstar && pattern === '**') return GLOBSTAR
if (pattern === '') return ''
var re = ''
var hasMagic = !!options.nocase
var escaping = false
// ? => one single character
var patternListStack = []
var negativeLists = []
var plType
var stateChar
var inClass = false
var reClassStart = -1
var classStart = -1
// . and .. never match anything that doesn't start with .,
// even when options.dot is set.
var patternStart = pattern.charAt(0) === '.' ? '' // anything
// not (start or / followed by . or .. followed by / or end)
: options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))'
: '(?!\\.)'
var self = this
function clearStateChar () {
if (stateChar) {
// we had some state-tracking character
// that wasn't consumed by this pass.
switch (stateChar) {
case '*':
re += star
hasMagic = true
break
case '?':
re += qmark
hasMagic = true
break
default:
re += '\\' + stateChar
break
}
self.debug('clearStateChar %j %j', stateChar, re)
stateChar = false
}
}
for (var i = 0, len = pattern.length, c
; (i < len) && (c = pattern.charAt(i))
; i++) {
this.debug('%s\t%s %s %j', pattern, i, re, c)
// skip over any that are escaped.
if (escaping && reSpecials[c]) {
re += '\\' + c
escaping = false
continue
}
switch (c) {
case '/':
// completely not allowed, even escaped.
// Should already be path-split by now.
return false
case '\\':
clearStateChar()
escaping = true
continue
// the various stateChar values
// for the "extglob" stuff.
case '?':
case '*':
case '+':
case '@':
case '!':
this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c)
// all of those are literals inside a class, except that
// the glob [!a] means [^a] in regexp
if (inClass) {
this.debug(' in class')
if (c === '!' && i === classStart + 1) c = '^'
re += c
continue
}
// if we already have a stateChar, then it means
// that there was something like ** or +? in there.
// Handle the stateChar, then proceed with this one.
self.debug('call clearStateChar %j', stateChar)
clearStateChar()
stateChar = c
// if extglob is disabled, then +(asdf|foo) isn't a thing.
// just clear the statechar *now*, rather than even diving into
// the patternList stuff.
if (options.noext) clearStateChar()
continue
case '(':
if (inClass) {
re += '('
continue
}
if (!stateChar) {
re += '\\('
continue
}
plType = stateChar
patternListStack.push({
type: plType,
start: i - 1,
reStart: re.length
})
// negation is (?:(?!js)[^/]*)
re += stateChar === '!' ? '(?:(?!(?:' : '(?:'
this.debug('plType %j %j', stateChar, re)
stateChar = false
continue
case ')':
if (inClass || !patternListStack.length) {
re += '\\)'
continue
}
clearStateChar()
hasMagic = true
re += ')'
var pl = patternListStack.pop()
plType = pl.type
// negation is (?:(?!js)[^/]*)
// The others are (?:<pattern>)<type>
switch (plType) {
case '!':
negativeLists.push(pl)
re += ')[^/]*?)'
pl.reEnd = re.length
break
case '?':
case '+':
case '*':
re += plType
break
case '@': break // the default anyway
}
continue
case '|':
if (inClass || !patternListStack.length || escaping) {
re += '\\|'
escaping = false
continue
}
clearStateChar()
re += '|'
continue
// these are mostly the same in regexp and glob
case '[':
// swallow any state-tracking char before the [
clearStateChar()
if (inClass) {
re += '\\' + c
continue
}
inClass = true
classStart = i
reClassStart = re.length
re += c
continue
case ']':
// a right bracket shall lose its special
// meaning and represent itself in
// a bracket expression if it occurs
// first in the list. -- POSIX.2 2.8.3.2
if (i === classStart + 1 || !inClass) {
re += '\\' + c
escaping = false
continue
}
// handle the case where we left a class open.
// "[z-a]" is valid, equivalent to "\[z-a\]"
if (inClass) {
// split where the last [ was, make sure we don't have
// an invalid re. if so, re-walk the contents of the
// would-be class to re-translate any characters that
// were passed through as-is
// TODO: It would probably be faster to determine this
// without a try/catch and a new RegExp, but it's tricky
// to do safely. For now, this is safe and works.
var cs = pattern.substring(classStart + 1, i)
try {
RegExp('[' + cs + ']')
} catch (er) {
// not a valid class!
var sp = this.parse(cs, SUBPARSE)
re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]'
hasMagic = hasMagic || sp[1]
inClass = false
continue
}
}
// finish up the class.
hasMagic = true
inClass = false
re += c
continue
default:
// swallow any state char that wasn't consumed
clearStateChar()
if (escaping) {
// no need
escaping = false
} else if (reSpecials[c]
&& !(c === '^' && inClass)) {
re += '\\'
}
re += c
} // switch
} // for
// handle the case where we left a class open.
// "[abc" is valid, equivalent to "\[abc"
if (inClass) {
// split where the last [ was, and escape it
// this is a huge pita. We now have to re-walk
// the contents of the would-be class to re-translate
// any characters that were passed through as-is
cs = pattern.substr(classStart + 1)
sp = this.parse(cs, SUBPARSE)
re = re.substr(0, reClassStart) + '\\[' + sp[0]
hasMagic = hasMagic || sp[1]
}
// handle the case where we had a +( thing at the *end*
// of the pattern.
// each pattern list stack adds 3 chars, and we need to go through
// and escape any | chars that were passed through as-is for the regexp.
// Go through and escape them, taking care not to double-escape any
// | chars that were already escaped.
for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) {
var tail = re.slice(pl.reStart + 3)
// maybe some even number of \, then maybe 1 \, followed by a |
tail = tail.replace(/((?:\\{2})*)(\\?)\|/g, function (_, $1, $2) {
if (!$2) {
// the | isn't already escaped, so escape it.
$2 = '\\'
}
// need to escape all those slashes *again*, without escaping the
// one that we need for escaping the | character. As it works out,
// escaping an even number of slashes can be done by simply repeating
// it exactly after itself. That's why this trick works.
//
// I am sorry that you have to see this.
return $1 + $1 + $2 + '|'
})
this.debug('tail=%j\n %s', tail, tail)
var t = pl.type === '*' ? star
: pl.type === '?' ? qmark
: '\\' + pl.type
hasMagic = true
re = re.slice(0, pl.reStart) + t + '\\(' + tail
}
// handle trailing things that only matter at the very end.
clearStateChar()
if (escaping) {
// trailing \\
re += '\\\\'
}
// only need to apply the nodot start if the re starts with
// something that could conceivably capture a dot
var addPatternStart = false
switch (re.charAt(0)) {
case '.':
case '[':
case '(': addPatternStart = true
}
// Hack to work around lack of negative lookbehind in JS
// A pattern like: *.!(x).!(y|z) needs to ensure that a name
// like 'a.xyz.yz' doesn't match. So, the first negative
// lookahead, has to look ALL the way ahead, to the end of
// the pattern.
for (var n = negativeLists.length - 1; n > -1; n--) {
var nl = negativeLists[n]
var nlBefore = re.slice(0, nl.reStart)
var nlFirst = re.slice(nl.reStart, nl.reEnd - 8)
var nlLast = re.slice(nl.reEnd - 8, nl.reEnd)
var nlAfter = re.slice(nl.reEnd)
nlLast += nlAfter
// Handle nested stuff like *(*.js|!(*.json)), where open parens
// mean that we should *not* include the ) in the bit that is considered
// "after" the negated section.
var openParensBefore = nlBefore.split('(').length - 1
var cleanAfter = nlAfter
for (i = 0; i < openParensBefore; i++) {
cleanAfter = cleanAfter.replace(/\)[+*?]?/, '')
}
nlAfter = cleanAfter
var dollar = ''
if (nlAfter === '' && isSub !== SUBPARSE) {
dollar = '$'
}
var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast
re = newRe
}
// if the re is not "" at this point, then we need to make sure
// it doesn't match against an empty path part.
// Otherwise a/* will match a/, which it should not.
if (re !== '' && hasMagic) {
re = '(?=.)' + re
}
if (addPatternStart) {
re = patternStart + re
}
// parsing just a piece of a larger pattern.
if (isSub === SUBPARSE) {
return [re, hasMagic]
}
// skip the regexp for non-magical patterns
// unescape anything in it, though, so that it'll be
// an exact match against a file etc.
if (!hasMagic) {
return globUnescape(pattern)
}
var flags = options.nocase ? 'i' : ''
var regExp = new RegExp('^' + re + '$', flags)
regExp._glob = pattern
regExp._src = re
return regExp
}
minimatch.makeRe = function (pattern, options) {
return new Minimatch(pattern, options || {}).makeRe()
}
Minimatch.prototype.makeRe = makeRe
function makeRe () {
if (this.regexp || this.regexp === false) return this.regexp
// at this point, this.set is a 2d array of partial
// pattern strings, or "**".
//
// It's better to use .match(). This function shouldn't
// be used, really, but it's pretty convenient sometimes,
// when you just want to work with a regex.
var set = this.set
if (!set.length) {
this.regexp = false
return this.regexp
}
var options = this.options
var twoStar = options.noglobstar ? star
: options.dot ? twoStarDot
: twoStarNoDot
var flags = options.nocase ? 'i' : ''
var re = set.map(function (pattern) {
return pattern.map(function (p) {
return (p === GLOBSTAR) ? twoStar
: (typeof p === 'string') ? regExpEscape(p)
: p._src
}).join('\\\/')
}).join('|')
// must match entire pattern
// ending in a * or ** will make it less strict.
re = '^(?:' + re + ')$'
// can match anything, as long as it's not this.
if (this.negate) re = '^(?!' + re + ').*$'
try {
this.regexp = new RegExp(re, flags)
} catch (ex) {
this.regexp = false
}
return this.regexp
}
minimatch.match = function (list, pattern, options) {
options = options || {}
var mm = new Minimatch(pattern, options)
list = list.filter(function (f) {
return mm.match(f)
})
if (mm.options.nonull && !list.length) {
list.push(pattern)
}
return list
}
Minimatch.prototype.match = match
function match (f, partial) {
this.debug('match', f, this.pattern)
// short-circuit in the case of busted things.
// comments, etc.
if (this.comment) return false
if (this.empty) return f === ''
if (f === '/' && partial) return true
var options = this.options
// windows: need to use /, not \
if (path.sep !== '/') {
f = f.split(path.sep).join('/')
}
// treat the test path as a set of pathparts.
f = f.split(slashSplit)
this.debug(this.pattern, 'split', f)
// just ONE of the pattern sets in this.set needs to match
// in order for it to be valid. If negating, then just one
// match means that we have failed.
// Either way, return on the first hit.
var set = this.set
this.debug(this.pattern, 'set', set)
// Find the basename of the path by looking for the last non-empty segment
var filename
var i
for (i = f.length - 1; i >= 0; i--) {
filename = f[i]
if (filename) break
}
for (i = 0; i < set.length; i++) {
var pattern = set[i]
var file = f
if (options.matchBase && pattern.length === 1) {
file = [filename]
}
var hit = this.matchOne(file, pattern, partial)
if (hit) {
if (options.flipNegate) return true
return !this.negate
}
}
// didn't get any hits. this is success if it's a negative
// pattern, failure otherwise.
if (options.flipNegate) return false
return this.negate
}
// set partial to true to test if, for example,
// "/a/b" matches the start of "/*/b/*/d"
// Partial means, if you run out of file before you run
// out of pattern, then that's fine, as long as all
// the parts match.
Minimatch.prototype.matchOne = function (file, pattern, partial) {
var options = this.options
this.debug('matchOne',
{ 'this': this, file: file, pattern: pattern })
this.debug('matchOne', file.length, pattern.length)
for (var fi = 0,
pi = 0,
fl = file.length,
pl = pattern.length
; (fi < fl) && (pi < pl)
; fi++, pi++) {
this.debug('matchOne loop')
var p = pattern[pi]
var f = file[fi]
this.debug(pattern, p, f)
// should be impossible.
// some invalid regexp stuff in the set.
if (p === false) return false
if (p === GLOBSTAR) {
this.debug('GLOBSTAR', [pattern, p, f])
// "**"
// a/**/b/**/c would match the following:
// a/b/x/y/z/c
// a/x/y/z/b/c
// a/b/x/b/x/c
// a/b/c
// To do this, take the rest of the pattern after
// the **, and see if it would match the file remainder.
// If so, return success.
// If not, the ** "swallows" a segment, and try again.
// This is recursively awful.
//
// a/**/b/**/c matching a/b/x/y/z/c
// - a matches a
// - doublestar
// - matchOne(b/x/y/z/c, b/**/c)
// - b matches b
// - doublestar
// - matchOne(x/y/z/c, c) -> no
// - matchOne(y/z/c, c) -> no
// - matchOne(z/c, c) -> no
// - matchOne(c, c) yes, hit
var fr = fi
var pr = pi + 1
if (pr === pl) {
this.debug('** at the end')
// a ** at the end will just swallow the rest.
// We have found a match.
// however, it will not swallow /.x, unless
// options.dot is set.
// . and .. are *never* matched by **, for explosively
// exponential reasons.
for (; fi < fl; fi++) {
if (file[fi] === '.' || file[fi] === '..' ||
(!options.dot && file[fi].charAt(0) === '.')) return false
}
return true
}
// ok, let's see if we can swallow whatever we can.
while (fr < fl) {
var swallowee = file[fr]
this.debug('\nglobstar while', file, fr, pattern, pr, swallowee)
// XXX remove this slice. Just pass the start index.
if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) {
this.debug('globstar found match!', fr, fl, swallowee)
// found a match.
return true
} else {
// can't swallow "." or ".." ever.
// can only swallow ".foo" when explicitly asked.
if (swallowee === '.' || swallowee === '..' ||
(!options.dot && swallowee.charAt(0) === '.')) {
this.debug('dot detected!', file, fr, pattern, pr)
break
}
// ** swallows a segment, and continue.
this.debug('globstar swallow a segment, and continue')
fr++
}
}
// no match was found.
// However, in partial mode, we can't say this is necessarily over.
// If there's more *pattern* left, then
if (partial) {
// ran out of file
this.debug('\n>>> no match, partial?', file, fr, pattern, pr)
if (fr === fl) return true
}
return false
}
// something other than **
// non-magic patterns just have to match exactly
// patterns with magic have been turned into regexps.
var hit
if (typeof p === 'string') {
if (options.nocase) {
hit = f.toLowerCase() === p.toLowerCase()
} else {
hit = f === p
}
this.debug('string match', p, f, hit)
} else {
hit = f.match(p)
this.debug('pattern match', p, f, hit)
}
if (!hit) return false
}
// Note: ending in / means that we'll get a final ""
// at the end of the pattern. This can only match a
// corresponding "" at the end of the file.
// If the file ends in /, then it can only match a
// a pattern that ends in /, unless the pattern just
// doesn't have any more for it. But, a/b/ should *not*
// match "a/b/*", even though "" matches against the
// [^/]*? pattern, except in partial mode, where it might
// simply not be reached yet.
// However, a/b/ should still satisfy a/*
// now either we fell off the end of the pattern, or we're done.
if (fi === fl && pi === pl) {
// ran out of pattern and filename at the same time.
// an exact hit!
return true
} else if (fi === fl) {
// ran out of file, but still had pattern left.
// this is ok if we're doing the match as part of
// a glob fs traversal.
return partial
} else if (pi === pl) {
// ran out of pattern, still have file left.
// this is only acceptable if we're on the very last
// empty segment of a file with a trailing slash.
// a/* should match a/b/
var emptyFileEnd = (fi === fl - 1) && (file[fi] === '')
return emptyFileEnd
}
// should be unreachable.
throw new Error('wtf?')
}
// replace stuff like \* with *
function globUnescape (s) {
return s.replace(/\\(.)/g, '$1')
}
function | (s) {
return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&')
}
| regExpEscape | identifier_name |
tracked_services.py | """
Classes and functions to manage arkOS tracked services.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import glob
import miniupnpc
import random
from arkos import config, logger, policies, signals, storage, security
from arkos.messages import Notification
from arkos.utilities import errors, test_port
COMMON_PORTS = [3000, 3306, 5222, 5223, 5232]
class SecurityPolicy:
"""
An object representing an arkOS firewall policy for a service.
SecurityPolicies are created for all websites, as well as for all apps
that have port-based services registered in their metadata files. They
are used to compute the proper values to put into the arkOS firewall
(iptables) on regeneration or app update.
"""
def __init__(self, type="", id="", name="", icon="", ports=[],
policy=2, addr=None):
"""
Initialize the policy object.
To create a new policy or to see more info about these parameters,
see ``tracked_services.register()`` below.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param int policy: Policy identifier
:param str addr: Address and port (for websites)
"""
self.type = type
self.id = id
self.name = name
self.icon = icon
self.ports = ports
self.policy = policy
self.addr = addr
def save(self, fw=True):
"""
Save changes to a security policy to disk.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
policies.append(
"custom",
{"id": self.id, "name": self.name, "icon": self.icon,
"ports": self.ports, "policy": self.policy}
)
else:
policies.set(self.type, self.id, self.policy)
policies.save()
storage.policies[self.id] = self
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def remove(self, fw=True):
"""
Remove a security policy from the firewall and config.
You should probably use ``tracked_services.deregister()`` for this.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
else:
policies.remove(self.type, self.id)
policies.save()
if self.id in storage.policies:
del storage.policies[self.id]
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
@property
def as_dict(self):
"""Return policy metadata as dict."""
return {
"type": self.type,
"id": self.id,
"name": self.name,
"icon": self.icon,
"ports": self.ports,
"policy": self.policy,
"is_ready": True
}
@property
def serialized(self):
"""Return serializable policy metadata as dict."""
return self.as_dict
class PortConflictError(errors.Error):
"""Raised when an address and port requested are not available."""
def __init__(self, port, domain):
self.port = port
self.domain = domain
def __str__(self):
return ("This port is taken by another site or service, "
"please choose another")
def get(id=None, type=None):
"""
Get all security policies from cache storage.
:param str id: App or website ID
:param str type: Filter by type ('website', 'app', etc)
"""
data = storage.policies
if id:
return data.get(id)
if type:
return filter(lambda x: x.type == type, data.values())
return data.values()
def register(type, id, name, icon, ports, domain=None, policy=0,
default_policy=2, fw=True):
"""
Register a new security policy with the system.
The ``ports`` parameter takes tuples of ports to manage, like so:
ports = [('tcp', 8000), ('udp', 21500)]
The ``policy`` parameter is an integer with the following meaning:
0 = Restrict access from all outside hosts. (excludes loopback)
1 = Restrict access to local networks only.
2 = Allow access to all networks and ultimately the whole Internet.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param str domain: Address (for websites)
:param int policy: Policy identifier
:param int default_policy: Application default policy to use on first init
:param bool fw: Regenerate the firewall after save?
"""
if not policy:
policy = policies.get(type, id, default_policy)
svc = SecurityPolicy(type, id, name, icon, ports, policy, domain)
svc.save(fw)
def deregister(type, id="", fw=True):
"""
Deregister a security policy.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param bool fw: Regenerate the firewall after save?
"""
for x in get(type=type):
if not id:
x.remove(fw=False)
elif x.id == id:
x.remove(fw=False)
break
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def refresh_policies():
"""Recreate security policies based on what is stored in config."""
svcs = get()
newpolicies = {}
for x in policies.get_all():
if x == "custom":
newpolicies["custom"] = policies.get_all("custom")
for y in svcs:
if x == y.type:
if x not in newpolicies:
newpolicies[x] = {}
for s in policies.get_all(x):
if s == y.id:
newpolicies[x][s] = policies.get(x, s)
policies.config = newpolicies
policies.save()
def is_open_port(port, domain=None, ignore_common=False):
"""
Check if the specified port is taken by a tracked service or not.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param int port: Port number to check
:param str domain: Address to check (for websites)
:param bool ignore_common: Don't return False for commonly used ports?
:returns: True if port is open
:rtype bool:
"""
data = get()
ports = []
for x in data:
if domain and x.type == "website" and domain != x.addr:
continue
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
return port not in ports
def _upnp_igd_connect():
logger.debug("TrSv", "Attempting to connect to uPnP IGD")
upnpc = miniupnpc.UPnP()
upnpc.discoverdelay = 3000
devs = upnpc.discover()
if devs == 0:
msg = "Failed to connect to uPnP IGD: no devices found"
logger.warning("TrSv", msg)
return
try:
upnpc.selectigd()
except Exception as e:
msg = "Failed to connect to uPnP IGD: {0}"
logger.warning("TrSv", msg.format(str(e)))
return upnpc
def open_upnp(port):
"""
Open and forward a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(int(port[1]), port[0].upper()):
try:
upnpc.deleteportmapping(int(port[1]), port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(
int(port[1]), port[0].upper(), upnpc.lanaddr, int(port[1]),
pf.format(port[1]), ''
)
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_upnp(port):
"""
Remove forwarding of a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
def initialize_upnp(svcs):
"""
Initialize uPnP port forwarding with the IGD.
:param SecurityPolicy svcs: SecurityPolicies to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for svc in svcs:
if svc.policy != 2:
continue
for protocol, port in svc.ports:
if upnpc.getspecificportmapping(port, protocol.upper()):
try:
upnpc.deleteportmapping(port, protocol.upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port, protocol.upper(), upnpc.lanaddr,
port, pf.format(port), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"\
.format(port, str(e))
logger.warning("TrSv", msg)
def open_all_upnp(ports):
"""
Open and forward multiple ports with the local uPnP IGD.
:param list ports: List of port objects to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port[1], port[0].upper(), upnpc.lanaddr,
port[1], pf.format(port[1]), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_all_upnp(ports):
"""
Remove forwarding of multiple ports with the local uPnP IGD.
:param list ports: List of port objects to close
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
|
def get_open_port(ignore_common=False):
"""
Get a random TCP port not currently in use by a tracked service.
:param bool ignore_common: Don't exclude commonly used ports?
:returns: Port number
:rtype: int
"""
data = get()
ports = []
for x in data:
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
r = random.randint(8001, 65534)
return r if r not in ports else get_open_port()
def initialize():
"""Initialize security policy tracking."""
logger.debug("TrSv", "Initializing security policy tracking")
# arkOS
policy = policies.get("arkos", "arkos", 2)
port = [("tcp", int(config.get("genesis", "port")))]
pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
"server", port, policy)
storage.policies[pol.id] = pol
# uPNP
policy = policies.get("arkos", "upnp", 1)
pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms",
"server", [("udp", 1900)], policy)
if config.get("general", "enable_upnp"):
storage.policies[pol.id] = pol
# SSHd
policy = policies.get("arkos", "sshd", 1)
pol = SecurityPolicy(
"arkos", "sshd", "SSH", "server", [("tcp", 22)], policy)
# ACME dummies
for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
pol = SecurityPolicy(
"acme", acme_name, "{0} (ACME Validation)".format(acme_name),
"globe", [('tcp', 80)], 2
)
storage.policies[pol.id] = pol
for x in policies.get_all("custom"):
pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
x["ports"], x["policy"])
storage.policies[pol.id] = pol
def register_website(site):
"""Convenience function to register a website as tracked service."""
register("website", site.id, getattr(site, "name", site.id),
site.app.icon if site.app else "globe",
[("tcp", site.port)], site.domain)
def deregister_website(site):
"""Convenience function to deregister a website as tracked service."""
deregister("website", site.id)
def open_upnp_site(site):
"""Convenience function to register a website with uPnP."""
if config.get("general", "enable_upnp"):
open_upnp(("tcp", site.port))
domain = site.domain
if domain == "localhost" or domain.endswith(".local"):
domain = None
try:
test_port(config.get("general", "repo_server"), site.port, domain)
except:
msg = ("Port {0} and/or domain {1} could not be tested."
" Make sure your ports are properly forwarded and"
" that your domain is properly set up.")\
.format(site.port, site.domain)
Notification("error", "TrSv", msg).send()
def close_upnp_site(site):
"""Convenience function to deregister a website with uPnP."""
if config.get("general", "enable_upnp"):
close_upnp(("tcp", site.port))
signals.add("tracked_services", "websites", "site_loaded", register_website)
signals.add("tracked_services", "websites", "site_installed", register_website)
signals.add("tracked_services", "websites", "site_installed", open_upnp_site)
signals.add("tracked_services", "websites", "site_removed", deregister_website)
signals.add("tracked_services", "websites", "site_removed", close_upnp_site)
| try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass | conditional_block |
tracked_services.py | """
Classes and functions to manage arkOS tracked services.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import glob
import miniupnpc
import random
from arkos import config, logger, policies, signals, storage, security
from arkos.messages import Notification
from arkos.utilities import errors, test_port
COMMON_PORTS = [3000, 3306, 5222, 5223, 5232]
class SecurityPolicy:
"""
An object representing an arkOS firewall policy for a service.
SecurityPolicies are created for all websites, as well as for all apps
that have port-based services registered in their metadata files. They
are used to compute the proper values to put into the arkOS firewall
(iptables) on regeneration or app update.
"""
def __init__(self, type="", id="", name="", icon="", ports=[],
policy=2, addr=None):
"""
Initialize the policy object.
To create a new policy or to see more info about these parameters,
see ``tracked_services.register()`` below.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param int policy: Policy identifier
:param str addr: Address and port (for websites)
"""
self.type = type
self.id = id
self.name = name
self.icon = icon
self.ports = ports
self.policy = policy
self.addr = addr
def save(self, fw=True):
"""
Save changes to a security policy to disk.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
policies.append(
"custom",
{"id": self.id, "name": self.name, "icon": self.icon,
"ports": self.ports, "policy": self.policy}
)
else:
policies.set(self.type, self.id, self.policy)
policies.save()
storage.policies[self.id] = self
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def remove(self, fw=True):
"""
Remove a security policy from the firewall and config.
You should probably use ``tracked_services.deregister()`` for this.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
else:
policies.remove(self.type, self.id)
policies.save()
if self.id in storage.policies:
del storage.policies[self.id]
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
@property
def as_dict(self):
"""Return policy metadata as dict."""
return {
"type": self.type,
"id": self.id,
"name": self.name,
"icon": self.icon,
"ports": self.ports,
"policy": self.policy,
"is_ready": True
}
@property
def serialized(self):
"""Return serializable policy metadata as dict."""
return self.as_dict
class PortConflictError(errors.Error):
"""Raised when an address and port requested are not available."""
def __init__(self, port, domain):
self.port = port
self.domain = domain
def __str__(self):
return ("This port is taken by another site or service, "
"please choose another")
def get(id=None, type=None):
"""
Get all security policies from cache storage.
:param str id: App or website ID
:param str type: Filter by type ('website', 'app', etc)
"""
data = storage.policies
if id:
return data.get(id)
if type:
return filter(lambda x: x.type == type, data.values())
return data.values()
def register(type, id, name, icon, ports, domain=None, policy=0,
default_policy=2, fw=True):
"""
Register a new security policy with the system.
The ``ports`` parameter takes tuples of ports to manage, like so:
ports = [('tcp', 8000), ('udp', 21500)]
The ``policy`` parameter is an integer with the following meaning:
0 = Restrict access from all outside hosts. (excludes loopback)
1 = Restrict access to local networks only.
2 = Allow access to all networks and ultimately the whole Internet.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param str domain: Address (for websites)
:param int policy: Policy identifier
:param int default_policy: Application default policy to use on first init
:param bool fw: Regenerate the firewall after save?
"""
if not policy:
policy = policies.get(type, id, default_policy)
svc = SecurityPolicy(type, id, name, icon, ports, policy, domain)
svc.save(fw)
def deregister(type, id="", fw=True):
"""
Deregister a security policy.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param bool fw: Regenerate the firewall after save?
"""
for x in get(type=type):
if not id:
x.remove(fw=False)
elif x.id == id:
x.remove(fw=False)
break
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def refresh_policies():
"""Recreate security policies based on what is stored in config."""
svcs = get()
newpolicies = {}
for x in policies.get_all():
if x == "custom":
newpolicies["custom"] = policies.get_all("custom")
for y in svcs:
if x == y.type:
if x not in newpolicies:
newpolicies[x] = {}
for s in policies.get_all(x):
if s == y.id:
newpolicies[x][s] = policies.get(x, s)
policies.config = newpolicies
policies.save()
def is_open_port(port, domain=None, ignore_common=False):
"""
Check if the specified port is taken by a tracked service or not.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param int port: Port number to check
:param str domain: Address to check (for websites)
:param bool ignore_common: Don't return False for commonly used ports?
:returns: True if port is open
:rtype bool:
"""
data = get()
ports = []
for x in data:
if domain and x.type == "website" and domain != x.addr:
continue
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
return port not in ports
def | ():
logger.debug("TrSv", "Attempting to connect to uPnP IGD")
upnpc = miniupnpc.UPnP()
upnpc.discoverdelay = 3000
devs = upnpc.discover()
if devs == 0:
msg = "Failed to connect to uPnP IGD: no devices found"
logger.warning("TrSv", msg)
return
try:
upnpc.selectigd()
except Exception as e:
msg = "Failed to connect to uPnP IGD: {0}"
logger.warning("TrSv", msg.format(str(e)))
return upnpc
def open_upnp(port):
"""
Open and forward a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(int(port[1]), port[0].upper()):
try:
upnpc.deleteportmapping(int(port[1]), port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(
int(port[1]), port[0].upper(), upnpc.lanaddr, int(port[1]),
pf.format(port[1]), ''
)
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_upnp(port):
"""
Remove forwarding of a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
def initialize_upnp(svcs):
"""
Initialize uPnP port forwarding with the IGD.
:param SecurityPolicy svcs: SecurityPolicies to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for svc in svcs:
if svc.policy != 2:
continue
for protocol, port in svc.ports:
if upnpc.getspecificportmapping(port, protocol.upper()):
try:
upnpc.deleteportmapping(port, protocol.upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port, protocol.upper(), upnpc.lanaddr,
port, pf.format(port), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"\
.format(port, str(e))
logger.warning("TrSv", msg)
def open_all_upnp(ports):
"""
Open and forward multiple ports with the local uPnP IGD.
:param list ports: List of port objects to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port[1], port[0].upper(), upnpc.lanaddr,
port[1], pf.format(port[1]), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_all_upnp(ports):
"""
Remove forwarding of multiple ports with the local uPnP IGD.
:param list ports: List of port objects to close
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
def get_open_port(ignore_common=False):
"""
Get a random TCP port not currently in use by a tracked service.
:param bool ignore_common: Don't exclude commonly used ports?
:returns: Port number
:rtype: int
"""
data = get()
ports = []
for x in data:
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
r = random.randint(8001, 65534)
return r if r not in ports else get_open_port()
def initialize():
"""Initialize security policy tracking."""
logger.debug("TrSv", "Initializing security policy tracking")
# arkOS
policy = policies.get("arkos", "arkos", 2)
port = [("tcp", int(config.get("genesis", "port")))]
pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
"server", port, policy)
storage.policies[pol.id] = pol
# uPNP
policy = policies.get("arkos", "upnp", 1)
pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms",
"server", [("udp", 1900)], policy)
if config.get("general", "enable_upnp"):
storage.policies[pol.id] = pol
# SSHd
policy = policies.get("arkos", "sshd", 1)
pol = SecurityPolicy(
"arkos", "sshd", "SSH", "server", [("tcp", 22)], policy)
# ACME dummies
for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
pol = SecurityPolicy(
"acme", acme_name, "{0} (ACME Validation)".format(acme_name),
"globe", [('tcp', 80)], 2
)
storage.policies[pol.id] = pol
for x in policies.get_all("custom"):
pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
x["ports"], x["policy"])
storage.policies[pol.id] = pol
def register_website(site):
"""Convenience function to register a website as tracked service."""
register("website", site.id, getattr(site, "name", site.id),
site.app.icon if site.app else "globe",
[("tcp", site.port)], site.domain)
def deregister_website(site):
"""Convenience function to deregister a website as tracked service."""
deregister("website", site.id)
def open_upnp_site(site):
"""Convenience function to register a website with uPnP."""
if config.get("general", "enable_upnp"):
open_upnp(("tcp", site.port))
domain = site.domain
if domain == "localhost" or domain.endswith(".local"):
domain = None
try:
test_port(config.get("general", "repo_server"), site.port, domain)
except:
msg = ("Port {0} and/or domain {1} could not be tested."
" Make sure your ports are properly forwarded and"
" that your domain is properly set up.")\
.format(site.port, site.domain)
Notification("error", "TrSv", msg).send()
def close_upnp_site(site):
"""Convenience function to deregister a website with uPnP."""
if config.get("general", "enable_upnp"):
close_upnp(("tcp", site.port))
signals.add("tracked_services", "websites", "site_loaded", register_website)
signals.add("tracked_services", "websites", "site_installed", register_website)
signals.add("tracked_services", "websites", "site_installed", open_upnp_site)
signals.add("tracked_services", "websites", "site_removed", deregister_website)
signals.add("tracked_services", "websites", "site_removed", close_upnp_site)
| _upnp_igd_connect | identifier_name |
tracked_services.py | """
Classes and functions to manage arkOS tracked services.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import glob
import miniupnpc
import random
from arkos import config, logger, policies, signals, storage, security
from arkos.messages import Notification
from arkos.utilities import errors, test_port
COMMON_PORTS = [3000, 3306, 5222, 5223, 5232]
class SecurityPolicy:
"""
An object representing an arkOS firewall policy for a service.
SecurityPolicies are created for all websites, as well as for all apps
that have port-based services registered in their metadata files. They
are used to compute the proper values to put into the arkOS firewall
(iptables) on regeneration or app update.
"""
def __init__(self, type="", id="", name="", icon="", ports=[],
policy=2, addr=None):
"""
Initialize the policy object.
To create a new policy or to see more info about these parameters,
see ``tracked_services.register()`` below.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param int policy: Policy identifier
:param str addr: Address and port (for websites)
"""
self.type = type
self.id = id
self.name = name
self.icon = icon
self.ports = ports
self.policy = policy
self.addr = addr
def save(self, fw=True):
"""
Save changes to a security policy to disk.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
policies.append(
"custom",
{"id": self.id, "name": self.name, "icon": self.icon,
"ports": self.ports, "policy": self.policy}
)
else:
policies.set(self.type, self.id, self.policy)
policies.save()
storage.policies[self.id] = self
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def remove(self, fw=True):
"""
Remove a security policy from the firewall and config.
You should probably use ``tracked_services.deregister()`` for this.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
else:
policies.remove(self.type, self.id)
policies.save()
if self.id in storage.policies:
del storage.policies[self.id]
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
@property
def as_dict(self):
"""Return policy metadata as dict."""
return {
"type": self.type,
"id": self.id,
"name": self.name,
"icon": self.icon,
"ports": self.ports,
"policy": self.policy,
"is_ready": True
}
@property
def serialized(self):
"""Return serializable policy metadata as dict."""
return self.as_dict
class PortConflictError(errors.Error):
"""Raised when an address and port requested are not available."""
def __init__(self, port, domain):
self.port = port
self.domain = domain
def __str__(self):
return ("This port is taken by another site or service, "
"please choose another")
def get(id=None, type=None):
"""
Get all security policies from cache storage.
:param str id: App or website ID
:param str type: Filter by type ('website', 'app', etc)
"""
data = storage.policies
if id:
return data.get(id)
if type:
return filter(lambda x: x.type == type, data.values())
return data.values()
def register(type, id, name, icon, ports, domain=None, policy=0,
default_policy=2, fw=True):
"""
Register a new security policy with the system.
The ``ports`` parameter takes tuples of ports to manage, like so:
ports = [('tcp', 8000), ('udp', 21500)]
The ``policy`` parameter is an integer with the following meaning:
0 = Restrict access from all outside hosts. (excludes loopback)
1 = Restrict access to local networks only.
2 = Allow access to all networks and ultimately the whole Internet.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param str domain: Address (for websites)
:param int policy: Policy identifier
:param int default_policy: Application default policy to use on first init
:param bool fw: Regenerate the firewall after save?
"""
if not policy:
policy = policies.get(type, id, default_policy)
svc = SecurityPolicy(type, id, name, icon, ports, policy, domain)
svc.save(fw)
def deregister(type, id="", fw=True):
"""
Deregister a security policy.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param bool fw: Regenerate the firewall after save?
"""
for x in get(type=type):
if not id:
x.remove(fw=False)
elif x.id == id:
x.remove(fw=False)
break
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def refresh_policies():
"""Recreate security policies based on what is stored in config."""
svcs = get()
newpolicies = {}
for x in policies.get_all():
if x == "custom":
newpolicies["custom"] = policies.get_all("custom")
for y in svcs:
if x == y.type:
if x not in newpolicies:
newpolicies[x] = {}
for s in policies.get_all(x):
if s == y.id:
newpolicies[x][s] = policies.get(x, s)
policies.config = newpolicies
policies.save()
def is_open_port(port, domain=None, ignore_common=False):
"""
Check if the specified port is taken by a tracked service or not.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param int port: Port number to check
:param str domain: Address to check (for websites)
:param bool ignore_common: Don't return False for commonly used ports?
:returns: True if port is open
:rtype bool:
"""
data = get()
ports = []
for x in data:
if domain and x.type == "website" and domain != x.addr:
continue
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
return port not in ports
def _upnp_igd_connect():
logger.debug("TrSv", "Attempting to connect to uPnP IGD")
upnpc = miniupnpc.UPnP()
upnpc.discoverdelay = 3000
devs = upnpc.discover()
if devs == 0:
msg = "Failed to connect to uPnP IGD: no devices found"
logger.warning("TrSv", msg)
return
try:
upnpc.selectigd()
except Exception as e:
msg = "Failed to connect to uPnP IGD: {0}"
logger.warning("TrSv", msg.format(str(e)))
return upnpc
def open_upnp(port):
"""
Open and forward a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(int(port[1]), port[0].upper()):
try:
upnpc.deleteportmapping(int(port[1]), port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(
int(port[1]), port[0].upper(), upnpc.lanaddr, int(port[1]),
pf.format(port[1]), ''
)
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_upnp(port):
|
def initialize_upnp(svcs):
"""
Initialize uPnP port forwarding with the IGD.
:param SecurityPolicy svcs: SecurityPolicies to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for svc in svcs:
if svc.policy != 2:
continue
for protocol, port in svc.ports:
if upnpc.getspecificportmapping(port, protocol.upper()):
try:
upnpc.deleteportmapping(port, protocol.upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port, protocol.upper(), upnpc.lanaddr,
port, pf.format(port), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"\
.format(port, str(e))
logger.warning("TrSv", msg)
def open_all_upnp(ports):
"""
Open and forward multiple ports with the local uPnP IGD.
:param list ports: List of port objects to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port[1], port[0].upper(), upnpc.lanaddr,
port[1], pf.format(port[1]), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_all_upnp(ports):
"""
Remove forwarding of multiple ports with the local uPnP IGD.
:param list ports: List of port objects to close
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
def get_open_port(ignore_common=False):
"""
Get a random TCP port not currently in use by a tracked service.
:param bool ignore_common: Don't exclude commonly used ports?
:returns: Port number
:rtype: int
"""
data = get()
ports = []
for x in data:
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
r = random.randint(8001, 65534)
return r if r not in ports else get_open_port()
def initialize():
"""Initialize security policy tracking."""
logger.debug("TrSv", "Initializing security policy tracking")
# arkOS
policy = policies.get("arkos", "arkos", 2)
port = [("tcp", int(config.get("genesis", "port")))]
pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
"server", port, policy)
storage.policies[pol.id] = pol
# uPNP
policy = policies.get("arkos", "upnp", 1)
pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms",
"server", [("udp", 1900)], policy)
if config.get("general", "enable_upnp"):
storage.policies[pol.id] = pol
# SSHd
policy = policies.get("arkos", "sshd", 1)
pol = SecurityPolicy(
"arkos", "sshd", "SSH", "server", [("tcp", 22)], policy)
# ACME dummies
for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
pol = SecurityPolicy(
"acme", acme_name, "{0} (ACME Validation)".format(acme_name),
"globe", [('tcp', 80)], 2
)
storage.policies[pol.id] = pol
for x in policies.get_all("custom"):
pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
x["ports"], x["policy"])
storage.policies[pol.id] = pol
def register_website(site):
"""Convenience function to register a website as tracked service."""
register("website", site.id, getattr(site, "name", site.id),
site.app.icon if site.app else "globe",
[("tcp", site.port)], site.domain)
def deregister_website(site):
"""Convenience function to deregister a website as tracked service."""
deregister("website", site.id)
def open_upnp_site(site):
"""Convenience function to register a website with uPnP."""
if config.get("general", "enable_upnp"):
open_upnp(("tcp", site.port))
domain = site.domain
if domain == "localhost" or domain.endswith(".local"):
domain = None
try:
test_port(config.get("general", "repo_server"), site.port, domain)
except:
msg = ("Port {0} and/or domain {1} could not be tested."
" Make sure your ports are properly forwarded and"
" that your domain is properly set up.")\
.format(site.port, site.domain)
Notification("error", "TrSv", msg).send()
def close_upnp_site(site):
"""Convenience function to deregister a website with uPnP."""
if config.get("general", "enable_upnp"):
close_upnp(("tcp", site.port))
signals.add("tracked_services", "websites", "site_loaded", register_website)
signals.add("tracked_services", "websites", "site_installed", register_website)
signals.add("tracked_services", "websites", "site_installed", open_upnp_site)
signals.add("tracked_services", "websites", "site_removed", deregister_website)
signals.add("tracked_services", "websites", "site_removed", close_upnp_site)
| """
Remove forwarding of a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass | identifier_body |
tracked_services.py | """
Classes and functions to manage arkOS tracked services.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import glob
import miniupnpc
import random
from arkos import config, logger, policies, signals, storage, security
from arkos.messages import Notification
from arkos.utilities import errors, test_port
COMMON_PORTS = [3000, 3306, 5222, 5223, 5232]
class SecurityPolicy:
"""
An object representing an arkOS firewall policy for a service.
SecurityPolicies are created for all websites, as well as for all apps
that have port-based services registered in their metadata files. They
are used to compute the proper values to put into the arkOS firewall
(iptables) on regeneration or app update.
"""
def __init__(self, type="", id="", name="", icon="", ports=[],
policy=2, addr=None):
"""
Initialize the policy object.
To create a new policy or to see more info about these parameters,
see ``tracked_services.register()`` below.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param int policy: Policy identifier
:param str addr: Address and port (for websites)
"""
self.type = type
self.id = id
self.name = name
self.icon = icon
self.ports = ports
self.policy = policy
self.addr = addr
def save(self, fw=True):
"""
Save changes to a security policy to disk.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
policies.append(
"custom",
{"id": self.id, "name": self.name, "icon": self.icon,
"ports": self.ports, "policy": self.policy}
)
else:
policies.set(self.type, self.id, self.policy)
policies.save()
storage.policies[self.id] = self
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def remove(self, fw=True):
"""
Remove a security policy from the firewall and config.
You should probably use ``tracked_services.deregister()`` for this.
:param bool fw: Regenerate the firewall after save?
"""
if self.type == "custom":
for x in policies.get_all("custom"):
if self.id == x["id"]:
policies.remove_list("custom", x)
break
else:
policies.remove(self.type, self.id)
policies.save()
if self.id in storage.policies:
del storage.policies[self.id]
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
@property
def as_dict(self):
"""Return policy metadata as dict."""
return {
"type": self.type,
"id": self.id,
"name": self.name,
"icon": self.icon,
"ports": self.ports,
"policy": self.policy,
"is_ready": True
}
@property
def serialized(self):
"""Return serializable policy metadata as dict."""
return self.as_dict
class PortConflictError(errors.Error):
"""Raised when an address and port requested are not available."""
def __init__(self, port, domain):
self.port = port
self.domain = domain
def __str__(self):
return ("This port is taken by another site or service, "
"please choose another")
def get(id=None, type=None):
"""
Get all security policies from cache storage.
:param str id: App or website ID
:param str type: Filter by type ('website', 'app', etc)
"""
data = storage.policies
if id:
return data.get(id)
if type:
return filter(lambda x: x.type == type, data.values())
return data.values()
def register(type, id, name, icon, ports, domain=None, policy=0,
default_policy=2, fw=True):
"""
Register a new security policy with the system.
The ``ports`` parameter takes tuples of ports to manage, like so:
ports = [('tcp', 8000), ('udp', 21500)]
The ``policy`` parameter is an integer with the following meaning:
0 = Restrict access from all outside hosts. (excludes loopback)
1 = Restrict access to local networks only.
2 = Allow access to all networks and ultimately the whole Internet.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param str type: Policy type ('website', 'app', etc)
:param str id: Website or app ID
:param str name: Display name to use in Security settings pane
:param str icon: FontAwesome icon class name
:param list ports: List of port tuples to allow/restrict
:param str domain: Address (for websites)
:param int policy: Policy identifier
:param int default_policy: Application default policy to use on first init
:param bool fw: Regenerate the firewall after save?
"""
if not policy:
policy = policies.get(type, id, default_policy)
svc = SecurityPolicy(type, id, name, icon, ports, policy, domain)
svc.save(fw)
def deregister(type, id="", fw=True): | :param str id: Website or app ID
:param bool fw: Regenerate the firewall after save?
"""
for x in get(type=type):
if not id:
x.remove(fw=False)
elif x.id == id:
x.remove(fw=False)
break
if config.get("general", "firewall") and fw:
security.regenerate_firewall(get())
def refresh_policies():
"""Recreate security policies based on what is stored in config."""
svcs = get()
newpolicies = {}
for x in policies.get_all():
if x == "custom":
newpolicies["custom"] = policies.get_all("custom")
for y in svcs:
if x == y.type:
if x not in newpolicies:
newpolicies[x] = {}
for s in policies.get_all(x):
if s == y.id:
newpolicies[x][s] = policies.get(x, s)
policies.config = newpolicies
policies.save()
def is_open_port(port, domain=None, ignore_common=False):
"""
Check if the specified port is taken by a tracked service or not.
Addresses should be provided for websites, because multiple websites can
be served from the same port (SNI) as long as the address is different.
:param int port: Port number to check
:param str domain: Address to check (for websites)
:param bool ignore_common: Don't return False for commonly used ports?
:returns: True if port is open
:rtype bool:
"""
data = get()
ports = []
for x in data:
if domain and x.type == "website" and domain != x.addr:
continue
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
return port not in ports
def _upnp_igd_connect():
logger.debug("TrSv", "Attempting to connect to uPnP IGD")
upnpc = miniupnpc.UPnP()
upnpc.discoverdelay = 3000
devs = upnpc.discover()
if devs == 0:
msg = "Failed to connect to uPnP IGD: no devices found"
logger.warning("TrSv", msg)
return
try:
upnpc.selectigd()
except Exception as e:
msg = "Failed to connect to uPnP IGD: {0}"
logger.warning("TrSv", msg.format(str(e)))
return upnpc
def open_upnp(port):
"""
Open and forward a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(int(port[1]), port[0].upper()):
try:
upnpc.deleteportmapping(int(port[1]), port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(
int(port[1]), port[0].upper(), upnpc.lanaddr, int(port[1]),
pf.format(port[1]), ''
)
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_upnp(port):
"""
Remove forwarding of a port with the local uPnP IGD.
:param tuple port: Port protocol and number
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
def initialize_upnp(svcs):
"""
Initialize uPnP port forwarding with the IGD.
:param SecurityPolicy svcs: SecurityPolicies to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for svc in svcs:
if svc.policy != 2:
continue
for protocol, port in svc.ports:
if upnpc.getspecificportmapping(port, protocol.upper()):
try:
upnpc.deleteportmapping(port, protocol.upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port, protocol.upper(), upnpc.lanaddr,
port, pf.format(port), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"\
.format(port, str(e))
logger.warning("TrSv", msg)
def open_all_upnp(ports):
"""
Open and forward multiple ports with the local uPnP IGD.
:param list ports: List of port objects to open
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
try:
pf = 'arkOS Port Forwarding: {0}'
upnpc.addportmapping(port[1], port[0].upper(), upnpc.lanaddr,
port[1], pf.format(port[1]), '')
except Exception as e:
msg = "Failed to register {0} with uPnP IGD: {1}"
logger.error("TrSv", msg.format(port, str(e)))
def close_all_upnp(ports):
"""
Remove forwarding of multiple ports with the local uPnP IGD.
:param list ports: List of port objects to close
"""
upnpc = _upnp_igd_connect()
if not upnpc:
return
for port in [x for x in ports]:
if upnpc.getspecificportmapping(port[1], port[0].upper()):
try:
upnpc.deleteportmapping(port[1], port[0].upper())
except:
pass
def get_open_port(ignore_common=False):
"""
Get a random TCP port not currently in use by a tracked service.
:param bool ignore_common: Don't exclude commonly used ports?
:returns: Port number
:rtype: int
"""
data = get()
ports = []
for x in data:
for y in x.ports:
ports.append(int(y[1]))
if not ignore_common:
ports = ports + COMMON_PORTS
r = random.randint(8001, 65534)
return r if r not in ports else get_open_port()
def initialize():
"""Initialize security policy tracking."""
logger.debug("TrSv", "Initializing security policy tracking")
# arkOS
policy = policies.get("arkos", "arkos", 2)
port = [("tcp", int(config.get("genesis", "port")))]
pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
"server", port, policy)
storage.policies[pol.id] = pol
# uPNP
policy = policies.get("arkos", "upnp", 1)
pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms",
"server", [("udp", 1900)], policy)
if config.get("general", "enable_upnp"):
storage.policies[pol.id] = pol
# SSHd
policy = policies.get("arkos", "sshd", 1)
pol = SecurityPolicy(
"arkos", "sshd", "SSH", "server", [("tcp", 22)], policy)
# ACME dummies
for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
pol = SecurityPolicy(
"acme", acme_name, "{0} (ACME Validation)".format(acme_name),
"globe", [('tcp', 80)], 2
)
storage.policies[pol.id] = pol
for x in policies.get_all("custom"):
pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
x["ports"], x["policy"])
storage.policies[pol.id] = pol
def register_website(site):
"""Convenience function to register a website as tracked service."""
register("website", site.id, getattr(site, "name", site.id),
site.app.icon if site.app else "globe",
[("tcp", site.port)], site.domain)
def deregister_website(site):
"""Convenience function to deregister a website as tracked service."""
deregister("website", site.id)
def open_upnp_site(site):
"""Convenience function to register a website with uPnP."""
if config.get("general", "enable_upnp"):
open_upnp(("tcp", site.port))
domain = site.domain
if domain == "localhost" or domain.endswith(".local"):
domain = None
try:
test_port(config.get("general", "repo_server"), site.port, domain)
except:
msg = ("Port {0} and/or domain {1} could not be tested."
" Make sure your ports are properly forwarded and"
" that your domain is properly set up.")\
.format(site.port, site.domain)
Notification("error", "TrSv", msg).send()
def close_upnp_site(site):
"""Convenience function to deregister a website with uPnP."""
if config.get("general", "enable_upnp"):
close_upnp(("tcp", site.port))
signals.add("tracked_services", "websites", "site_loaded", register_website)
signals.add("tracked_services", "websites", "site_installed", register_website)
signals.add("tracked_services", "websites", "site_installed", open_upnp_site)
signals.add("tracked_services", "websites", "site_removed", deregister_website)
signals.add("tracked_services", "websites", "site_removed", close_upnp_site) | """
Deregister a security policy.
:param str type: Policy type ('website', 'app', etc) | random_line_split |
runner.py | #
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from ast import literal_eval
from threading import Thread
from ovirtscheduler import utils
class PythonMethodRunner(Thread):
def __init__(self, path, module, cls, method, args, request_id=''):
super(PythonMethodRunner, self).__init__(group=None)
logger = logging.getLogger()
self._log_adapter = utils.RequestAdapter(
logger,
{'method': 'PythonMethodRunner',
'request_id': request_id})
self._path = path
self._result = None
self._error = None
self._process = None
self._script = self.createScript(module, cls, method, args)
self.request_id = request_id
def run(self):
try:
self._log_adapter.debug(
'running %s in %s' % (self._script, self._path))
self._process = utils.createProcess(self._script, self._path)
(result, error) = self._process.communicate()
if not isinstance(result, str):
result = result.decode()
try:
self._result = literal_eval(result)
except Exception as ex:
if not error:
self._error = "Unable to parse result: %s" \
" got error : %s " % (result, ex)
if error:
self._error = error
except Exception as ex:
self._error = ex
if self._error:
self._log_adapter.error("script %s got error %s" %
(self._script, self._error))
def | (self):
return self._result
def getErrors(self):
return self._error
def getReturnCode(self):
return self._process.returncode
def stop(self):
return utils.killProcess(self._process)
def createScript(self, module, cls, method, args):
command_template = "import {m}; {m}.{c}().{method}{args}"
command_string = command_template\
.format(m=module,
c=cls,
method=method,
args=repr(utils.createFunctionArgs(args)))
return ["python3", "-c", command_string]
| getResults | identifier_name |
runner.py | #
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from ast import literal_eval
from threading import Thread
from ovirtscheduler import utils
class PythonMethodRunner(Thread):
def __init__(self, path, module, cls, method, args, request_id=''):
super(PythonMethodRunner, self).__init__(group=None)
logger = logging.getLogger()
self._log_adapter = utils.RequestAdapter(
logger,
{'method': 'PythonMethodRunner',
'request_id': request_id})
self._path = path
self._result = None
self._error = None
self._process = None
self._script = self.createScript(module, cls, method, args)
self.request_id = request_id
def run(self):
try:
self._log_adapter.debug(
'running %s in %s' % (self._script, self._path))
self._process = utils.createProcess(self._script, self._path)
(result, error) = self._process.communicate()
if not isinstance(result, str):
result = result.decode()
try:
self._result = literal_eval(result)
except Exception as ex:
if not error:
self._error = "Unable to parse result: %s" \
" got error : %s " % (result, ex)
if error:
self._error = error
except Exception as ex:
self._error = ex
if self._error:
self._log_adapter.error("script %s got error %s" %
(self._script, self._error))
def getResults(self):
|
def getErrors(self):
return self._error
def getReturnCode(self):
return self._process.returncode
def stop(self):
return utils.killProcess(self._process)
def createScript(self, module, cls, method, args):
command_template = "import {m}; {m}.{c}().{method}{args}"
command_string = command_template\
.format(m=module,
c=cls,
method=method,
args=repr(utils.createFunctionArgs(args)))
return ["python3", "-c", command_string]
| return self._result | identifier_body |
runner.py | #
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from ast import literal_eval
from threading import Thread
from ovirtscheduler import utils
| super(PythonMethodRunner, self).__init__(group=None)
logger = logging.getLogger()
self._log_adapter = utils.RequestAdapter(
logger,
{'method': 'PythonMethodRunner',
'request_id': request_id})
self._path = path
self._result = None
self._error = None
self._process = None
self._script = self.createScript(module, cls, method, args)
self.request_id = request_id
def run(self):
try:
self._log_adapter.debug(
'running %s in %s' % (self._script, self._path))
self._process = utils.createProcess(self._script, self._path)
(result, error) = self._process.communicate()
if not isinstance(result, str):
result = result.decode()
try:
self._result = literal_eval(result)
except Exception as ex:
if not error:
self._error = "Unable to parse result: %s" \
" got error : %s " % (result, ex)
if error:
self._error = error
except Exception as ex:
self._error = ex
if self._error:
self._log_adapter.error("script %s got error %s" %
(self._script, self._error))
def getResults(self):
return self._result
def getErrors(self):
return self._error
def getReturnCode(self):
return self._process.returncode
def stop(self):
return utils.killProcess(self._process)
def createScript(self, module, cls, method, args):
command_template = "import {m}; {m}.{c}().{method}{args}"
command_string = command_template\
.format(m=module,
c=cls,
method=method,
args=repr(utils.createFunctionArgs(args)))
return ["python3", "-c", command_string] |
class PythonMethodRunner(Thread):
def __init__(self, path, module, cls, method, args, request_id=''): | random_line_split |
runner.py | #
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from ast import literal_eval
from threading import Thread
from ovirtscheduler import utils
class PythonMethodRunner(Thread):
def __init__(self, path, module, cls, method, args, request_id=''):
super(PythonMethodRunner, self).__init__(group=None)
logger = logging.getLogger()
self._log_adapter = utils.RequestAdapter(
logger,
{'method': 'PythonMethodRunner',
'request_id': request_id})
self._path = path
self._result = None
self._error = None
self._process = None
self._script = self.createScript(module, cls, method, args)
self.request_id = request_id
def run(self):
try:
self._log_adapter.debug(
'running %s in %s' % (self._script, self._path))
self._process = utils.createProcess(self._script, self._path)
(result, error) = self._process.communicate()
if not isinstance(result, str):
result = result.decode()
try:
self._result = literal_eval(result)
except Exception as ex:
if not error:
|
if error:
self._error = error
except Exception as ex:
self._error = ex
if self._error:
self._log_adapter.error("script %s got error %s" %
(self._script, self._error))
def getResults(self):
return self._result
def getErrors(self):
return self._error
def getReturnCode(self):
return self._process.returncode
def stop(self):
return utils.killProcess(self._process)
def createScript(self, module, cls, method, args):
command_template = "import {m}; {m}.{c}().{method}{args}"
command_string = command_template\
.format(m=module,
c=cls,
method=method,
args=repr(utils.createFunctionArgs(args)))
return ["python3", "-c", command_string]
| self._error = "Unable to parse result: %s" \
" got error : %s " % (result, ex) | conditional_block |
captcha.py | # -*- coding: utf-8 -*-
#
#Copyright (C) 2009 kingzero, RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###
from __future__ import with_statement
import os
from os.path import join
from os.path import abspath
import logging
import subprocess
#import tempfile
import Image
import TiffImagePlugin
import PngImagePlugin
import GifImagePlugin
import JpegImagePlugin
class OCR(object):
__name__ = "OCR"
__type__ = "ocr"
__version__ = "0.1"
__description__ = """OCR base plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
def __init__(self):
self.logger = logging.getLogger("log")
def load_image(self, image):
self.image = Image.open(image)
self.pixels = self.image.load()
self.result_captcha = ''
def unload(self):
"""delete all tmp images"""
pass
def threshold(self, value):
self.image = self.image.point(lambda a: a * value + 10)
def run(self, command):
"""Run a command"""
popen = subprocess.Popen(command, bufsize = -1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.wait()
output = popen.stdout.read() +" | "+ popen.stderr.read()
popen.stdout.close()
popen.stderr.close()
self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
#self.logger.debug("create tmp tif")
#tmp = tempfile.NamedTemporaryFile(suffix=".tif")
tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
tmp.close()
#self.logger.debug("create tmp txt")
#tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
tmpTxt.close()
self.logger.debug("save tiff")
self.image.save(tmp.name, 'TIFF')
if os.name == "nt":
tessparams = [join(pypath,"tesseract","tesseract.exe")]
else:
tessparams = ["tesseract"]
tessparams.extend( [abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")] )
if subset and (digits or lowercase or uppercase):
#self.logger.debug("create temp subset config")
#tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
tmpSub.write("tessedit_char_whitelist ")
if digits:
tmpSub.write("0123456789")
if lowercase:
tmpSub.write("abcdefghijklmnopqrstuvwxyz")
if uppercase:
tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
tmpSub.write("\n")
tessparams.append("nobatch")
tessparams.append(abspath(tmpSub.name))
tmpSub.close()
self.logger.debug("run tesseract")
self.run(tessparams)
self.logger.debug("read txt")
try:
with open(tmpTxt.name, 'r') as f:
self.result_captcha = f.read().replace("\n", "")
except:
self.result_captcha = ""
self.logger.debug(self.result_captcha)
try:
os.remove(tmp.name)
os.remove(tmpTxt.name)
if subset and (digits or lowercase or uppercase):
os.remove(tmpSub.name)
except:
pass
def get_captcha(self, name):
raise NotImplementedError
def to_greyscale(self):
if self.image.mode != 'L':
self.image = self.image.convert('L')
self.pixels = self.image.load()
def eval_black_white(self, limit):
self.pixels = self.image.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if self.pixels[x, y] > limit:
self.pixels[x, y] = 255
else:
self.pixels[x, y] = 0
def clean(self, allowed):
pixels = self.pixels
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 255:
continue
# No point in processing white pixels since we only want to remove black pixel
count = 0
try:
if pixels[x-1, y-1] != 255:
count += 1
if pixels[x-1, y] != 255:
count += 1
if pixels[x-1, y + 1] != 255:
count += 1
if pixels[x, y + 1] != 255:
count += 1
if pixels[x + 1, y + 1] != 255:
|
if pixels[x + 1, y] != 255:
count += 1
if pixels[x + 1, y-1] != 255:
count += 1
if pixels[x, y-1] != 255:
count += 1
except:
pass
# not enough neighbors are dark pixels so mark this pixel
# to be changed to white
if count < allowed:
pixels[x, y] = 1
# second pass: this time set all 1's to 255 (white)
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 1:
pixels[x, y] = 255
self.pixels = pixels
def derotate_by_average(self):
"""rotate by checking each angle and guess most suitable"""
w, h = self.image.size
pixels = self.pixels
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 155
highest = {}
counts = {}
for angle in xrange(-45, 45):
tmpimage = self.image.rotate(angle)
pixels = tmpimage.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
count = {}
for x in xrange(w):
count[x] = 0
for y in xrange(h):
if pixels[x, y] == 155:
count[x] += 1
sum = 0
cnt = 0
for x in count.values():
if x != 0:
sum += x
cnt += 1
avg = sum / cnt
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if x > highest[angle]:
highest[angle] = x
highest[angle] = highest[angle] - avg
hkey = 0
hvalue = 0
for key, value in highest.iteritems():
if value > hvalue:
hkey = key
hvalue = value
self.image = self.image.rotate(hkey)
pixels = self.image.load()
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
if pixels[x, y] == 155:
pixels[x, y] = 0
self.pixels = pixels
def split_captcha_letters(self):
captcha = self.image
started = False
letters = []
width, height = captcha.size
bottomY, topY = 0, height
pixels = captcha.load()
for x in xrange(width):
black_pixel_in_col = False
for y in xrange(height):
if pixels[x, y] != 255:
if not started:
started = True
firstX = x
lastX = x
if y > bottomY:
bottomY = y
if y < topY:
topY = y
if x > lastX:
lastX = x
black_pixel_in_col = True
if black_pixel_in_col is False and started is True:
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
w, h = new_captcha.size
if w > 5 and h > 5:
letters.append(new_captcha)
started = False
bottomY, topY = 0, height
return letters
def correct(self, values, var=None):
if var:
result = var
else:
result = self.result_captcha
for key, item in values.iteritems():
if key.__class__ == str:
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result
if __name__ == '__main__':
ocr = OCR()
ocr.load_image("B.jpg")
ocr.to_greyscale()
ocr.eval_black_white(140)
ocr.derotate_by_average()
ocr.run_tesser()
print "Tesseract", ocr.result_captcha
ocr.image.save("derotated.jpg")
| count += 1 | conditional_block |
captcha.py | # -*- coding: utf-8 -*-
#
#Copyright (C) 2009 kingzero, RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###
from __future__ import with_statement
import os
from os.path import join
from os.path import abspath
import logging
import subprocess
#import tempfile
import Image
import TiffImagePlugin
import PngImagePlugin
import GifImagePlugin
import JpegImagePlugin
class OCR(object):
__name__ = "OCR"
__type__ = "ocr"
__version__ = "0.1"
__description__ = """OCR base plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
def __init__(self):
self.logger = logging.getLogger("log")
def load_image(self, image):
self.image = Image.open(image)
self.pixels = self.image.load()
self.result_captcha = ''
def unload(self):
"""delete all tmp images"""
pass
def threshold(self, value):
|
def run(self, command):
"""Run a command"""
popen = subprocess.Popen(command, bufsize = -1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.wait()
output = popen.stdout.read() +" | "+ popen.stderr.read()
popen.stdout.close()
popen.stderr.close()
self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
#self.logger.debug("create tmp tif")
#tmp = tempfile.NamedTemporaryFile(suffix=".tif")
tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
tmp.close()
#self.logger.debug("create tmp txt")
#tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
tmpTxt.close()
self.logger.debug("save tiff")
self.image.save(tmp.name, 'TIFF')
if os.name == "nt":
tessparams = [join(pypath,"tesseract","tesseract.exe")]
else:
tessparams = ["tesseract"]
tessparams.extend( [abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")] )
if subset and (digits or lowercase or uppercase):
#self.logger.debug("create temp subset config")
#tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
tmpSub.write("tessedit_char_whitelist ")
if digits:
tmpSub.write("0123456789")
if lowercase:
tmpSub.write("abcdefghijklmnopqrstuvwxyz")
if uppercase:
tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
tmpSub.write("\n")
tessparams.append("nobatch")
tessparams.append(abspath(tmpSub.name))
tmpSub.close()
self.logger.debug("run tesseract")
self.run(tessparams)
self.logger.debug("read txt")
try:
with open(tmpTxt.name, 'r') as f:
self.result_captcha = f.read().replace("\n", "")
except:
self.result_captcha = ""
self.logger.debug(self.result_captcha)
try:
os.remove(tmp.name)
os.remove(tmpTxt.name)
if subset and (digits or lowercase or uppercase):
os.remove(tmpSub.name)
except:
pass
def get_captcha(self, name):
raise NotImplementedError
def to_greyscale(self):
if self.image.mode != 'L':
self.image = self.image.convert('L')
self.pixels = self.image.load()
def eval_black_white(self, limit):
self.pixels = self.image.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if self.pixels[x, y] > limit:
self.pixels[x, y] = 255
else:
self.pixels[x, y] = 0
def clean(self, allowed):
pixels = self.pixels
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 255:
continue
# No point in processing white pixels since we only want to remove black pixel
count = 0
try:
if pixels[x-1, y-1] != 255:
count += 1
if pixels[x-1, y] != 255:
count += 1
if pixels[x-1, y + 1] != 255:
count += 1
if pixels[x, y + 1] != 255:
count += 1
if pixels[x + 1, y + 1] != 255:
count += 1
if pixels[x + 1, y] != 255:
count += 1
if pixels[x + 1, y-1] != 255:
count += 1
if pixels[x, y-1] != 255:
count += 1
except:
pass
# not enough neighbors are dark pixels so mark this pixel
# to be changed to white
if count < allowed:
pixels[x, y] = 1
# second pass: this time set all 1's to 255 (white)
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 1:
pixels[x, y] = 255
self.pixels = pixels
def derotate_by_average(self):
"""rotate by checking each angle and guess most suitable"""
w, h = self.image.size
pixels = self.pixels
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 155
highest = {}
counts = {}
for angle in xrange(-45, 45):
tmpimage = self.image.rotate(angle)
pixels = tmpimage.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
count = {}
for x in xrange(w):
count[x] = 0
for y in xrange(h):
if pixels[x, y] == 155:
count[x] += 1
sum = 0
cnt = 0
for x in count.values():
if x != 0:
sum += x
cnt += 1
avg = sum / cnt
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if x > highest[angle]:
highest[angle] = x
highest[angle] = highest[angle] - avg
hkey = 0
hvalue = 0
for key, value in highest.iteritems():
if value > hvalue:
hkey = key
hvalue = value
self.image = self.image.rotate(hkey)
pixels = self.image.load()
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
if pixels[x, y] == 155:
pixels[x, y] = 0
self.pixels = pixels
def split_captcha_letters(self):
captcha = self.image
started = False
letters = []
width, height = captcha.size
bottomY, topY = 0, height
pixels = captcha.load()
for x in xrange(width):
black_pixel_in_col = False
for y in xrange(height):
if pixels[x, y] != 255:
if not started:
started = True
firstX = x
lastX = x
if y > bottomY:
bottomY = y
if y < topY:
topY = y
if x > lastX:
lastX = x
black_pixel_in_col = True
if black_pixel_in_col is False and started is True:
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
w, h = new_captcha.size
if w > 5 and h > 5:
letters.append(new_captcha)
started = False
bottomY, topY = 0, height
return letters
def correct(self, values, var=None):
if var:
result = var
else:
result = self.result_captcha
for key, item in values.iteritems():
if key.__class__ == str:
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result
if __name__ == '__main__':
ocr = OCR()
ocr.load_image("B.jpg")
ocr.to_greyscale()
ocr.eval_black_white(140)
ocr.derotate_by_average()
ocr.run_tesser()
print "Tesseract", ocr.result_captcha
ocr.image.save("derotated.jpg")
| self.image = self.image.point(lambda a: a * value + 10) | identifier_body |
captcha.py | # -*- coding: utf-8 -*-
#
#Copyright (C) 2009 kingzero, RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###
from __future__ import with_statement
import os
from os.path import join
from os.path import abspath
import logging
import subprocess
#import tempfile
import Image
import TiffImagePlugin
import PngImagePlugin
import GifImagePlugin
import JpegImagePlugin
class OCR(object):
__name__ = "OCR"
__type__ = "ocr"
__version__ = "0.1"
__description__ = """OCR base plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
def __init__(self):
self.logger = logging.getLogger("log")
def | (self, image):
self.image = Image.open(image)
self.pixels = self.image.load()
self.result_captcha = ''
def unload(self):
"""delete all tmp images"""
pass
def threshold(self, value):
self.image = self.image.point(lambda a: a * value + 10)
def run(self, command):
"""Run a command"""
popen = subprocess.Popen(command, bufsize = -1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.wait()
output = popen.stdout.read() +" | "+ popen.stderr.read()
popen.stdout.close()
popen.stderr.close()
self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
#self.logger.debug("create tmp tif")
#tmp = tempfile.NamedTemporaryFile(suffix=".tif")
tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
tmp.close()
#self.logger.debug("create tmp txt")
#tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
tmpTxt.close()
self.logger.debug("save tiff")
self.image.save(tmp.name, 'TIFF')
if os.name == "nt":
tessparams = [join(pypath,"tesseract","tesseract.exe")]
else:
tessparams = ["tesseract"]
tessparams.extend( [abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")] )
if subset and (digits or lowercase or uppercase):
#self.logger.debug("create temp subset config")
#tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
tmpSub.write("tessedit_char_whitelist ")
if digits:
tmpSub.write("0123456789")
if lowercase:
tmpSub.write("abcdefghijklmnopqrstuvwxyz")
if uppercase:
tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
tmpSub.write("\n")
tessparams.append("nobatch")
tessparams.append(abspath(tmpSub.name))
tmpSub.close()
self.logger.debug("run tesseract")
self.run(tessparams)
self.logger.debug("read txt")
try:
with open(tmpTxt.name, 'r') as f:
self.result_captcha = f.read().replace("\n", "")
except:
self.result_captcha = ""
self.logger.debug(self.result_captcha)
try:
os.remove(tmp.name)
os.remove(tmpTxt.name)
if subset and (digits or lowercase or uppercase):
os.remove(tmpSub.name)
except:
pass
def get_captcha(self, name):
raise NotImplementedError
def to_greyscale(self):
if self.image.mode != 'L':
self.image = self.image.convert('L')
self.pixels = self.image.load()
def eval_black_white(self, limit):
self.pixels = self.image.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if self.pixels[x, y] > limit:
self.pixels[x, y] = 255
else:
self.pixels[x, y] = 0
def clean(self, allowed):
pixels = self.pixels
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 255:
continue
# No point in processing white pixels since we only want to remove black pixel
count = 0
try:
if pixels[x-1, y-1] != 255:
count += 1
if pixels[x-1, y] != 255:
count += 1
if pixels[x-1, y + 1] != 255:
count += 1
if pixels[x, y + 1] != 255:
count += 1
if pixels[x + 1, y + 1] != 255:
count += 1
if pixels[x + 1, y] != 255:
count += 1
if pixels[x + 1, y-1] != 255:
count += 1
if pixels[x, y-1] != 255:
count += 1
except:
pass
# not enough neighbors are dark pixels so mark this pixel
# to be changed to white
if count < allowed:
pixels[x, y] = 1
# second pass: this time set all 1's to 255 (white)
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 1:
pixels[x, y] = 255
self.pixels = pixels
def derotate_by_average(self):
"""rotate by checking each angle and guess most suitable"""
w, h = self.image.size
pixels = self.pixels
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 155
highest = {}
counts = {}
for angle in xrange(-45, 45):
tmpimage = self.image.rotate(angle)
pixels = tmpimage.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
count = {}
for x in xrange(w):
count[x] = 0
for y in xrange(h):
if pixels[x, y] == 155:
count[x] += 1
sum = 0
cnt = 0
for x in count.values():
if x != 0:
sum += x
cnt += 1
avg = sum / cnt
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if x > highest[angle]:
highest[angle] = x
highest[angle] = highest[angle] - avg
hkey = 0
hvalue = 0
for key, value in highest.iteritems():
if value > hvalue:
hkey = key
hvalue = value
self.image = self.image.rotate(hkey)
pixels = self.image.load()
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
if pixels[x, y] == 155:
pixels[x, y] = 0
self.pixels = pixels
def split_captcha_letters(self):
captcha = self.image
started = False
letters = []
width, height = captcha.size
bottomY, topY = 0, height
pixels = captcha.load()
for x in xrange(width):
black_pixel_in_col = False
for y in xrange(height):
if pixels[x, y] != 255:
if not started:
started = True
firstX = x
lastX = x
if y > bottomY:
bottomY = y
if y < topY:
topY = y
if x > lastX:
lastX = x
black_pixel_in_col = True
if black_pixel_in_col is False and started is True:
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
w, h = new_captcha.size
if w > 5 and h > 5:
letters.append(new_captcha)
started = False
bottomY, topY = 0, height
return letters
def correct(self, values, var=None):
if var:
result = var
else:
result = self.result_captcha
for key, item in values.iteritems():
if key.__class__ == str:
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result
if __name__ == '__main__':
ocr = OCR()
ocr.load_image("B.jpg")
ocr.to_greyscale()
ocr.eval_black_white(140)
ocr.derotate_by_average()
ocr.run_tesser()
print "Tesseract", ocr.result_captcha
ocr.image.save("derotated.jpg")
| load_image | identifier_name |
captcha.py | # -*- coding: utf-8 -*-
#
#Copyright (C) 2009 kingzero, RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###
from __future__ import with_statement
import os
from os.path import join
from os.path import abspath
import logging
import subprocess
#import tempfile
import Image
import TiffImagePlugin
import PngImagePlugin
import GifImagePlugin
import JpegImagePlugin
class OCR(object):
__name__ = "OCR"
__type__ = "ocr"
__version__ = "0.1"
__description__ = """OCR base plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
def __init__(self):
self.logger = logging.getLogger("log")
def load_image(self, image):
self.image = Image.open(image)
self.pixels = self.image.load()
self.result_captcha = ''
def unload(self):
"""delete all tmp images"""
pass
def threshold(self, value):
self.image = self.image.point(lambda a: a * value + 10)
def run(self, command):
"""Run a command"""
popen = subprocess.Popen(command, bufsize = -1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.wait()
output = popen.stdout.read() +" | "+ popen.stderr.read()
popen.stdout.close()
popen.stderr.close()
self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
#self.logger.debug("create tmp tif")
#tmp = tempfile.NamedTemporaryFile(suffix=".tif")
tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
tmp.close()
#self.logger.debug("create tmp txt")
#tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
tmpTxt.close()
self.logger.debug("save tiff")
self.image.save(tmp.name, 'TIFF')
if os.name == "nt":
tessparams = [join(pypath,"tesseract","tesseract.exe")]
else:
tessparams = ["tesseract"]
tessparams.extend( [abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")] )
if subset and (digits or lowercase or uppercase):
#self.logger.debug("create temp subset config")
#tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
tmpSub.write("tessedit_char_whitelist ")
if digits:
tmpSub.write("0123456789")
if lowercase:
tmpSub.write("abcdefghijklmnopqrstuvwxyz")
if uppercase:
tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
tmpSub.write("\n")
tessparams.append("nobatch")
tessparams.append(abspath(tmpSub.name))
tmpSub.close()
self.logger.debug("run tesseract")
self.run(tessparams)
self.logger.debug("read txt")
try:
with open(tmpTxt.name, 'r') as f:
self.result_captcha = f.read().replace("\n", "")
except:
self.result_captcha = ""
self.logger.debug(self.result_captcha)
try:
os.remove(tmp.name)
os.remove(tmpTxt.name)
if subset and (digits or lowercase or uppercase):
os.remove(tmpSub.name)
except:
pass
def get_captcha(self, name):
raise NotImplementedError
def to_greyscale(self):
if self.image.mode != 'L':
self.image = self.image.convert('L')
self.pixels = self.image.load()
def eval_black_white(self, limit):
self.pixels = self.image.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if self.pixels[x, y] > limit:
self.pixels[x, y] = 255
else:
self.pixels[x, y] = 0
def clean(self, allowed):
pixels = self.pixels
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 255:
continue
# No point in processing white pixels since we only want to remove black pixel
count = 0
try:
if pixels[x-1, y-1] != 255:
count += 1
if pixels[x-1, y] != 255:
count += 1
if pixels[x-1, y + 1] != 255:
count += 1
if pixels[x, y + 1] != 255:
count += 1
if pixels[x + 1, y + 1] != 255:
count += 1
if pixels[x + 1, y] != 255:
count += 1
if pixels[x + 1, y-1] != 255:
count += 1
if pixels[x, y-1] != 255:
count += 1
except:
pass
# not enough neighbors are dark pixels so mark this pixel
# to be changed to white
if count < allowed:
pixels[x, y] = 1
# second pass: this time set all 1's to 255 (white)
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 1:
pixels[x, y] = 255
self.pixels = pixels
def derotate_by_average(self):
"""rotate by checking each angle and guess most suitable"""
w, h = self.image.size
pixels = self.pixels
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 155
highest = {}
counts = {}
for angle in xrange(-45, 45):
tmpimage = self.image.rotate(angle)
pixels = tmpimage.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
count = {}
for x in xrange(w):
count[x] = 0
for y in xrange(h):
if pixels[x, y] == 155:
count[x] += 1
sum = 0
cnt = 0
for x in count.values():
if x != 0:
sum += x
cnt += 1
avg = sum / cnt
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if x > highest[angle]:
highest[angle] = x
highest[angle] = highest[angle] - avg
hkey = 0
hvalue = 0
for key, value in highest.iteritems():
if value > hvalue:
hkey = key
hvalue = value
self.image = self.image.rotate(hkey)
pixels = self.image.load()
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
if pixels[x, y] == 155:
pixels[x, y] = 0
self.pixels = pixels
def split_captcha_letters(self):
captcha = self.image
started = False
letters = []
width, height = captcha.size
bottomY, topY = 0, height
pixels = captcha.load()
for x in xrange(width):
black_pixel_in_col = False
for y in xrange(height):
if pixels[x, y] != 255:
if not started:
started = True
firstX = x
lastX = x
if y > bottomY: | bottomY = y
if y < topY:
topY = y
if x > lastX:
lastX = x
black_pixel_in_col = True
if black_pixel_in_col is False and started is True:
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
w, h = new_captcha.size
if w > 5 and h > 5:
letters.append(new_captcha)
started = False
bottomY, topY = 0, height
return letters
def correct(self, values, var=None):
if var:
result = var
else:
result = self.result_captcha
for key, item in values.iteritems():
if key.__class__ == str:
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result
if __name__ == '__main__':
ocr = OCR()
ocr.load_image("B.jpg")
ocr.to_greyscale()
ocr.eval_black_white(140)
ocr.derotate_by_average()
ocr.run_tesser()
print "Tesseract", ocr.result_captcha
ocr.image.save("derotated.jpg") | random_line_split | |
iss118-spec.js | /** @babel */
/* eslint-env jasmine, atomtest */
/*
This file contains verifying specs for:
https://github.com/sindresorhus/atom-editorconfig/issues/118
*/
import fs from 'fs';
import path from 'path';
const testPrefix = path.basename(__filename).split('-').shift();
const projectRoot = path.join(__dirname, 'fixtures');
const filePath = path.join(projectRoot, `test.${testPrefix}`);
describe('editorconfig', () => {
let textEditor; | const textWithManyTrailingWhitespaces = 'I \t \nam \t \nProvidence.';
beforeEach(() => {
waitsForPromise(() =>
Promise.all([
atom.packages.activatePackage('editorconfig'),
atom.workspace.open(filePath)
]).then(results => {
textEditor = results[1];
})
);
});
afterEach(() => {
// remove the created fixture, if it exists
runs(() => {
fs.stat(filePath, (err, stats) => {
if (!err && stats.isFile()) {
fs.unlink(filePath);
}
});
});
waitsFor(() => {
try {
return fs.statSync(filePath).isFile() === false;
} catch (err) {
return true;
}
}, 5000, `removed ${filePath}`);
});
describe('Atom being set to remove trailing whitespaces', () => {
beforeEach(() => {
// eslint-disable-next-line camelcase
textEditor.getBuffer().editorconfig.settings.trim_trailing_whitespace = true;
// eslint-disable-next-line camelcase
textEditor.getBuffer().editorconfig.settings.insert_final_newline = false;
});
it('should strip trailing whitespaces on save.', () => {
textEditor.setText(textWithManyTrailingWhitespaces);
textEditor.save();
expect(textEditor.getText().length).toEqual(textWithoutTrailingWhitespaces.length);
});
});
}); | const textWithoutTrailingWhitespaces = 'I\nam\nProvidence.'; | random_line_split |
iss118-spec.js | /** @babel */
/* eslint-env jasmine, atomtest */
/*
This file contains verifying specs for:
https://github.com/sindresorhus/atom-editorconfig/issues/118
*/
import fs from 'fs';
import path from 'path';
const testPrefix = path.basename(__filename).split('-').shift();
const projectRoot = path.join(__dirname, 'fixtures');
const filePath = path.join(projectRoot, `test.${testPrefix}`);
describe('editorconfig', () => {
let textEditor;
const textWithoutTrailingWhitespaces = 'I\nam\nProvidence.';
const textWithManyTrailingWhitespaces = 'I \t \nam \t \nProvidence.';
beforeEach(() => {
waitsForPromise(() =>
Promise.all([
atom.packages.activatePackage('editorconfig'),
atom.workspace.open(filePath)
]).then(results => {
textEditor = results[1];
})
);
});
afterEach(() => {
// remove the created fixture, if it exists
runs(() => {
fs.stat(filePath, (err, stats) => {
if (!err && stats.isFile()) |
});
});
waitsFor(() => {
try {
return fs.statSync(filePath).isFile() === false;
} catch (err) {
return true;
}
}, 5000, `removed ${filePath}`);
});
describe('Atom being set to remove trailing whitespaces', () => {
beforeEach(() => {
// eslint-disable-next-line camelcase
textEditor.getBuffer().editorconfig.settings.trim_trailing_whitespace = true;
// eslint-disable-next-line camelcase
textEditor.getBuffer().editorconfig.settings.insert_final_newline = false;
});
it('should strip trailing whitespaces on save.', () => {
textEditor.setText(textWithManyTrailingWhitespaces);
textEditor.save();
expect(textEditor.getText().length).toEqual(textWithoutTrailingWhitespaces.length);
});
});
});
| {
fs.unlink(filePath);
} | conditional_block |
htmldivelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDivElementBinding::{self, HTMLDivElementMethods};
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDivElement {
htmlelement: HTMLElement
}
impl HTMLDivElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLDivElement {
HTMLDivElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDivElement> {
Node::reflect_node(box HTMLDivElement::new_inherited(local_name, prefix, document),
document,
HTMLDivElementBinding::Wrap)
}
}
impl HTMLDivElementMethods for HTMLDivElement {
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_setter!(SetAlign, "align"); | } | random_line_split | |
htmldivelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDivElementBinding::{self, HTMLDivElementMethods};
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDivElement {
htmlelement: HTMLElement
}
impl HTMLDivElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLDivElement {
HTMLDivElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn | (local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDivElement> {
Node::reflect_node(box HTMLDivElement::new_inherited(local_name, prefix, document),
document,
HTMLDivElementBinding::Wrap)
}
}
impl HTMLDivElementMethods for HTMLDivElement {
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_setter!(SetAlign, "align");
}
| new | identifier_name |
config.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage configuration.
use crate::server::ttl::TTLCheckerTask;
use crate::server::CONFIG_ROCKSDB_GAUGE;
use configuration::{ConfigChange, ConfigManager, ConfigValue, Configuration, Result as CfgResult};
use engine_rocks::raw::{Cache, LRUCacheOptions, MemoryAllocator};
use engine_rocks::RocksEngine;
use engine_traits::{CFOptionsExt, ColumnFamilyOptions, CF_DEFAULT};
use libc::c_int;
use std::error::Error;
use tikv_util::config::{self, OptionReadableSize, ReadableDuration, ReadableSize};
use tikv_util::sys::sys_quota::SysQuota;
use tikv_util::worker::Scheduler;
pub const DEFAULT_DATA_DIR: &str = "./";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
const DEFAULT_MAX_KEY_SIZE: usize = 4 * 1024;
const DEFAULT_SCHED_CONCURRENCY: usize = 1024 * 512;
const MAX_SCHED_CONCURRENCY: usize = 2 * 1024 * 1024;
// According to "Little's law", assuming you can write 100MB per
// second, and it takes about 100ms to process the write requests
// on average, in that situation the writing bytes estimated 10MB,
// here we use 100MB as default value for tolerate 1s latency.
const DEFAULT_SCHED_PENDING_WRITE_MB: u64 = 100;
const DEFAULT_RESERVED_SPACE_GB: u64 = 5;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
#[config(skip)]
pub data_dir: String,
// Replaced by `GcConfig.ratio_threshold`. Keep it for backward compatibility.
#[config(skip)]
pub gc_ratio_threshold: f64,
#[config(skip)]
pub max_key_size: usize,
#[config(skip)]
pub scheduler_concurrency: usize,
#[config(skip)]
pub scheduler_worker_pool_size: usize,
#[config(skip)]
pub scheduler_pending_write_threshold: ReadableSize,
#[config(skip)]
// Reserve disk space to make tikv would have enough space to compact when disk is full.
pub reserve_space: ReadableSize,
#[config(skip)]
pub enable_async_apply_prewrite: bool,
#[config(skip)]
pub enable_ttl: bool,
/// Interval to check TTL for all SSTs,
pub ttl_check_poll_interval: ReadableDuration,
#[config(submodule)]
pub block_cache: BlockCacheConfig,
}
impl Default for Config {
fn default() -> Config {
let cpu_num = SysQuota::new().cpu_cores_quota();
Config {
data_dir: DEFAULT_DATA_DIR.to_owned(),
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVED_SPACE_GB),
enable_async_apply_prewrite: false,
enable_ttl: false,
ttl_check_poll_interval: ReadableDuration::hours(12),
block_cache: BlockCacheConfig::default(),
}
}
}
impl Config {
pub fn validate(&mut self) -> Result<(), Box<dyn Error>> {
if self.data_dir != DEFAULT_DATA_DIR {
self.data_dir = config::canonicalize_path(&self.data_dir)?
}
if self.scheduler_concurrency > MAX_SCHED_CONCURRENCY {
warn!(
"TiKV has optimized latch since v4.0, so it is not necessary to set large schedule \
concurrency. To save memory, change it from {:?} to {:?}",
self.scheduler_concurrency, MAX_SCHED_CONCURRENCY
);
self.scheduler_concurrency = MAX_SCHED_CONCURRENCY;
}
Ok(())
}
}
pub struct StorageConfigManger {
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
}
impl StorageConfigManger {
pub fn new(
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
) -> StorageConfigManger {
StorageConfigManger {
kvdb,
shared_block_cache,
ttl_checker_scheduler,
}
}
}
impl ConfigManager for StorageConfigManger {
fn dispatch(&mut self, mut change: ConfigChange) -> CfgResult<()> {
if let Some(ConfigValue::Module(mut block_cache)) = change.remove("block_cache") {
if !self.shared_block_cache {
return Err("shared block cache is disabled".into());
}
if let Some(size) = block_cache.remove("capacity") {
let s: OptionReadableSize = size.into();
if let Some(size) = s.0 {
// Hack: since all CFs in both kvdb and raftdb share a block cache, we can change
// the size through any of them. Here we change it through default CF in kvdb.
// A better way to do it is to hold the cache reference somewhere, and use it to
// change cache size.
let opt = self.kvdb.get_options_cf(CF_DEFAULT).unwrap(); // FIXME unwrap
opt.set_block_cache_capacity(size.0)?;
// Write config to metric
CONFIG_ROCKSDB_GAUGE
.with_label_values(&[CF_DEFAULT, "block_cache_size"])
.set(size.0 as f64);
}
}
} else if let Some(v) = change.remove("ttl_check_poll_interval") {
let interval: ReadableDuration = v.into();
self.ttl_checker_scheduler
.schedule(TTLCheckerTask::UpdatePollInterval(interval.into()))
.unwrap();
}
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct BlockCacheConfig {
#[config(skip)]
pub shared: bool,
pub capacity: OptionReadableSize,
#[config(skip)]
pub num_shard_bits: i32,
#[config(skip)]
pub strict_capacity_limit: bool,
#[config(skip)]
pub high_pri_pool_ratio: f64,
#[config(skip)]
pub memory_allocator: Option<String>,
}
impl Default for BlockCacheConfig {
fn default() -> BlockCacheConfig {
BlockCacheConfig {
shared: true,
capacity: OptionReadableSize(None),
num_shard_bits: 6,
strict_capacity_limit: false,
high_pri_pool_ratio: 0.8,
memory_allocator: Some(String::from("nodump")),
}
}
}
impl BlockCacheConfig {
pub fn build_shared_cache(&self) -> Option<Cache> {
if !self.shared {
return None;
}
let capacity = match self.capacity.0 {
None => {
let total_mem = SysQuota::new().memory_limit_in_bytes();
((total_mem as f64) * 0.45) as usize
}
Some(c) => c.0 as usize,
};
let mut cache_opts = LRUCacheOptions::new();
cache_opts.set_capacity(capacity);
cache_opts.set_num_shard_bits(self.num_shard_bits as c_int);
cache_opts.set_strict_capacity_limit(self.strict_capacity_limit);
cache_opts.set_high_pri_pool_ratio(self.high_pri_pool_ratio);
if let Some(allocator) = self.new_memory_allocator() {
cache_opts.set_memory_allocator(allocator);
}
Some(Cache::new_lru_cache(cache_opts))
}
fn new_memory_allocator(&self) -> Option<MemoryAllocator> {
if let Some(ref alloc) = self.memory_allocator {
match alloc.as_str() {
#[cfg(feature = "jemalloc")]
"nodump" => match MemoryAllocator::new_jemalloc_memory_allocator() {
Ok(allocator) => {
return Some(allocator);
}
Err(e) => |
},
"" => {}
other => {
warn!(
"Memory allocator {} is not supported, continue with default allocator",
other
);
}
}
};
None
}
}
| {
warn!(
"Create jemalloc nodump allocator for block cache failed: {}, continue with default allocator",
e
);
} | conditional_block |
config.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage configuration.
use crate::server::ttl::TTLCheckerTask;
use crate::server::CONFIG_ROCKSDB_GAUGE;
use configuration::{ConfigChange, ConfigManager, ConfigValue, Configuration, Result as CfgResult};
use engine_rocks::raw::{Cache, LRUCacheOptions, MemoryAllocator};
use engine_rocks::RocksEngine;
use engine_traits::{CFOptionsExt, ColumnFamilyOptions, CF_DEFAULT};
use libc::c_int;
use std::error::Error;
use tikv_util::config::{self, OptionReadableSize, ReadableDuration, ReadableSize};
use tikv_util::sys::sys_quota::SysQuota;
use tikv_util::worker::Scheduler;
pub const DEFAULT_DATA_DIR: &str = "./";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
const DEFAULT_MAX_KEY_SIZE: usize = 4 * 1024;
const DEFAULT_SCHED_CONCURRENCY: usize = 1024 * 512;
const MAX_SCHED_CONCURRENCY: usize = 2 * 1024 * 1024;
// According to "Little's law", assuming you can write 100MB per
// second, and it takes about 100ms to process the write requests
// on average, in that situation the writing bytes estimated 10MB,
// here we use 100MB as default value for tolerate 1s latency.
const DEFAULT_SCHED_PENDING_WRITE_MB: u64 = 100;
const DEFAULT_RESERVED_SPACE_GB: u64 = 5;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
#[config(skip)]
pub data_dir: String,
// Replaced by `GcConfig.ratio_threshold`. Keep it for backward compatibility.
#[config(skip)]
pub gc_ratio_threshold: f64,
#[config(skip)]
pub max_key_size: usize,
#[config(skip)]
pub scheduler_concurrency: usize,
#[config(skip)]
pub scheduler_worker_pool_size: usize,
#[config(skip)]
pub scheduler_pending_write_threshold: ReadableSize,
#[config(skip)]
// Reserve disk space to make tikv would have enough space to compact when disk is full.
pub reserve_space: ReadableSize,
#[config(skip)]
pub enable_async_apply_prewrite: bool,
#[config(skip)]
pub enable_ttl: bool,
/// Interval to check TTL for all SSTs,
pub ttl_check_poll_interval: ReadableDuration,
#[config(submodule)]
pub block_cache: BlockCacheConfig,
}
impl Default for Config {
fn default() -> Config {
let cpu_num = SysQuota::new().cpu_cores_quota();
Config {
data_dir: DEFAULT_DATA_DIR.to_owned(),
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVED_SPACE_GB),
enable_async_apply_prewrite: false,
enable_ttl: false,
ttl_check_poll_interval: ReadableDuration::hours(12),
block_cache: BlockCacheConfig::default(),
}
}
}
impl Config {
pub fn validate(&mut self) -> Result<(), Box<dyn Error>> { | "TiKV has optimized latch since v4.0, so it is not necessary to set large schedule \
concurrency. To save memory, change it from {:?} to {:?}",
self.scheduler_concurrency, MAX_SCHED_CONCURRENCY
);
self.scheduler_concurrency = MAX_SCHED_CONCURRENCY;
}
Ok(())
}
}
pub struct StorageConfigManger {
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
}
impl StorageConfigManger {
pub fn new(
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
) -> StorageConfigManger {
StorageConfigManger {
kvdb,
shared_block_cache,
ttl_checker_scheduler,
}
}
}
impl ConfigManager for StorageConfigManger {
fn dispatch(&mut self, mut change: ConfigChange) -> CfgResult<()> {
if let Some(ConfigValue::Module(mut block_cache)) = change.remove("block_cache") {
if !self.shared_block_cache {
return Err("shared block cache is disabled".into());
}
if let Some(size) = block_cache.remove("capacity") {
let s: OptionReadableSize = size.into();
if let Some(size) = s.0 {
// Hack: since all CFs in both kvdb and raftdb share a block cache, we can change
// the size through any of them. Here we change it through default CF in kvdb.
// A better way to do it is to hold the cache reference somewhere, and use it to
// change cache size.
let opt = self.kvdb.get_options_cf(CF_DEFAULT).unwrap(); // FIXME unwrap
opt.set_block_cache_capacity(size.0)?;
// Write config to metric
CONFIG_ROCKSDB_GAUGE
.with_label_values(&[CF_DEFAULT, "block_cache_size"])
.set(size.0 as f64);
}
}
} else if let Some(v) = change.remove("ttl_check_poll_interval") {
let interval: ReadableDuration = v.into();
self.ttl_checker_scheduler
.schedule(TTLCheckerTask::UpdatePollInterval(interval.into()))
.unwrap();
}
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct BlockCacheConfig {
#[config(skip)]
pub shared: bool,
pub capacity: OptionReadableSize,
#[config(skip)]
pub num_shard_bits: i32,
#[config(skip)]
pub strict_capacity_limit: bool,
#[config(skip)]
pub high_pri_pool_ratio: f64,
#[config(skip)]
pub memory_allocator: Option<String>,
}
impl Default for BlockCacheConfig {
fn default() -> BlockCacheConfig {
BlockCacheConfig {
shared: true,
capacity: OptionReadableSize(None),
num_shard_bits: 6,
strict_capacity_limit: false,
high_pri_pool_ratio: 0.8,
memory_allocator: Some(String::from("nodump")),
}
}
}
impl BlockCacheConfig {
pub fn build_shared_cache(&self) -> Option<Cache> {
if !self.shared {
return None;
}
let capacity = match self.capacity.0 {
None => {
let total_mem = SysQuota::new().memory_limit_in_bytes();
((total_mem as f64) * 0.45) as usize
}
Some(c) => c.0 as usize,
};
let mut cache_opts = LRUCacheOptions::new();
cache_opts.set_capacity(capacity);
cache_opts.set_num_shard_bits(self.num_shard_bits as c_int);
cache_opts.set_strict_capacity_limit(self.strict_capacity_limit);
cache_opts.set_high_pri_pool_ratio(self.high_pri_pool_ratio);
if let Some(allocator) = self.new_memory_allocator() {
cache_opts.set_memory_allocator(allocator);
}
Some(Cache::new_lru_cache(cache_opts))
}
fn new_memory_allocator(&self) -> Option<MemoryAllocator> {
if let Some(ref alloc) = self.memory_allocator {
match alloc.as_str() {
#[cfg(feature = "jemalloc")]
"nodump" => match MemoryAllocator::new_jemalloc_memory_allocator() {
Ok(allocator) => {
return Some(allocator);
}
Err(e) => {
warn!(
"Create jemalloc nodump allocator for block cache failed: {}, continue with default allocator",
e
);
}
},
"" => {}
other => {
warn!(
"Memory allocator {} is not supported, continue with default allocator",
other
);
}
}
};
None
}
} | if self.data_dir != DEFAULT_DATA_DIR {
self.data_dir = config::canonicalize_path(&self.data_dir)?
}
if self.scheduler_concurrency > MAX_SCHED_CONCURRENCY {
warn!( | random_line_split |
config.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage configuration.
use crate::server::ttl::TTLCheckerTask;
use crate::server::CONFIG_ROCKSDB_GAUGE;
use configuration::{ConfigChange, ConfigManager, ConfigValue, Configuration, Result as CfgResult};
use engine_rocks::raw::{Cache, LRUCacheOptions, MemoryAllocator};
use engine_rocks::RocksEngine;
use engine_traits::{CFOptionsExt, ColumnFamilyOptions, CF_DEFAULT};
use libc::c_int;
use std::error::Error;
use tikv_util::config::{self, OptionReadableSize, ReadableDuration, ReadableSize};
use tikv_util::sys::sys_quota::SysQuota;
use tikv_util::worker::Scheduler;
pub const DEFAULT_DATA_DIR: &str = "./";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
const DEFAULT_MAX_KEY_SIZE: usize = 4 * 1024;
const DEFAULT_SCHED_CONCURRENCY: usize = 1024 * 512;
const MAX_SCHED_CONCURRENCY: usize = 2 * 1024 * 1024;
// According to "Little's law", assuming you can write 100MB per
// second, and it takes about 100ms to process the write requests
// on average, in that situation the writing bytes estimated 10MB,
// here we use 100MB as default value for tolerate 1s latency.
const DEFAULT_SCHED_PENDING_WRITE_MB: u64 = 100;
const DEFAULT_RESERVED_SPACE_GB: u64 = 5;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
#[config(skip)]
pub data_dir: String,
// Replaced by `GcConfig.ratio_threshold`. Keep it for backward compatibility.
#[config(skip)]
pub gc_ratio_threshold: f64,
#[config(skip)]
pub max_key_size: usize,
#[config(skip)]
pub scheduler_concurrency: usize,
#[config(skip)]
pub scheduler_worker_pool_size: usize,
#[config(skip)]
pub scheduler_pending_write_threshold: ReadableSize,
#[config(skip)]
// Reserve disk space to make tikv would have enough space to compact when disk is full.
pub reserve_space: ReadableSize,
#[config(skip)]
pub enable_async_apply_prewrite: bool,
#[config(skip)]
pub enable_ttl: bool,
/// Interval to check TTL for all SSTs,
pub ttl_check_poll_interval: ReadableDuration,
#[config(submodule)]
pub block_cache: BlockCacheConfig,
}
impl Default for Config {
fn | () -> Config {
let cpu_num = SysQuota::new().cpu_cores_quota();
Config {
data_dir: DEFAULT_DATA_DIR.to_owned(),
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVED_SPACE_GB),
enable_async_apply_prewrite: false,
enable_ttl: false,
ttl_check_poll_interval: ReadableDuration::hours(12),
block_cache: BlockCacheConfig::default(),
}
}
}
impl Config {
pub fn validate(&mut self) -> Result<(), Box<dyn Error>> {
if self.data_dir != DEFAULT_DATA_DIR {
self.data_dir = config::canonicalize_path(&self.data_dir)?
}
if self.scheduler_concurrency > MAX_SCHED_CONCURRENCY {
warn!(
"TiKV has optimized latch since v4.0, so it is not necessary to set large schedule \
concurrency. To save memory, change it from {:?} to {:?}",
self.scheduler_concurrency, MAX_SCHED_CONCURRENCY
);
self.scheduler_concurrency = MAX_SCHED_CONCURRENCY;
}
Ok(())
}
}
pub struct StorageConfigManger {
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
}
impl StorageConfigManger {
pub fn new(
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
) -> StorageConfigManger {
StorageConfigManger {
kvdb,
shared_block_cache,
ttl_checker_scheduler,
}
}
}
impl ConfigManager for StorageConfigManger {
fn dispatch(&mut self, mut change: ConfigChange) -> CfgResult<()> {
if let Some(ConfigValue::Module(mut block_cache)) = change.remove("block_cache") {
if !self.shared_block_cache {
return Err("shared block cache is disabled".into());
}
if let Some(size) = block_cache.remove("capacity") {
let s: OptionReadableSize = size.into();
if let Some(size) = s.0 {
// Hack: since all CFs in both kvdb and raftdb share a block cache, we can change
// the size through any of them. Here we change it through default CF in kvdb.
// A better way to do it is to hold the cache reference somewhere, and use it to
// change cache size.
let opt = self.kvdb.get_options_cf(CF_DEFAULT).unwrap(); // FIXME unwrap
opt.set_block_cache_capacity(size.0)?;
// Write config to metric
CONFIG_ROCKSDB_GAUGE
.with_label_values(&[CF_DEFAULT, "block_cache_size"])
.set(size.0 as f64);
}
}
} else if let Some(v) = change.remove("ttl_check_poll_interval") {
let interval: ReadableDuration = v.into();
self.ttl_checker_scheduler
.schedule(TTLCheckerTask::UpdatePollInterval(interval.into()))
.unwrap();
}
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct BlockCacheConfig {
#[config(skip)]
pub shared: bool,
pub capacity: OptionReadableSize,
#[config(skip)]
pub num_shard_bits: i32,
#[config(skip)]
pub strict_capacity_limit: bool,
#[config(skip)]
pub high_pri_pool_ratio: f64,
#[config(skip)]
pub memory_allocator: Option<String>,
}
impl Default for BlockCacheConfig {
fn default() -> BlockCacheConfig {
BlockCacheConfig {
shared: true,
capacity: OptionReadableSize(None),
num_shard_bits: 6,
strict_capacity_limit: false,
high_pri_pool_ratio: 0.8,
memory_allocator: Some(String::from("nodump")),
}
}
}
impl BlockCacheConfig {
pub fn build_shared_cache(&self) -> Option<Cache> {
if !self.shared {
return None;
}
let capacity = match self.capacity.0 {
None => {
let total_mem = SysQuota::new().memory_limit_in_bytes();
((total_mem as f64) * 0.45) as usize
}
Some(c) => c.0 as usize,
};
let mut cache_opts = LRUCacheOptions::new();
cache_opts.set_capacity(capacity);
cache_opts.set_num_shard_bits(self.num_shard_bits as c_int);
cache_opts.set_strict_capacity_limit(self.strict_capacity_limit);
cache_opts.set_high_pri_pool_ratio(self.high_pri_pool_ratio);
if let Some(allocator) = self.new_memory_allocator() {
cache_opts.set_memory_allocator(allocator);
}
Some(Cache::new_lru_cache(cache_opts))
}
fn new_memory_allocator(&self) -> Option<MemoryAllocator> {
if let Some(ref alloc) = self.memory_allocator {
match alloc.as_str() {
#[cfg(feature = "jemalloc")]
"nodump" => match MemoryAllocator::new_jemalloc_memory_allocator() {
Ok(allocator) => {
return Some(allocator);
}
Err(e) => {
warn!(
"Create jemalloc nodump allocator for block cache failed: {}, continue with default allocator",
e
);
}
},
"" => {}
other => {
warn!(
"Memory allocator {} is not supported, continue with default allocator",
other
);
}
}
};
None
}
}
| default | identifier_name |
shared.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Parser as ExpressionParser} from '../expression_parser/parser';
import {StringWrapper, isBlank, isPresent} from '../facade/lang';
import {HtmlAst, HtmlAstVisitor, HtmlAttrAst, HtmlCommentAst, HtmlElementAst, HtmlExpansionAst, HtmlExpansionCaseAst, HtmlTextAst, htmlVisitAll} from '../html_ast';
import {InterpolationConfig} from '../interpolation_config';
import {ParseError, ParseSourceSpan} from '../parse_util';
import {Message} from './message';
export const I18N_ATTR = 'i18n';
export const I18N_ATTR_PREFIX = 'i18n-';
const _CUSTOM_PH_EXP = /\/\/[\s\S]*i18n[\s\S]*\([\s\S]*ph[\s\S]*=[\s\S]*"([\s\S]*?)"[\s\S]*\)/g;
/**
* An i18n error.
*/
export class I18nError extends ParseError {
constructor(span: ParseSourceSpan, msg: string) { super(span, msg); }
}
export function partition(nodes: HtmlAst[], errors: ParseError[], implicitTags: string[]): Part[] {
let parts: Part[] = [];
for (let i = 0; i < nodes.length; ++i) {
let node = nodes[i];
let msgNodes: HtmlAst[] = [];
// Nodes between `<!-- i18n -->` and `<!-- /i18n -->`
if (_isOpeningComment(node)) {
let i18n = (<HtmlCommentAst>node).value.replace(/^i18n:?/, '').trim();
while (++i < nodes.length && !_isClosingComment(nodes[i])) {
msgNodes.push(nodes[i]);
}
if (i === nodes.length) {
errors.push(new I18nError(node.sourceSpan, 'Missing closing \'i18n\' comment.'));
break;
}
parts.push(new Part(null, null, msgNodes, i18n, true));
} else if (node instanceof HtmlElementAst) {
// Node with an `i18n` attribute
let i18n = _findI18nAttr(node);
let hasI18n: boolean = isPresent(i18n) || implicitTags.indexOf(node.name) > -1;
parts.push(new Part(node, null, node.children, isPresent(i18n) ? i18n.value : null, hasI18n));
} else if (node instanceof HtmlTextAst) {
parts.push(new Part(null, node, null, null, false));
}
}
return parts;
}
export class Part {
constructor(
public rootElement: HtmlElementAst, public rootTextNode: HtmlTextAst,
public children: HtmlAst[], public i18n: string, public hasI18n: boolean) {}
get sourceSpan(): ParseSourceSpan {
if (isPresent(this.rootElement)) {
return this.rootElement.sourceSpan;
}
if (isPresent(this.rootTextNode)) {
return this.rootTextNode.sourceSpan;
}
return new ParseSourceSpan(
this.children[0].sourceSpan.start, this.children[this.children.length - 1].sourceSpan.end);
}
createMessage(parser: ExpressionParser, interpolationConfig: InterpolationConfig): Message {
return new Message(
stringifyNodes(this.children, parser, interpolationConfig), meaning(this.i18n),
description(this.i18n));
}
}
function _isOpeningComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value.startsWith('i18n');
}
function _isClosingComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value === '/i18n';
}
function _findI18nAttr(p: HtmlElementAst): HtmlAttrAst {
let attrs = p.attrs;
for (let i = 0; i < attrs.length; i++) {
if (attrs[i].name === I18N_ATTR) {
return attrs[i];
}
}
return null;
}
export function meaning(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
return i18n.split('|')[0];
}
export function description(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
let parts = i18n.split('|', 2);
return parts.length > 1 ? parts[1] : null;
}
/**
* Extract a translation string given an `i18n-` prefixed attribute.
*
* @internal
*/
export function messageFromI18nAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, p: HtmlElementAst,
i18nAttr: HtmlAttrAst): Message {
const expectedName = i18nAttr.name.substring(5);
const attr = p.attrs.find(a => a.name == expectedName);
if (attr) {
return messageFromAttribute(
parser, interpolationConfig, attr, meaning(i18nAttr.value), description(i18nAttr.value));
}
throw new I18nError(p.sourceSpan, `Missing attribute '${expectedName}'.`);
}
export function messageFromAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, attr: HtmlAttrAst,
meaning: string = null, description: string = null): Message {
const value = removeInterpolation(attr.value, attr.sourceSpan, parser, interpolationConfig);
return new Message(value, meaning, description);
}
/**
* Replace interpolation in the `value` string with placeholders
*/
export function removeInterpolation(
value: string, source: ParseSourceSpan, expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
try {
const parsed =
expressionParser.splitInterpolation(value, source.toString(), interpolationConfig);
const usedNames = new Map<string, number>();
if (isPresent(parsed)) {
let res = '';
for (let i = 0; i < parsed.strings.length; ++i) {
res += parsed.strings[i];
if (i != parsed.strings.length - 1) {
let customPhName = extractPhNameFromInterpolation(parsed.expressions[i], i);
customPhName = dedupePhName(usedNames, customPhName);
res += `<ph name="${customPhName}"/>`;
}
}
return res;
}
return value;
} catch (e) {
return value;
}
}
/**
* Extract the placeholder name from the interpolation.
*
* Use a custom name when specified (ie: `{{<expression> //i18n(ph="FIRST")}}`) otherwise generate a
* unique name.
*/
export function extractPhNameFromInterpolation(input: string, index: number): string {
let customPhMatch = StringWrapper.split(input, _CUSTOM_PH_EXP);
return customPhMatch.length > 1 ? customPhMatch[1] : `INTERPOLATION_${index}`;
}
/**
* Return a unique placeholder name based on the given name
*/
export function dedupePhName(usedNames: Map<string, number>, name: string): string {
const duplicateNameCount = usedNames.get(name);
if (duplicateNameCount) {
usedNames.set(name, duplicateNameCount + 1);
return `${name}_${duplicateNameCount}`;
}
usedNames.set(name, 1);
return name;
}
/**
* Convert a list of nodes to a string message.
*
*/
export function stringifyNodes(
nodes: HtmlAst[], expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
const visitor = new _StringifyVisitor(expressionParser, interpolationConfig);
return htmlVisitAll(visitor, nodes).join('');
}
class _StringifyVisitor implements HtmlAstVisitor {
private _index: number = 0;
constructor(
private _parser: ExpressionParser, private _interpolationConfig: InterpolationConfig) {}
visitElement(ast: HtmlElementAst, context: any): any {
let name = this._index++;
let children = this._join(htmlVisitAll(this, ast.children), '');
return `<ph name="e${name}">${children}</ph>`;
}
visitAttr(ast: HtmlAttrAst, context: any): any { return null; }
visitText(ast: HtmlTextAst, context: any): any {
let index = this._index++;
let noInterpolation =
removeInterpolation(ast.value, ast.sourceSpan, this._parser, this._interpolationConfig);
if (noInterpolation != ast.value) {
return `<ph name="t${index}">${noInterpolation}</ph>`;
}
return ast.value;
}
visitComment(ast: HtmlCommentAst, context: any): any { return ''; }
visitExpansion(ast: HtmlExpansionAst, context: any): any { return null; }
| (ast: HtmlExpansionCaseAst, context: any): any { return null; }
private _join(strs: string[], str: string): string {
return strs.filter(s => s.length > 0).join(str);
}
}
| visitExpansionCase | identifier_name |
shared.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Parser as ExpressionParser} from '../expression_parser/parser';
import {StringWrapper, isBlank, isPresent} from '../facade/lang';
import {HtmlAst, HtmlAstVisitor, HtmlAttrAst, HtmlCommentAst, HtmlElementAst, HtmlExpansionAst, HtmlExpansionCaseAst, HtmlTextAst, htmlVisitAll} from '../html_ast';
import {InterpolationConfig} from '../interpolation_config';
import {ParseError, ParseSourceSpan} from '../parse_util';
import {Message} from './message';
export const I18N_ATTR = 'i18n';
export const I18N_ATTR_PREFIX = 'i18n-';
const _CUSTOM_PH_EXP = /\/\/[\s\S]*i18n[\s\S]*\([\s\S]*ph[\s\S]*=[\s\S]*"([\s\S]*?)"[\s\S]*\)/g;
/**
* An i18n error.
*/
export class I18nError extends ParseError {
constructor(span: ParseSourceSpan, msg: string) { super(span, msg); }
}
export function partition(nodes: HtmlAst[], errors: ParseError[], implicitTags: string[]): Part[] {
let parts: Part[] = [];
for (let i = 0; i < nodes.length; ++i) {
let node = nodes[i];
let msgNodes: HtmlAst[] = [];
// Nodes between `<!-- i18n -->` and `<!-- /i18n -->`
if (_isOpeningComment(node)) {
let i18n = (<HtmlCommentAst>node).value.replace(/^i18n:?/, '').trim();
while (++i < nodes.length && !_isClosingComment(nodes[i])) {
msgNodes.push(nodes[i]);
}
if (i === nodes.length) {
errors.push(new I18nError(node.sourceSpan, 'Missing closing \'i18n\' comment.'));
break;
}
parts.push(new Part(null, null, msgNodes, i18n, true));
} else if (node instanceof HtmlElementAst) {
// Node with an `i18n` attribute
let i18n = _findI18nAttr(node);
let hasI18n: boolean = isPresent(i18n) || implicitTags.indexOf(node.name) > -1;
parts.push(new Part(node, null, node.children, isPresent(i18n) ? i18n.value : null, hasI18n));
} else if (node instanceof HtmlTextAst) {
parts.push(new Part(null, node, null, null, false));
}
}
return parts;
}
export class Part {
constructor(
public rootElement: HtmlElementAst, public rootTextNode: HtmlTextAst,
public children: HtmlAst[], public i18n: string, public hasI18n: boolean) {}
get sourceSpan(): ParseSourceSpan {
if (isPresent(this.rootElement)) {
return this.rootElement.sourceSpan;
}
if (isPresent(this.rootTextNode)) {
return this.rootTextNode.sourceSpan;
}
return new ParseSourceSpan(
this.children[0].sourceSpan.start, this.children[this.children.length - 1].sourceSpan.end);
}
createMessage(parser: ExpressionParser, interpolationConfig: InterpolationConfig): Message {
return new Message(
stringifyNodes(this.children, parser, interpolationConfig), meaning(this.i18n),
description(this.i18n));
}
}
function _isOpeningComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value.startsWith('i18n');
}
function _isClosingComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value === '/i18n';
}
function _findI18nAttr(p: HtmlElementAst): HtmlAttrAst {
let attrs = p.attrs;
for (let i = 0; i < attrs.length; i++) {
if (attrs[i].name === I18N_ATTR) |
}
return null;
}
export function meaning(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
return i18n.split('|')[0];
}
export function description(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
let parts = i18n.split('|', 2);
return parts.length > 1 ? parts[1] : null;
}
/**
* Extract a translation string given an `i18n-` prefixed attribute.
*
* @internal
*/
export function messageFromI18nAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, p: HtmlElementAst,
i18nAttr: HtmlAttrAst): Message {
const expectedName = i18nAttr.name.substring(5);
const attr = p.attrs.find(a => a.name == expectedName);
if (attr) {
return messageFromAttribute(
parser, interpolationConfig, attr, meaning(i18nAttr.value), description(i18nAttr.value));
}
throw new I18nError(p.sourceSpan, `Missing attribute '${expectedName}'.`);
}
export function messageFromAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, attr: HtmlAttrAst,
meaning: string = null, description: string = null): Message {
const value = removeInterpolation(attr.value, attr.sourceSpan, parser, interpolationConfig);
return new Message(value, meaning, description);
}
/**
* Replace interpolation in the `value` string with placeholders
*/
export function removeInterpolation(
value: string, source: ParseSourceSpan, expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
try {
const parsed =
expressionParser.splitInterpolation(value, source.toString(), interpolationConfig);
const usedNames = new Map<string, number>();
if (isPresent(parsed)) {
let res = '';
for (let i = 0; i < parsed.strings.length; ++i) {
res += parsed.strings[i];
if (i != parsed.strings.length - 1) {
let customPhName = extractPhNameFromInterpolation(parsed.expressions[i], i);
customPhName = dedupePhName(usedNames, customPhName);
res += `<ph name="${customPhName}"/>`;
}
}
return res;
}
return value;
} catch (e) {
return value;
}
}
/**
* Extract the placeholder name from the interpolation.
*
* Use a custom name when specified (ie: `{{<expression> //i18n(ph="FIRST")}}`) otherwise generate a
* unique name.
*/
export function extractPhNameFromInterpolation(input: string, index: number): string {
let customPhMatch = StringWrapper.split(input, _CUSTOM_PH_EXP);
return customPhMatch.length > 1 ? customPhMatch[1] : `INTERPOLATION_${index}`;
}
/**
* Return a unique placeholder name based on the given name
*/
export function dedupePhName(usedNames: Map<string, number>, name: string): string {
const duplicateNameCount = usedNames.get(name);
if (duplicateNameCount) {
usedNames.set(name, duplicateNameCount + 1);
return `${name}_${duplicateNameCount}`;
}
usedNames.set(name, 1);
return name;
}
/**
* Convert a list of nodes to a string message.
*
*/
export function stringifyNodes(
nodes: HtmlAst[], expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
const visitor = new _StringifyVisitor(expressionParser, interpolationConfig);
return htmlVisitAll(visitor, nodes).join('');
}
class _StringifyVisitor implements HtmlAstVisitor {
private _index: number = 0;
constructor(
private _parser: ExpressionParser, private _interpolationConfig: InterpolationConfig) {}
visitElement(ast: HtmlElementAst, context: any): any {
let name = this._index++;
let children = this._join(htmlVisitAll(this, ast.children), '');
return `<ph name="e${name}">${children}</ph>`;
}
visitAttr(ast: HtmlAttrAst, context: any): any { return null; }
visitText(ast: HtmlTextAst, context: any): any {
let index = this._index++;
let noInterpolation =
removeInterpolation(ast.value, ast.sourceSpan, this._parser, this._interpolationConfig);
if (noInterpolation != ast.value) {
return `<ph name="t${index}">${noInterpolation}</ph>`;
}
return ast.value;
}
visitComment(ast: HtmlCommentAst, context: any): any { return ''; }
visitExpansion(ast: HtmlExpansionAst, context: any): any { return null; }
visitExpansionCase(ast: HtmlExpansionCaseAst, context: any): any { return null; }
private _join(strs: string[], str: string): string {
return strs.filter(s => s.length > 0).join(str);
}
}
| {
return attrs[i];
} | conditional_block |
shared.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Parser as ExpressionParser} from '../expression_parser/parser';
import {StringWrapper, isBlank, isPresent} from '../facade/lang';
import {HtmlAst, HtmlAstVisitor, HtmlAttrAst, HtmlCommentAst, HtmlElementAst, HtmlExpansionAst, HtmlExpansionCaseAst, HtmlTextAst, htmlVisitAll} from '../html_ast';
import {InterpolationConfig} from '../interpolation_config';
import {ParseError, ParseSourceSpan} from '../parse_util';
import {Message} from './message';
export const I18N_ATTR = 'i18n';
export const I18N_ATTR_PREFIX = 'i18n-';
const _CUSTOM_PH_EXP = /\/\/[\s\S]*i18n[\s\S]*\([\s\S]*ph[\s\S]*=[\s\S]*"([\s\S]*?)"[\s\S]*\)/g;
/**
* An i18n error.
*/
export class I18nError extends ParseError {
constructor(span: ParseSourceSpan, msg: string) { super(span, msg); }
}
export function partition(nodes: HtmlAst[], errors: ParseError[], implicitTags: string[]): Part[] {
let parts: Part[] = [];
for (let i = 0; i < nodes.length; ++i) {
let node = nodes[i];
let msgNodes: HtmlAst[] = [];
// Nodes between `<!-- i18n -->` and `<!-- /i18n -->`
if (_isOpeningComment(node)) {
let i18n = (<HtmlCommentAst>node).value.replace(/^i18n:?/, '').trim();
while (++i < nodes.length && !_isClosingComment(nodes[i])) {
msgNodes.push(nodes[i]);
}
if (i === nodes.length) {
errors.push(new I18nError(node.sourceSpan, 'Missing closing \'i18n\' comment.'));
break;
}
parts.push(new Part(null, null, msgNodes, i18n, true));
} else if (node instanceof HtmlElementAst) {
// Node with an `i18n` attribute
let i18n = _findI18nAttr(node);
let hasI18n: boolean = isPresent(i18n) || implicitTags.indexOf(node.name) > -1;
parts.push(new Part(node, null, node.children, isPresent(i18n) ? i18n.value : null, hasI18n));
} else if (node instanceof HtmlTextAst) {
parts.push(new Part(null, node, null, null, false));
}
}
return parts;
}
export class Part {
constructor(
public rootElement: HtmlElementAst, public rootTextNode: HtmlTextAst,
public children: HtmlAst[], public i18n: string, public hasI18n: boolean) {}
get sourceSpan(): ParseSourceSpan {
if (isPresent(this.rootElement)) {
return this.rootElement.sourceSpan;
}
if (isPresent(this.rootTextNode)) {
return this.rootTextNode.sourceSpan;
}
return new ParseSourceSpan(
this.children[0].sourceSpan.start, this.children[this.children.length - 1].sourceSpan.end);
}
createMessage(parser: ExpressionParser, interpolationConfig: InterpolationConfig): Message {
return new Message(
stringifyNodes(this.children, parser, interpolationConfig), meaning(this.i18n),
description(this.i18n));
}
}
function _isOpeningComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value.startsWith('i18n');
}
function _isClosingComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value === '/i18n';
}
function _findI18nAttr(p: HtmlElementAst): HtmlAttrAst {
let attrs = p.attrs;
for (let i = 0; i < attrs.length; i++) {
if (attrs[i].name === I18N_ATTR) {
return attrs[i];
}
}
return null;
}
export function meaning(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
return i18n.split('|')[0];
}
export function description(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
let parts = i18n.split('|', 2);
return parts.length > 1 ? parts[1] : null;
}
/**
* Extract a translation string given an `i18n-` prefixed attribute.
*
* @internal
*/
export function messageFromI18nAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, p: HtmlElementAst,
i18nAttr: HtmlAttrAst): Message {
const expectedName = i18nAttr.name.substring(5);
const attr = p.attrs.find(a => a.name == expectedName);
if (attr) {
return messageFromAttribute(
parser, interpolationConfig, attr, meaning(i18nAttr.value), description(i18nAttr.value));
}
throw new I18nError(p.sourceSpan, `Missing attribute '${expectedName}'.`);
}
export function messageFromAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, attr: HtmlAttrAst,
meaning: string = null, description: string = null): Message {
const value = removeInterpolation(attr.value, attr.sourceSpan, parser, interpolationConfig);
return new Message(value, meaning, description);
}
/**
* Replace interpolation in the `value` string with placeholders
*/
export function removeInterpolation(
value: string, source: ParseSourceSpan, expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
try {
const parsed =
expressionParser.splitInterpolation(value, source.toString(), interpolationConfig);
const usedNames = new Map<string, number>();
if (isPresent(parsed)) {
let res = '';
for (let i = 0; i < parsed.strings.length; ++i) {
res += parsed.strings[i];
if (i != parsed.strings.length - 1) {
let customPhName = extractPhNameFromInterpolation(parsed.expressions[i], i);
customPhName = dedupePhName(usedNames, customPhName);
res += `<ph name="${customPhName}"/>`;
}
}
return res;
}
return value;
} catch (e) {
return value;
}
}
/**
* Extract the placeholder name from the interpolation.
*
* Use a custom name when specified (ie: `{{<expression> //i18n(ph="FIRST")}}`) otherwise generate a
* unique name.
*/
export function extractPhNameFromInterpolation(input: string, index: number): string {
let customPhMatch = StringWrapper.split(input, _CUSTOM_PH_EXP);
return customPhMatch.length > 1 ? customPhMatch[1] : `INTERPOLATION_${index}`;
}
/**
* Return a unique placeholder name based on the given name
*/
export function dedupePhName(usedNames: Map<string, number>, name: string): string {
const duplicateNameCount = usedNames.get(name);
if (duplicateNameCount) {
usedNames.set(name, duplicateNameCount + 1);
return `${name}_${duplicateNameCount}`;
}
usedNames.set(name, 1);
return name;
}
/**
* Convert a list of nodes to a string message.
*
*/
export function stringifyNodes(
nodes: HtmlAst[], expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
const visitor = new _StringifyVisitor(expressionParser, interpolationConfig);
return htmlVisitAll(visitor, nodes).join('');
}
class _StringifyVisitor implements HtmlAstVisitor {
private _index: number = 0;
constructor(
private _parser: ExpressionParser, private _interpolationConfig: InterpolationConfig) {}
visitElement(ast: HtmlElementAst, context: any): any {
let name = this._index++;
let children = this._join(htmlVisitAll(this, ast.children), '');
return `<ph name="e${name}">${children}</ph>`;
}
visitAttr(ast: HtmlAttrAst, context: any): any { return null; }
visitText(ast: HtmlTextAst, context: any): any {
let index = this._index++;
let noInterpolation =
removeInterpolation(ast.value, ast.sourceSpan, this._parser, this._interpolationConfig);
if (noInterpolation != ast.value) {
return `<ph name="t${index}">${noInterpolation}</ph>`;
}
return ast.value;
}
visitComment(ast: HtmlCommentAst, context: any): any |
visitExpansion(ast: HtmlExpansionAst, context: any): any { return null; }
visitExpansionCase(ast: HtmlExpansionCaseAst, context: any): any { return null; }
private _join(strs: string[], str: string): string {
return strs.filter(s => s.length > 0).join(str);
}
}
| { return ''; } | identifier_body |
shared.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Parser as ExpressionParser} from '../expression_parser/parser';
import {StringWrapper, isBlank, isPresent} from '../facade/lang';
import {HtmlAst, HtmlAstVisitor, HtmlAttrAst, HtmlCommentAst, HtmlElementAst, HtmlExpansionAst, HtmlExpansionCaseAst, HtmlTextAst, htmlVisitAll} from '../html_ast';
import {InterpolationConfig} from '../interpolation_config';
import {ParseError, ParseSourceSpan} from '../parse_util';
import {Message} from './message';
export const I18N_ATTR = 'i18n';
export const I18N_ATTR_PREFIX = 'i18n-';
const _CUSTOM_PH_EXP = /\/\/[\s\S]*i18n[\s\S]*\([\s\S]*ph[\s\S]*=[\s\S]*"([\s\S]*?)"[\s\S]*\)/g;
/**
* An i18n error.
*/
export class I18nError extends ParseError {
constructor(span: ParseSourceSpan, msg: string) { super(span, msg); }
}
export function partition(nodes: HtmlAst[], errors: ParseError[], implicitTags: string[]): Part[] {
let parts: Part[] = [];
for (let i = 0; i < nodes.length; ++i) {
let node = nodes[i];
let msgNodes: HtmlAst[] = [];
// Nodes between `<!-- i18n -->` and `<!-- /i18n -->`
if (_isOpeningComment(node)) {
let i18n = (<HtmlCommentAst>node).value.replace(/^i18n:?/, '').trim();
while (++i < nodes.length && !_isClosingComment(nodes[i])) {
msgNodes.push(nodes[i]);
}
if (i === nodes.length) {
errors.push(new I18nError(node.sourceSpan, 'Missing closing \'i18n\' comment.'));
break;
}
parts.push(new Part(null, null, msgNodes, i18n, true));
} else if (node instanceof HtmlElementAst) {
// Node with an `i18n` attribute
let i18n = _findI18nAttr(node);
let hasI18n: boolean = isPresent(i18n) || implicitTags.indexOf(node.name) > -1;
parts.push(new Part(node, null, node.children, isPresent(i18n) ? i18n.value : null, hasI18n));
} else if (node instanceof HtmlTextAst) {
parts.push(new Part(null, node, null, null, false));
}
}
return parts;
}
export class Part {
constructor(
public rootElement: HtmlElementAst, public rootTextNode: HtmlTextAst,
public children: HtmlAst[], public i18n: string, public hasI18n: boolean) {}
get sourceSpan(): ParseSourceSpan {
if (isPresent(this.rootElement)) {
return this.rootElement.sourceSpan;
}
if (isPresent(this.rootTextNode)) {
return this.rootTextNode.sourceSpan;
}
return new ParseSourceSpan(
this.children[0].sourceSpan.start, this.children[this.children.length - 1].sourceSpan.end);
}
createMessage(parser: ExpressionParser, interpolationConfig: InterpolationConfig): Message {
return new Message(
stringifyNodes(this.children, parser, interpolationConfig), meaning(this.i18n),
description(this.i18n));
}
}
function _isOpeningComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value.startsWith('i18n');
}
function _isClosingComment(n: HtmlAst): boolean {
return n instanceof HtmlCommentAst && isPresent(n.value) && n.value === '/i18n';
}
function _findI18nAttr(p: HtmlElementAst): HtmlAttrAst {
let attrs = p.attrs;
for (let i = 0; i < attrs.length; i++) {
if (attrs[i].name === I18N_ATTR) {
return attrs[i];
}
}
return null;
}
export function meaning(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
return i18n.split('|')[0];
}
export function description(i18n: string): string {
if (isBlank(i18n) || i18n == '') return null;
let parts = i18n.split('|', 2);
return parts.length > 1 ? parts[1] : null;
}
/**
* Extract a translation string given an `i18n-` prefixed attribute.
*
* @internal
*/
export function messageFromI18nAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, p: HtmlElementAst,
i18nAttr: HtmlAttrAst): Message {
const expectedName = i18nAttr.name.substring(5);
const attr = p.attrs.find(a => a.name == expectedName);
if (attr) {
return messageFromAttribute(
parser, interpolationConfig, attr, meaning(i18nAttr.value), description(i18nAttr.value));
}
throw new I18nError(p.sourceSpan, `Missing attribute '${expectedName}'.`);
}
export function messageFromAttribute(
parser: ExpressionParser, interpolationConfig: InterpolationConfig, attr: HtmlAttrAst,
meaning: string = null, description: string = null): Message {
const value = removeInterpolation(attr.value, attr.sourceSpan, parser, interpolationConfig);
return new Message(value, meaning, description);
}
/**
* Replace interpolation in the `value` string with placeholders
*/
export function removeInterpolation(
value: string, source: ParseSourceSpan, expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
try {
const parsed =
expressionParser.splitInterpolation(value, source.toString(), interpolationConfig);
const usedNames = new Map<string, number>();
if (isPresent(parsed)) {
let res = '';
for (let i = 0; i < parsed.strings.length; ++i) {
res += parsed.strings[i];
if (i != parsed.strings.length - 1) {
let customPhName = extractPhNameFromInterpolation(parsed.expressions[i], i);
customPhName = dedupePhName(usedNames, customPhName);
res += `<ph name="${customPhName}"/>`;
}
}
return res;
}
return value;
} catch (e) {
return value;
}
}
/**
* Extract the placeholder name from the interpolation.
*
* Use a custom name when specified (ie: `{{<expression> //i18n(ph="FIRST")}}`) otherwise generate a
* unique name.
*/
export function extractPhNameFromInterpolation(input: string, index: number): string {
let customPhMatch = StringWrapper.split(input, _CUSTOM_PH_EXP);
return customPhMatch.length > 1 ? customPhMatch[1] : `INTERPOLATION_${index}`;
}
/**
* Return a unique placeholder name based on the given name
*/
export function dedupePhName(usedNames: Map<string, number>, name: string): string {
const duplicateNameCount = usedNames.get(name);
if (duplicateNameCount) {
usedNames.set(name, duplicateNameCount + 1);
return `${name}_${duplicateNameCount}`;
} | usedNames.set(name, 1);
return name;
}
/**
* Convert a list of nodes to a string message.
*
*/
export function stringifyNodes(
nodes: HtmlAst[], expressionParser: ExpressionParser,
interpolationConfig: InterpolationConfig): string {
const visitor = new _StringifyVisitor(expressionParser, interpolationConfig);
return htmlVisitAll(visitor, nodes).join('');
}
class _StringifyVisitor implements HtmlAstVisitor {
private _index: number = 0;
constructor(
private _parser: ExpressionParser, private _interpolationConfig: InterpolationConfig) {}
visitElement(ast: HtmlElementAst, context: any): any {
let name = this._index++;
let children = this._join(htmlVisitAll(this, ast.children), '');
return `<ph name="e${name}">${children}</ph>`;
}
visitAttr(ast: HtmlAttrAst, context: any): any { return null; }
visitText(ast: HtmlTextAst, context: any): any {
let index = this._index++;
let noInterpolation =
removeInterpolation(ast.value, ast.sourceSpan, this._parser, this._interpolationConfig);
if (noInterpolation != ast.value) {
return `<ph name="t${index}">${noInterpolation}</ph>`;
}
return ast.value;
}
visitComment(ast: HtmlCommentAst, context: any): any { return ''; }
visitExpansion(ast: HtmlExpansionAst, context: any): any { return null; }
visitExpansionCase(ast: HtmlExpansionCaseAst, context: any): any { return null; }
private _join(strs: string[], str: string): string {
return strs.filter(s => s.length > 0).join(str);
}
} | random_line_split | |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if !v.is_empty() {
let f = Path::new(v[0]);
if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") |
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned), ..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
}
| {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if !m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize), .. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize), .. cfg };
} | conditional_block |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn | (cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if !v.is_empty() {
let f = Path::new(v[0]);
if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if !m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize), .. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize), .. cfg };
}
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned), ..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
}
| run_the_complete_fn | identifier_name |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if !v.is_empty() {
let f = Path::new(v[0]);
if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if !m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize), .. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize), .. cfg };
}
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned), ..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() |
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
}
| {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
} | identifier_body |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if !v.is_empty() {
let f = Path::new(v[0]);
if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments. | }
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if !m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize), .. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize), .. cfg };
}
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned), ..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
} | random_line_split | |
mod.rs | //! Provides dedicated `system` pipelines inside OrbTk.
//!
//! System pipelines are modules, that handle specific tasks when
//! iteratively walking the widget tree. Because each widget
//! implements the `state` trait, all system modules are accessible.
//! Pipelines are connected in a logical order. E.g. the `InitSystem`
//! is triggered **before** the `LayoutSystem`. The `LayoutSystem` is
//! triggerd **before** the `RenderSystem`. Handling of widget objects
//! inside the pipelines rely on the Entity Component System
//! ([`DCES`]).
//!
//! [`DCES`]: https://gitlab.redox-os.org/redox-os/dces-rust
pub use self::cleanup_system::*;
pub use self::event_state_system::*;
pub use self::init_system::*;
pub use self::layout_system::*;
pub use self::post_layout_state_system::*;
pub use self::render_system::*; | mod init_system;
mod layout_system;
mod post_layout_state_system;
mod render_system; |
mod cleanup_system;
mod event_state_system; | random_line_split |
session.rs | //! This exposes `Session`, the struct stored in the `Alloy`.
use std::sync::Arc;
use super::SessionStore;
/// A session which provides basic CRUD operations. | key: K,
store: Arc<Box<SessionStore<K, V> + 'static + Send + Sync>>
}
impl<K, V> Session<K, V> {
/// Create a new session
pub fn new(key: K, store: Box<SessionStore<K, V> + 'static + Send + Sync>) -> Session<K, V> {
Session {
key: key,
store: Arc::new(store)
}
}
/// Set the value of this session, replacing any previously set value.
pub fn insert(&self, value: V) {
self.store.insert(&self.key, value)
}
/// Retrieve the value of this session.
///
/// Returns `None` if this session has not been set.
pub fn find(&self) -> Option<V> {
self.store.find(&self.key)
}
/// Swap the given value with the current value of this session.
///
/// Returns the value being replaced.
/// Returns `None` if this session was not yet set.
pub fn swap(&self, value: V) -> Option<V> {
self.store.swap(&self.key, value)
}
/// Insert value, if not yet set, or update the current value of this session.
///
/// Returns an owned copy of the set (current) value of this session.
///
/// This is analagous to the `insert_or_update_with` method of `HashMap`.
pub fn upsert(&self, value: V, mutator: |&mut V|) -> V {
self.store.upsert(&self.key, value, mutator)
}
/// Remove the session stored at this key.
pub fn remove(&self) -> bool {
self.store.remove(&self.key)
}
} | pub struct Session<K, V> { | random_line_split |
session.rs | //! This exposes `Session`, the struct stored in the `Alloy`.
use std::sync::Arc;
use super::SessionStore;
/// A session which provides basic CRUD operations.
pub struct Session<K, V> {
key: K,
store: Arc<Box<SessionStore<K, V> + 'static + Send + Sync>>
}
impl<K, V> Session<K, V> {
/// Create a new session
pub fn new(key: K, store: Box<SessionStore<K, V> + 'static + Send + Sync>) -> Session<K, V> {
Session {
key: key,
store: Arc::new(store)
}
}
/// Set the value of this session, replacing any previously set value.
pub fn insert(&self, value: V) {
self.store.insert(&self.key, value)
}
/// Retrieve the value of this session.
///
/// Returns `None` if this session has not been set.
pub fn find(&self) -> Option<V> {
self.store.find(&self.key)
}
/// Swap the given value with the current value of this session.
///
/// Returns the value being replaced.
/// Returns `None` if this session was not yet set.
pub fn | (&self, value: V) -> Option<V> {
self.store.swap(&self.key, value)
}
/// Insert value, if not yet set, or update the current value of this session.
///
/// Returns an owned copy of the set (current) value of this session.
///
/// This is analagous to the `insert_or_update_with` method of `HashMap`.
pub fn upsert(&self, value: V, mutator: |&mut V|) -> V {
self.store.upsert(&self.key, value, mutator)
}
/// Remove the session stored at this key.
pub fn remove(&self) -> bool {
self.store.remove(&self.key)
}
}
| swap | identifier_name |
session.rs | //! This exposes `Session`, the struct stored in the `Alloy`.
use std::sync::Arc;
use super::SessionStore;
/// A session which provides basic CRUD operations.
pub struct Session<K, V> {
key: K,
store: Arc<Box<SessionStore<K, V> + 'static + Send + Sync>>
}
impl<K, V> Session<K, V> {
/// Create a new session
pub fn new(key: K, store: Box<SessionStore<K, V> + 'static + Send + Sync>) -> Session<K, V> {
Session {
key: key,
store: Arc::new(store)
}
}
/// Set the value of this session, replacing any previously set value.
pub fn insert(&self, value: V) {
self.store.insert(&self.key, value)
}
/// Retrieve the value of this session.
///
/// Returns `None` if this session has not been set.
pub fn find(&self) -> Option<V> |
/// Swap the given value with the current value of this session.
///
/// Returns the value being replaced.
/// Returns `None` if this session was not yet set.
pub fn swap(&self, value: V) -> Option<V> {
self.store.swap(&self.key, value)
}
/// Insert value, if not yet set, or update the current value of this session.
///
/// Returns an owned copy of the set (current) value of this session.
///
/// This is analagous to the `insert_or_update_with` method of `HashMap`.
pub fn upsert(&self, value: V, mutator: |&mut V|) -> V {
self.store.upsert(&self.key, value, mutator)
}
/// Remove the session stored at this key.
pub fn remove(&self) -> bool {
self.store.remove(&self.key)
}
}
| {
self.store.find(&self.key)
} | identifier_body |
isMatch.ts | /**
* Performs a partial deep comparison between object and source to
* determine if object contains equivalent property values.
*
* Partial comparisons will match empty array and empty object
* source values against any array or object value, respectively.
*
* @category Language
*
* First version: July 14, 2017 | *
* @export
* @param {object} input
* @param {object} source
* @returns {boolean}
*/
import { isEqual } from './isEqual';
import { keysIn } from './keysIn';
export function isMatch(input: object, source: object): boolean {
const sourceKey: PropertyKey[]
= keysIn({ source, goDeep: true, enumOnly: true });
if (sourceKey.length === 0) return true;
const inputKey: PropertyKey[]
= keysIn({ source: input, goDeep: true, enumOnly: true });
if (sourceKey.length > inputKey.length) return false;
let key: PropertyKey;
for (key of sourceKey) {
if (!inputKey.includes(key) || !isEqual(input[key], source[key]))
return false;
}
return true;
} | * Last updated : July 14, 2017 | random_line_split |
isMatch.ts | /**
* Performs a partial deep comparison between object and source to
* determine if object contains equivalent property values.
*
* Partial comparisons will match empty array and empty object
* source values against any array or object value, respectively.
*
* @category Language
*
* First version: July 14, 2017
* Last updated : July 14, 2017
*
* @export
* @param {object} input
* @param {object} source
* @returns {boolean}
*/
import { isEqual } from './isEqual';
import { keysIn } from './keysIn';
export function isMatch(input: object, source: object): boolean | {
const sourceKey: PropertyKey[]
= keysIn({ source, goDeep: true, enumOnly: true });
if (sourceKey.length === 0) return true;
const inputKey: PropertyKey[]
= keysIn({ source: input, goDeep: true, enumOnly: true });
if (sourceKey.length > inputKey.length) return false;
let key: PropertyKey;
for (key of sourceKey) {
if (!inputKey.includes(key) || !isEqual(input[key], source[key]))
return false;
}
return true;
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.