| text (string, lengths 4 – 1.02M) | meta (dict) |
|---|---|
from setuptools import setup
setup(
name='smlib',
version='0.1',
description='Quick and easy to use library for sending emails',
license='BSD',
author='Grzegorz Blach',
author_email='grzegorz@blach.pl',
packages=['smlib'],
test_suite='test.smlib_test',
)
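# Hedged usage note (not part of the original file): with a standard setuptools
# workflow, `pip install .` installs the smlib package and `python setup.py test`
# runs the declared test.smlib_test suite.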
|
{
"content_hash": "f9d737e85ef6a6c811de90ee09407d76",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 24,
"alnum_prop": 0.65625,
"repo_name": "MagikBSD/smlib",
"id": "d81e18fe627307015b3592a0633ff2cd16f74e1a",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12503"
}
],
"symlink_target": ""
}
|
"""
pybitcoin
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
from .blockchain_client import BlockchainClient
from .blockcypher import BlockcypherClient
from .blockchain_info import BlockchainInfoClient
from .chain_com import ChainComClient
from .bitcoind import BitcoindClient, create_bitcoind_service_proxy
from . import blockcypher
from . import blockchain_info
from . import chain_com
from . import bitcoind
|
{
"content_hash": "fb886f7745660e912e5f73f61740e7b3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.7882882882882883,
"repo_name": "blockstack/pybitcoin",
"id": "c23214e3eaedff0c67743a68759c161eecd6d9c4",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybitcoin/services/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "872846"
}
],
"symlink_target": ""
}
|
from typing import Text, List, Any, Tuple, Callable, Dict, Optional
import dataclasses
import numpy as np
import pytest
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.constants import SPACY_DOCS, TOKENS_NAMES
from rasa.shared.nlu.constants import TEXT, INTENT, RESPONSE
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
@pytest.fixture()
def resource() -> Resource:
return Resource("regex_featurizer")
@pytest.fixture()
def create_featurizer(
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource: Resource,
) -> Callable[..., RegexFeaturizer]:
def inner(
        config: Optional[Dict[Text, Any]] = None,
known_patterns: Optional[List[Dict[Text, Any]]] = None,
) -> RegexFeaturizer:
config = config or {}
return RegexFeaturizer(
{**RegexFeaturizer.get_default_config(), **config},
default_model_storage,
resource,
default_execution_context,
known_patterns,
)
return inner
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"labeled_tokens",
[
(
"hey how are you today",
[
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[0.0, 1.0, 0.0],
[0],
),
(
"hey 456 how are you",
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[1.0, 1.0, 0.0],
[1, 0],
),
(
"blah balh random eh",
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0],],
[0.0, 0.0, 0.0],
[],
),
(
"a 1 digit number",
[[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0],],
[1.0, 0.0, 1.0],
[1, 1],
),
],
)
def test_regex_featurizer(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[int],
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence, RESPONSE: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, tokens, expected_sequence_features, expected_sentence_features,"
"labeled_tokens",
[
(
"明天上海的天气怎么样?",
[("明天", 0), ("上海", 2), ("的", 4), ("天气", 5), ("怎么样", 7), ("?", 10)],
[[0.0, 1.0], [1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[1.0, 1.0],
[0.0, 1.0],
),
(
"北京的天气如何?",
[("北京", 0), ("的", 2), ("天气", 3), ("如何", 5), ("?", 7)],
[[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[1.0, 0.0],
[0.0],
),
(
"昨天和今天的天气都不错",
[("昨天", 0), ("和", 2), ("今天", 3), ("的", 5), ("天气", 6), ("都", 8), ("不错", 9)],
[
[0.0, 1.0],
[0.0, 0.0],
[0.0, 1.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
],
[0.0, 1.0],
[0.0, 2.0],
),
(
"后天呢?",
[("后天", 0), ("呢", 2), ("?", 3)],
[[0.0, 1.0], [0.0, 0.0], [0.0, 0.0]],
[0.0, 1.0],
[0.0],
),
],
)
def test_lookup_tables_without_use_word_boundaries(
sentence: Text,
tokens: List[Tuple[Text, float]],
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[float],
create_featurizer: Callable[..., RegexFeaturizer],
):
from rasa.nlu.tokenizers.tokenizer import Token
lookups = [
{"name": "cites", "elements": ["北京", "上海", "广州", "深圳", "杭州"],},
{"name": "dates", "elements": ["昨天", "今天", "明天", "后天"],},
]
ftr = create_featurizer({"use_word_boundaries": False})
training_data = TrainingData()
training_data.lookup_tables = lookups
ftr.train(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(TOKENS_NAMES[TEXT], [Token(word, start) for (word, start) in tokens])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features, "
"labeled_tokens",
[
(
"lemonade and mapo tofu",
[[1.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[1.0, 1.0],
[0.0, 2.0, 3.0],
),
(
"a cup of tea",
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [1.0, 0.0]],
[1.0, 0.0],
[3.0],
),
(
"Is burrito my favorite food?",
[[0.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0],],
[0.0, 1.0],
[1.0],
),
("I want club?mate", [[0.0, 0.0], [0.0, 0.0], [1.0, 0.0]], [1.0, 0.0], [2.0]),
],
)
def test_lookup_tables(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[float],
spacy_nlp: Any,
spacy_tokenizer: SpacyTokenizer,
create_featurizer: Callable[..., RegexFeaturizer],
):
lookups = [
{
"name": "drinks",
"elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club?mate"],
},
{"name": "plates", "elements": "data/test/lookup_tables/plates.txt"},
]
ftr = create_featurizer()
training_data = TrainingData()
training_data.lookup_tables = lookups
ftr.train(training_data)
ftr.process_training_data(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set("text_spacy_doc", spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features",
[
("hey how are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]),
("hey 456 how are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]),
("blah balh random eh", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]),
("a 1 digit number", [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]),
],
)
def test_regex_featurizer_no_sequence(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray()[0], expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray()[-1], expected_sentence_features, atol=1e-10
)
def test_regex_featurizer_train(
create_featurizer: Callable[..., RegexFeaturizer],
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey how are you today 19.12.2019 ?"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
whitespace_tokenizer.process_training_data(TrainingData([message]))
training_data = TrainingData([message], regex_features=patterns)
featurizer.train(training_data)
featurizer.process_training_data(training_data)
expected = np.array([0, 1, 0])
expected_cls = np.array([1, 1, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (6, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
seq_vecs, sen_vec = message.get_sparse_features(RESPONSE, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (6, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
seq_vecs, sen_vec = message.get_sparse_features(INTENT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert seq_vecs is None
assert sen_vec is None
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"case_sensitive",
[
("Hey How are you today", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], True),
("Hey How are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], False),
("Hey 456 How are you", [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], True),
("Hey 456 How are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], False),
],
)
def test_regex_featurizer_case_sensitive(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
case_sensitive: bool,
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(
{"case_sensitive": case_sensitive}, known_patterns=patterns,
)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray()[0], expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray()[-1], expected_sentence_features, atol=1e-10
)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"labeled_tokens, use_word_boundaries",
[
("how are you", [[1.0], [0.0], [0.0]], [1.0], [0.0], True),
("how are you", [[1.0], [0.0], [0.0]], [1.0], [0.0], False),
("Take a shower", [[0.0], [0.0], [0.0]], [0.0], [], True),
("Take a shower", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
("What a show", [[0.0], [0.0], [0.0]], [0.0], [], True),
("What a show", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
("The wolf howled", [[0.0], [0.0], [0.0]], [0.0], [], True),
("The wolf howled", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
],
)
def test_lookup_with_and_without_boundaries(
sentence: Text,
expected_sequence_features: List[List[float]],
expected_sentence_features: List[float],
labeled_tokens: List[float],
use_word_boundaries: bool,
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
ftr = create_featurizer({"use_word_boundaries": use_word_boundaries})
training_data = TrainingData()
# we use lookups because the "use_word_boundaries" flag is only used when
# producing patterns from lookup tables
lookups = [{"name": "how", "elements": ["how"]}]
training_data.lookup_tables = lookups
ftr.train(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
(sequence_features, sentence_features) = ftr._features_for_patterns(message, TEXT)
sequence_features = sequence_features.toarray()
sentence_features = sentence_features.toarray()
num_of_patterns = sum([len(lookup["elements"]) for lookup in lookups])
assert sequence_features.shape == (
len(message.get(TOKENS_NAMES[TEXT])),
num_of_patterns,
)
num_of_lookup_tables = len(lookups)
assert sentence_features.shape == (num_of_lookup_tables, num_of_patterns)
# sequence_features should be {0,1} for each token: 1 if match, 0 if not
assert np.allclose(sequence_features, expected_sequence_features, atol=1e-10)
# sentence_features should be {0,1} for each lookup table: 1 if sentence
# contains match from that table, 0 if not
assert np.allclose(sentence_features, expected_sentence_features, atol=1e-10)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
# labeled_tokens should list the token(s) which match a pattern
assert num_matches == labeled_tokens.count(i)
def test_persist_load_for_finetuning(
create_featurizer: Callable[..., RegexFeaturizer],
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource: Resource,
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey how are you today 19.12.2019 ?"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
training_data = TrainingData([message], regex_features=patterns)
whitespace_tokenizer.process_training_data(training_data)
featurizer.train(training_data)
loaded_featurizer = RegexFeaturizer.load(
RegexFeaturizer.get_default_config(),
default_model_storage,
resource,
dataclasses.replace(default_execution_context, is_finetuning=True),
)
# Test component loaded in finetune mode and also with
# same patterns as before and vocabulary statistics
assert loaded_featurizer.known_patterns == featurizer.known_patterns
assert loaded_featurizer.finetune_mode
new_lookups = [{"name": "plates", "elements": "data/test/lookup_tables/plates.txt"}]
training_data = TrainingData()
training_data.lookup_tables = new_lookups
loaded_featurizer.train(training_data)
# Test merging of a new pattern to an already trained component.
assert len(loaded_featurizer.known_patterns) == 4
def test_vocabulary_expand_for_finetuning(
create_featurizer: Callable[..., RegexFeaturizer],
default_model_storage: ModelStorage,
resource: Resource,
default_execution_context: ExecutionContext,
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey hey 2020"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
training_data = TrainingData([message], regex_features=patterns)
whitespace_tokenizer.process_training_data(training_data)
featurizer.train(training_data)
featurizer.process_training_data(training_data)
# Test featurization of message
expected = np.array([1, 0])
expected_cls = np.array([1, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (3, 2) == seq_vecs.shape
assert (1, 2) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
loaded_featurizer = RegexFeaturizer.load(
RegexFeaturizer.get_default_config(),
default_model_storage,
resource,
dataclasses.replace(default_execution_context, is_finetuning=True),
)
new_patterns = [
{"pattern": "\\btoday*", "name": "day", "usage": "intent"},
{"pattern": "\\bhey+", "name": "hello", "usage": "intent"},
]
new_sentence = "hey today"
message = Message(data={TEXT: new_sentence})
message.set(RESPONSE, new_sentence)
message.set(INTENT, "intent")
new_training_data = TrainingData([message], regex_features=patterns + new_patterns)
whitespace_tokenizer.process_training_data(new_training_data)
loaded_featurizer.train(new_training_data)
loaded_featurizer.process_training_data(new_training_data)
# Test featurization of message, this time for the extra pattern as well.
expected_token_1 = np.array([1, 0, 0])
expected_token_2 = np.array([0, 0, 1])
expected_cls = np.array([1, 0, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (2, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected_token_1)
assert np.all(seq_vecs.toarray()[1] == expected_token_2)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
# let's check if the order of patterns is preserved
for old_index, pattern in enumerate(featurizer.known_patterns):
assert pattern["name"] == loaded_featurizer.known_patterns[old_index]["name"]
# we also modified a pattern, check if that is correctly modified
pattern_to_check = [
pattern
for pattern in loaded_featurizer.known_patterns
if pattern["name"] == "hello"
]
assert pattern_to_check == [new_patterns[1]]
|
{
"content_hash": "01c7ec2c6aa5666a986ec53639b20aa4",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 88,
"avg_line_length": 34.78048780487805,
"alnum_prop": 0.5905563347358579,
"repo_name": "RasaHQ/rasa_nlu",
"id": "384de53a76d435823bbf51a4697cf7f745146462",
"size": "21562",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "tests/nlu/featurizers/test_regex_featurizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
__all__ = ['DeletionTaskManager']
class DeletionTaskManager(object):
def __init__(self, default_task=None):
self.tasks = {}
self.default_task = default_task
def get(self, task=None, **kwargs):
if task is None:
model = kwargs.get('model')
try:
task = self.tasks[model]
except KeyError:
task = self.default_task
return task(manager=self, **kwargs)
def register(self, model, task):
self.tasks[model] = task
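# Hedged usage sketch (not part of the original file): the manager maps a model
# class to a deletion-task class and falls back to `default_task` for anything
# unregistered. The task classes below are hypothetical stand-ins.
#
#     class BaseDeletionTask(object):
#         def __init__(self, manager=None, model=None, **kwargs):
#             self.manager = manager
#             self.model = model
#
#     class GroupDeletionTask(BaseDeletionTask):
#         pass
#
#     deletions = DeletionTaskManager(default_task=BaseDeletionTask)
#     deletions.register(Group, GroupDeletionTask)
#     deletions.get(model=Group)  # -> GroupDeletionTask instance
#     deletions.get(model=User)   # -> BaseDeletionTask instance (default fallback)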
|
{
"content_hash": "315ae79ab97608b9a27c7cb57291f516",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 54,
"avg_line_length": 27.80952380952381,
"alnum_prop": 0.571917808219178,
"repo_name": "jean/sentry",
"id": "e4a5073a732d266ce83df67958c25f832e5f376e",
"size": "584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/deletions/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from . import views
def redirect_doc(uri):
return RedirectView.as_view(
url='https://developer.mozilla.org/docs%s' % uri)
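# Hedged illustration (not part of the original file): each target below is the
# given URI prefixed with the MDN docs root, e.g. redirect_doc('/Web/Apps/Design')
# redirects to https://developer.mozilla.org/docs/Web/Apps/Design.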
redirect_patterns = patterns('',
url('^docs/firefox_os_guideline$',
redirect_doc('/Web/Apps/Design'),
name='ecosystem.ffos_guideline'),
url('^docs/responsive_design$',
redirect_doc('/Web_Development/Mobile/Responsive_design'),
name='ecosystem.responsive_design'),
url('^docs/patterns$',
redirect_doc('/Web/Apps/Design/Responsive_Navigation_Patterns'),
name='ecosystem.design_patterns'),
url('^docs/review$',
redirect_doc('/Web/Apps/Publishing/Marketplace_review_criteria'),
name='ecosystem.publish_review'),
url('^docs/hosted$',
redirect_doc('/Mozilla/Marketplace/Publish_options#Hosted_apps'),
name='ecosystem.publish_hosted'),
url('^docs/submission$',
redirect_doc('/Web/Apps/Publishing/Submitting_an_app'),
name='ecosystem.publish_submit'),
url('^docs/packaged$',
redirect_doc('/Web/Apps/Developing/Packaged_apps'),
name='ecosystem.publish_packaged'),
url('^docs/intro_apps$',
redirect_doc('/Web/Apps/Quickstart/Build/Intro_to_open_web_apps'),
name='ecosystem.build_intro'),
url('^docs/firefox_os$',
redirect_doc('/Mozilla/Firefox_OS'),
name='ecosystem.build_ffos'),
url('^docs/manifests$',
redirect_doc('/Web/Apps/FAQs/About_app_manifests'),
name='ecosystem.build_manifests'),
url('^docs/apps_offline$',
redirect_doc('/Web/Apps/Offline_apps'),
name='ecosystem.build_apps_offline'),
url('^docs/game_apps$',
redirect_doc('/Web/Apps/Developing/Games'),
name='ecosystem.build_game_apps'),
url('^docs/mobile_developers$',
redirect_doc('/Web/Apps/Quickstart/Build/For_mobile_developers'),
name='ecosystem.build_mobile_developers'),
url('^docs/web_developers$',
redirect_doc('/Web/Apps/Quickstart/Build/For_Web_developers'),
name='ecosystem.build_web_developers'),
url('^docs/firefox_os_simulator$',
redirect_doc('/Tools/Firefox_OS_Simulator'),
name='ecosystem.firefox_os_simulator'),
url('^docs/payments$',
redirect_doc('/Web/Apps/Quickstart/Build/Payments'),
name='ecosystem.build_payments'),
url('^docs/concept$',
redirect_doc('/Web/Apps/Quickstart/Design/Concept_A_great_app'),
name='ecosystem.design_concept'),
url('^docs/fundamentals$',
redirect_doc('/Web/Apps/Quickstart/Design/Design_Principles'),
name='ecosystem.design_fundamentals'),
url('^docs/quick_start$',
redirect_doc('/Web/Apps/Quickstart/Build/Your_first_app'),
name='ecosystem.build_quick'),
url('^docs/reference_apps$',
redirect_doc('/Web/Apps/Reference_apps'),
name='ecosystem.build_reference'),
url('^docs/payments/status$',
redirect_doc('/Mozilla/Marketplace/Payments_Status'),
name='ecosystem.publish_payments'),
url('^docs/tools$',
redirect_doc('/Web/Apps/Quickstart/Build/App_tools'),
name='ecosystem.build_tools'),
url('^docs/app_generator$',
redirect_doc('/Web/Apps/Developing/App_templates'),
name='ecosystem.build_app_generator'),
url('^docs/app_manager$',
redirect_doc('/Mozilla/Firefox_OS/Using_the_App_Manager'),
name='ecosystem.app_manager'),
url('^docs/dev_tools$',
redirect_doc('/Tools'),
name='ecosystem.build_dev_tools'),
)
urlpatterns = redirect_patterns + patterns('',
url('^$', views.landing, name='ecosystem.landing'),
url('^partners$', views.partners, name='ecosystem.partners'),
url('^support$', views.support, name='ecosystem.support'),
url('^dev_phone$', views.dev_phone, name='ecosystem.dev_phone'),
url('^docs/ui_guidelines$', views.design_ui,
name='ecosystem.design_ui'),
url('^docs/deploy$', views.publish_deploy,
name='ecosystem.publish_deploy'),
url('^docs/badges$', views.publish_badges,
name='ecosystem.publish_badges'),
url('^docs/apps/(?P<page>\w+)?$', views.apps_documentation,
name='ecosystem.apps_documentation'),
)
|
{
"content_hash": "b3137eed695670433736a884f214c5c7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 74,
"avg_line_length": 42.294117647058826,
"alnum_prop": 0.6418636995827538,
"repo_name": "jinankjain/zamboni",
"id": "b50bbbcf4ff7a7146c5189b665cb3bd0c736b2be",
"size": "4314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mkt/ecosystem/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
LOCAL = lambda x: os.path.join(os.path.sep.join(
os.path.abspath(
os.path.dirname(__file__)).split(os.path.sep)[:-1]), x)
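# Hedged illustration (not part of the original file): LOCAL(x) resolves x
# relative to the directory one level above this settings package. A more
# explicit equivalent of the same logic:
#
#     def local(filename):
#         settings_dir = os.path.dirname(os.path.abspath(__file__))
#         project_dir = os.path.sep.join(settings_dir.split(os.path.sep)[:-1])
#         return os.path.join(project_dir, filename)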
|
{
"content_hash": "3a6ae6b475f644cb7d8d68c867e58655",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 75,
"avg_line_length": 33.8,
"alnum_prop": 0.5266272189349113,
"repo_name": "arruda/riddles",
"id": "9a0cfababc24f0f55daf0fe708e1ba767876a28c",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riddles/settings/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10938"
},
{
"name": "JavaScript",
"bytes": "1734"
},
{
"name": "Puppet",
"bytes": "107473"
},
{
"name": "Python",
"bytes": "17848"
},
{
"name": "Ruby",
"bytes": "397566"
},
{
"name": "Shell",
"bytes": "4729"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text,tokens', [
('"deserve,"--and', ['"', "deserve", ',"--', "and"]),
("exception;--exclusive", ["exception", ";--", "exclusive"]),
("day.--Is", ["day", ".--", "Is"]),
("refinement:--just", ["refinement", ":--", "just"]),
("memories?--To", ["memories", "?--", "To"]),
("Useful.=--Therefore", ["Useful", ".=--", "Therefore"]),
("=Hope.=--Pandora", ["=", "Hope", ".=--", "Pandora"])])
def test_issue801(en_tokenizer, text, tokens):
"""Test that special characters + hyphens are split correctly."""
doc = en_tokenizer(text)
assert len(doc) == len(tokens)
assert [t.text for t in doc] == tokens
|
{
"content_hash": "175033e80ef2c3faa85ff295c34e05af",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 39.888888888888886,
"alnum_prop": 0.5431754874651811,
"repo_name": "Gregory-Howard/spaCy",
"id": "3d83e707b8b64280c850929983cf50fcded9ef9e",
"size": "734",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "spacy/tests/regression/test_issue801.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "237446"
},
{
"name": "C++",
"bytes": "26995"
},
{
"name": "CSS",
"bytes": "26457"
},
{
"name": "HTML",
"bytes": "272578"
},
{
"name": "JavaScript",
"bytes": "880"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "98917092"
},
{
"name": "Shell",
"bytes": "753"
}
],
"symlink_target": ""
}
|
import os, tempfile
from nose import SkipTest
from nose.tools import assert_raises, assert_true, assert_false
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
class TestConvertNumpy(object):
@classmethod
def setupClass(cls):
global np, sp, sparse
try:
import numpy as np
import scipy as sp
import scipy.sparse as sparse
except ImportError:
raise SkipTest('SciPy sparse library not available.')
def __init__(self):
self.G1 = barbell_graph(10, 3)
self.G2 = cycle_graph(10, create_using=nx.DiGraph())
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def create_weighted(self, G):
g = cycle_graph(4)
e = g.edges()
source = [u for u,v in e]
dest = [v for u,v in e]
weight = [s+10 for s in source]
ex = zip(source, dest, weight)
G.add_weighted_edges_from(ex)
return G
def assert_equal(self, G1, G2):
assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
assert_true( sorted(G1.edges())==sorted(G2.edges()) )
def identity_conversion(self, G, A, create_using):
GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.from_whatever(A, create_using=create_using)
self.assert_equal(G, GW)
GI = create_using.__class__(A)
self.assert_equal(G, GI)
ACSR = A.tocsr()
GI = create_using.__class__(ACSR)
self.assert_equal(G, GI)
ACOO = A.tocoo()
GI = create_using.__class__(ACOO)
self.assert_equal(G, GI)
ACSC = A.tocsc()
GI = create_using.__class__(ACSC)
self.assert_equal(G, GI)
AD = A.todense()
GI = create_using.__class__(AD)
self.assert_equal(G, GI)
AA = A.toarray()
GI = create_using.__class__(AA)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square sparse array."
A = sp.sparse.lil_matrix([[1,2,3],[4,5,6]])
assert_raises(nx.NetworkXError, nx.from_scipy_sparse_matrix, A)
def test_identity_graph_matrix(self):
"Conversion from graph to sparse matrix to graph."
A = nx.to_scipy_sparse_matrix(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_matrix(self):
"Conversion from digraph to sparse matrix to digraph."
A = nx.to_scipy_sparse_matrix(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_matrix(self):
"""Conversion from weighted graph to sparse matrix to weighted graph."""
A = nx.to_scipy_sparse_matrix(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_matrix(self):
"""Conversion from weighted digraph to sparse matrix to weighted digraph."""
A = nx.to_scipy_sparse_matrix(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_nodelist(self):
"""Conversion from graph to sparse matrix to graph with nodelist."""
P4 = path_graph(4)
P3 = path_graph(3)
nodelist = P3.nodes()
A = nx.to_scipy_sparse_matrix(P4, nodelist=nodelist)
GA = nx.Graph(A)
self.assert_equal(GA, P3)
# Make nodelist ambiguous by containing duplicates.
nodelist += [nodelist[0]]
assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)
|
{
"content_hash": "b60819bdc64ec60368e9f1c2e3667554",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 84,
"avg_line_length": 33.293577981651374,
"alnum_prop": 0.6059520529071369,
"repo_name": "rainest/dance-partner-matching",
"id": "6fe3274892152ced4e7843616a90b289c3898174",
"size": "3630",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "networkx/tests/test_convert_scipy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1745363"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
}
|
print("Hello Python World...\nI'm component C :-)")
|
{
"content_hash": "6fce32d0baa1c07f3603b6dbb93295f2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 51,
"avg_line_length": 52,
"alnum_prop": 0.6538461538461539,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6c45697c748ec097a1d7b1e3679789468514618f",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/tests/test_configs/dsl_pipeline/basic_pipeline/componentC_src/hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
from pyvivado import builder, interface, signal, utils
from rfgnocchi import config
#minimum_2inputs_builder = builder.make_simple_builder(
# filenames=[os.path.join(config.basedir, 'maths', 'minimum_2inputs.vhd')],
#)({})
#minimum_stage_builder = builder.make_simple_builder(
# filenames=[os.path.join(config.basedir, 'maths', 'minimum_stage.vhd')],
# builders=[minimum_2inputs_builder],
# )({})
class MinimumBuilder(builder.Builder):
#template_fn=os.path.join(config.basedir, 'maths', 'minimum.vhd.t')
def __init__(self, params):
super().__init__(params)
#self.n_inputs = params['n_inputs']
#self.n_stages = signal.logceil(self.n_inputs)
#self.builders = [minimum_stage_builder]
self.simple_filenames = [
os.path.join(config.basedir, 'maths', 'minimum.vhd'),
]
#def output_filename(self, directory):
# return os.path.join(directory, builder.template_fn_to_output_fn(
# self.template_fn, self.params))
#def required_filenames(self, directory):
# return [self.output_filename(directory)]
#def build(self, directory):
# utils.format_file(
# template_filename=self.template_fn,
# output_filename=self.output_filename(directory),
# parameters={'n_inputs': self.n_inputs, 'n_stages': self.n_stages})
def get_minimum_interface(params):
module_name = 'minimum'
width = params['width']
n_inputs = params['n_inputs']
module_parameters = {
'WIDTH': width,
'N_INPUTS': n_inputs,
}
builder = MinimumBuilder({
#'n_inputs': n_inputs,
})
wires_in = (
('reset', signal.std_logic_type),
('i_valid', signal.std_logic_type),
('i_data', signal.StdLogicVector(width=n_inputs*width)),
('o_ready', signal.std_logic_type),
)
wires_out = (
('o_data', signal.StdLogicVector(width=width)),
('o_valid', signal.std_logic_type),
('o_index', signal.StdLogicVector(width=signal.logceil(n_inputs))),
('i_ready', signal.std_logic_type),
)
iface = interface.Interface(
wires_in, wires_out, module_name=module_name,
parameters=params, builder=builder, clock_names=['clk'],
module_parameters=module_parameters,
)
return iface
interface.add_to_module_register('minimum', get_minimum_interface)
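# Hedged usage sketch (not part of the original file): parameter values are made
# up; 'width' and 'n_inputs' are the only keys read by get_minimum_interface.
#
#     iface = get_minimum_interface({'width': 16, 'n_inputs': 8})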
|
{
"content_hash": "3e08efb35636994e7fd91541293e3ce1",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 31.38961038961039,
"alnum_prop": 0.6218452627223832,
"repo_name": "benreynwar/rfgnocchi",
"id": "7b467071603ce2fe8ea5bb5c3a815b279a9d8b7b",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maths/minimum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147943"
},
{
"name": "VHDL",
"bytes": "37568"
}
],
"symlink_target": ""
}
|
"""Tests for liveness module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.platform import test
global_a = 7
global_b = 17
class LivenessAnalyzerTestBase(test.TestCase):
def _parse_and_analyze(self, test_fn):
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
liveness.resolve(node, ctx, graphs)
return node
def assertHasLiveOut(self, node, expected):
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
live_out_strs = set(str(v) for v in live_out)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(live_out_strs, set(expected))
def assertHasLiveIn(self, node, expected):
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_in_strs = set(str(v) for v in live_in)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(live_in_strs, set(expected))
class LivenessAnalyzerTest(LivenessAnalyzerTestBase):
def test_live_out_try_block(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
try:
pass
except: # pylint:disable=bare-except
pass
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], 'x')
self.assertHasLiveOut(fn_body[0].body[0], 'x')
def test_live_out_if_inside_except(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
try:
pass
except: # pylint:disable=bare-except
if b > 0:
x = b
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], 'x')
self.assertHasLiveOut(fn_body[0].body[0], 'x')
self.assertHasLiveOut(fn_body[0].body[0].handlers[0].body[0], 'x')
def test_live_out_stacked_if(self):
def test_fn(x, a):
if a > 0:
x = 0
if a > 1:
x = 1
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], ('a', 'x'))
self.assertHasLiveOut(fn_body[1], 'x')
def test_live_out_stacked_if_else(self):
def test_fn(x, a):
if a > 0:
x = 0
if a > 1:
x = 1
else:
x = 2
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], 'a')
self.assertHasLiveOut(fn_body[1], 'x')
def test_live_out_for_basic(self):
def test_fn(x, a):
for i in range(a):
x += i
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], 'x')
def test_live_out_for_iterate(self):
def test_fn(x, a):
for i in range(a):
x += i
return x, i # pylint:disable=undefined-loop-variable
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], ('x', 'i'))
def test_live_out_attributes(self):
def test_fn(x, a):
if a > 0:
x.y = 0
return x.y
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
def test_live_out_nested_functions(self):
def test_fn(a, b):
if b:
a = []
def foo():
return a
foo()
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], 'a')
def test_live_out_nested_functions_isolation(self):
def test_fn(b):
if b:
a = 0 # pylint:disable=unused-variable
def child():
max(a) # pylint:disable=used-before-assignment
a = 1
return a
child()
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], 'max')
def test_live_out_deletion(self):
def test_fn(x, y, a):
for _ in a:
if x:
del y
else:
y = 0
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[0], ())
def test_live_in_pass(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
pass
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'x'))
self.assertHasLiveIn(fn_body[0].body[0], ('x',))
self.assertHasLiveIn(fn_body[1], ('x',))
def test_live_in_raise(self):
def test_fn(x, a, b, c):
if a > 0:
b = b + 1
raise c
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
self.assertHasLiveIn(fn_body[0].body[0], ('b', 'c'))
self.assertHasLiveIn(fn_body[1], ('x',))
def test_live_out_except_variable(self):
def test_fn(x, a):
try:
pass
except a as b:
raise b
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
# Note: 'a' is not live because there is no raise statement inside the
# try, and we discount the possibility of other code in the try block
# raising an error.
self.assertHasLiveIn(fn_body[0], ('b', 'x'))
def test_live_in_return_statement(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
return x
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'x'))
self.assertHasLiveIn(fn_body[0].body[0], ('x',))
self.assertHasLiveIn(fn_body[1], ('x',))
def test_live_in_try_block(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
try:
pass
except: # pylint:disable=bare-except
pass
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'x'))
self.assertHasLiveIn(fn_body[0].body[0], ('x',))
self.assertHasLiveIn(fn_body[1], ('x',))
def test_live_in_try_orelse(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
try:
pass
except: # pylint:disable=bare-except
pass
else:
x = b
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'x'))
self.assertHasLiveIn(fn_body[0].body[0], ('b', 'x'))
self.assertHasLiveIn(fn_body[1], ('x',))
def test_live_in_if_inside_except(self):
def test_fn(x, a, b, c): # pylint:disable=unused-argument
if a > 0:
try:
pass
except: # pylint:disable=bare-except
if b > 0:
x = b
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'x'))
self.assertHasLiveIn(fn_body[0].body[0], ('b', 'x'))
self.assertHasLiveIn(fn_body[0].body[0].handlers[0].body[0], ('b', 'x'))
self.assertHasLiveIn(fn_body[1], ('x',))
def test_live_in_stacked_if(self):
def test_fn(x, a, b, c):
if a > 0:
x = b
if c > 1:
x = 0
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
self.assertHasLiveIn(fn_body[1], ('c', 'x'))
def test_live_in_stacked_if_else(self):
def test_fn(x, a, b, c, d):
if a > 1:
x = b
else:
x = c
if d > 0:
x = 0
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'd'))
self.assertHasLiveIn(fn_body[1], ('d', 'x'))
def test_live_in_for_basic(self):
def test_fn(x, y, a):
for i in a:
x = i
y += x
z = 0
return y, z
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
def test_live_in_for_nested(self):
def test_fn(x, y, a):
for i in a:
for j in i:
x = i
y += x
z = j
return y, z
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
def test_live_in_deletion(self):
def test_fn(x, y, a):
for _ in a:
if x:
del y
else:
y = 0
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('a', 'x', 'y'))
def test_live_in_generator_comprehension(self):
def test_fn(y):
if all(x for x in y):
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('all', 'y'))
def test_live_in_list_comprehension(self):
def test_fn(y):
if [x for x in y]:
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('y',))
def test_live_in_list_comprehension_expression(self):
def test_fn(y, s):
s += foo([x for x in y]) # pylint:disable=undefined-variable
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('y', 'foo', 's'))
def test_live_in_set_comprehension(self):
def test_fn(y):
if {x for x in y}:
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('y',))
def test_live_in_dict_comprehension(self):
def test_fn(y):
if {k: v for k, v in y}:
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveIn(fn_body[0], ('y',))
def test_global_symbol(self):
def test_fn(c):
global global_a
global global_b
if global_a:
global_b = c
else:
global_b = c
return global_b
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasLiveOut(fn_body[2], ('global_b',))
self.assertHasLiveIn(fn_body[2], ('global_a', 'c'))
if __name__ == '__main__':
test.main()
|
{
"content_hash": "e30b93ae944de6979b3b5a1273e4026f",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 76,
"avg_line_length": 23.689873417721518,
"alnum_prop": 0.5793035889215424,
"repo_name": "gunan/tensorflow",
"id": "90bcc67301a1e8682237cd6db616c763b4abee1d",
"size": "11918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/pyct/static_analysis/liveness_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
try:
import cPickle as pickle
except ImportError:
import pickle
import gtk
from twisted.words.im.gtkcommon import GLADE_FILE, SETTINGS_FILE, autoConnectMethods,\
openGlade
from twisted.words.im import gtkchat
### This generic stuff uses the word "account" in a very different way -- chat
### accounts are potential sources of messages, InstanceMessenger accounts are
### individual network connections.
class AccountManager:
def __init__(self):
self.xml = openGlade(GLADE_FILE, root="MainIMWindow")
self.chatui = gtkchat.GtkChatClientUI(self.xml)
self.chatui._accountmanager = self # TODO: clean this up... it's used in gtkchat
print self.xml._o
autoConnectMethods(self, self.chatui.theContactsList)
self.widget = self.xml.get_widget("AccountManWidget")
self.widget.show_all()
try:
f = open(SETTINGS_FILE)
self.accounts = pickle.load(f)
print 'loaded!'
self.refreshAccounts()
except IOError:
self.accounts = []
print 'initialized!'
def on_ConsoleButton_clicked(self, b):
#### For debugging purposes...
from twisted.manhole.ui.pywidgets import LocalInteraction
l = LocalInteraction()
l.localNS['chat'] = self.chatui
l.show_all()
def created(self, acct):
self.accounts.append(acct)
self.refreshAccounts()
def refreshAccounts(self):
w = self.xml.get_widget("accountsList")
w.clear()
for acct in self.accounts:
l = [acct.accountName, acct.isOnline() and 'yes' or 'no',
acct.autoLogin and 'yes' or 'no', acct.gatewayType]
w.append(l)
def lockNewAccount(self, b):
self.xml.get_widget("NewAccountButton").set_sensitive(not b)
def on_NewAccountButton_clicked(self, b):
NewAccount(self)
def on_MainIMWindow_destroy(self, w):
print 'Saving...'
pickle.dump(self.accounts, open(SETTINGS_FILE,'wb'))
print 'Saved.'
gtk.mainquit()
def on_DeleteAccountButton_clicked(self, b):
lw = self.xml.get_widget("accountsList")
if lw.selection:
del self.accounts[lw.selection[0]]
self.refreshAccounts()
def on_LogOnButton_clicked(self, b):
lw = self.xml.get_widget("accountsList")
if lw.selection:
self.accounts[lw.selection[0]].logOn(self.chatui)
class DummyAccountForm:
def __init__(self, manager):
self.widget = gtk.GtkButton("HELLO")
def create(self, sname, autoLogin):
return None
class NewAccount:
def __init__(self, manager):
self.manager = manager
self.manager.lockNewAccount(1)
self.xml = openGlade(GLADE_FILE, root="NewAccountWindow")
autoConnectMethods(self)
self.widget = self.xml.get_widget("NewAccountWindow")
self.frame = self.xml.get_widget("GatewayFrame")
# Making up for a deficiency in glade.
widgetMenu = self.xml.get_widget("GatewayOptionMenu")
m = gtk.GtkMenu()
activ = 0
self.currentGateway = None
for name, klas in registeredTypes:
i = gtk.GtkMenuItem(name)
m.append(i)
k = klas(self.manager)
i.connect("activate", self.gatewaySelected, k)
if not activ:
activ = 1
self.gatewaySelected(None, k)
widgetMenu.set_menu(m)
self.widget.show_all()
def gatewaySelected(self, ig, k):
if self.currentGateway:
self.frame.remove(self.currentGateway.widget)
self.currentGateway = k
self.frame.add(k.widget)
k.widget.show_all()
def createAccount(self, b):
autoLogin = self.xml.get_widget("AutoLogin").get_active()
accountName = self.xml.get_widget("accountName").get_text()
x = self.currentGateway.create(accountName, autoLogin)
if x:
self.manager.created(x)
self.destroyMe()
def destroyMe(self, b=None):
self.widget.destroy()
def on_NewAccountWindow_destroy(self, w):
self.manager.lockNewAccount(0)
from twisted.words.im.pbsupport import PBAccount
from twisted.words.im.tocsupport import TOCAccount
from twisted.words.im.ircsupport import IRCAccount
class PBAccountForm:
def __init__(self, manager):
self.manager = manager
self.xml = openGlade(GLADE_FILE, root="PBAccountWidget")
autoConnectMethods(self)
self.widget = self.xml.get_widget("PBAccountWidget")
self.on_serviceType_changed()
self.selectedRow = None
def addPerspective(self, b):
stype = self.xml.get_widget("serviceType").get_text()
sname = self.xml.get_widget("serviceName").get_text()
pname = self.xml.get_widget("perspectiveName").get_text()
self.xml.get_widget("serviceList").append([stype, sname, pname])
def removePerspective(self, b):
if self.selectedRow is not None:
self.xml.get_widget("serviceList").remove(self.selectedRow)
def on_serviceType_changed(self, w=None):
self.xml.get_widget("serviceName").set_text(self.xml.get_widget("serviceType").get_text())
self.xml.get_widget("perspectiveName").set_text(self.xml.get_widget("identity").get_text())
on_identity_changed = on_serviceType_changed
def on_serviceList_select_row(self, slist, row, column, event):
self.selectedRow = row
def create(self, accName, autoLogin):
host = self.xml.get_widget("hostname").get_text()
port = self.xml.get_widget("portno").get_text()
user = self.xml.get_widget("identity").get_text()
pasw = self.xml.get_widget("password").get_text()
serviceList = self.xml.get_widget("serviceList")
services = []
for r in xrange(0, serviceList.rows):
row = []
for c in xrange(0, serviceList.columns):
row.append(serviceList.get_text(r, c))
services.append(row)
if not services:
services.append([
self.xml.get_widget("serviceType").get_text(),
self.xml.get_widget("serviceName").get_text(),
self.xml.get_widget("perspectiveName").get_text()])
return PBAccount(accName, autoLogin, user, pasw, host, int(port),
services)
class TOCAccountForm:
    def __init__(self, manager):
self.xml = openGlade(GLADE_FILE, root="TOCAccountWidget")
self.widget = self.xml.get_widget("TOCAccountWidget")
def create(self, accountName, autoLogin):
return TOCAccount(
accountName, autoLogin,
self.xml.get_widget("TOCName").get_text(),
self.xml.get_widget("TOCPass").get_text(),
self.xml.get_widget("TOCHost").get_text(),
int(self.xml.get_widget("TOCPort").get_text()) )
class IRCAccountForm:
    def __init__(self, manager):
self.xml = openGlade(GLADE_FILE, root="IRCAccountWidget")
self.widget = self.xml.get_widget("IRCAccountWidget")
def create(self, accountName, autoLogin):
return IRCAccount(
accountName, autoLogin,
self.xml.get_widget("ircNick").get_text(),
self.xml.get_widget("ircPassword").get_text(),
self.xml.get_widget("ircServer").get_text(),
int(self.xml.get_widget("ircPort").get_text()),
self.xml.get_widget("ircChannels").get_text(),
)
registeredTypes = [ ("Twisted", PBAccountForm),
("AOL Instant Messenger", TOCAccountForm),
["IRC", IRCAccountForm],
("Dummy", DummyAccountForm) ]
|
{
"content_hash": "aabaf5273f87265fdcec4f71cc5a9576",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 99,
"avg_line_length": 35.268181818181816,
"alnum_prop": 0.615414357520299,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "61b52d74dea8b377e53e7568b83b8b2da4354c8e",
"size": "7843",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/twisted/words/im/gtkaccount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'click2pass.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^service/$', 'service.views.index'),
url(r'^service/add_bookmark/$', 'service.views.add_bookmark'),
url(r'^service/delete_bookmark/(?P<objid>\d+)/$', 'service.views.delete_bookmark'),
url(r'^service/update_bookmark/(?P<objid>\d+)/$', 'service.views.update_bookmark'),
)
|
{
"content_hash": "943f0455fe370ce3d52c1340006fe2f0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 87,
"avg_line_length": 36.9375,
"alnum_prop": 0.6531302876480541,
"repo_name": "iwagaki/click2pass",
"id": "7ca3931b9650d9797c29a29015669eb0ad053a2d",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "click2pass/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12761"
},
{
"name": "JavaScript",
"bytes": "4260"
},
{
"name": "Python",
"bytes": "7435"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
}
|
import json
from flask import request
from flask import render_template
from flask import abort
from flask import jsonify
from flask import current_app
from flask.views import MethodView
from rados import Rados
from rados import Error as RadosError
from app.base import ApiResource
class CephClusterProperties(dict):
"""
Validate ceph cluster connection properties
"""
def __init__(self, config):
dict.__init__(self)
self['conffile'] = config['ceph_config']
self['conf'] = dict()
if 'keyring' in config:
self['conf']['keyring'] = config['keyring']
if 'client_id' in config and 'client_name' in config:
raise RadosError("Can't supply both client_id and client_name")
if 'client_id' in config:
self['rados_id'] = config['client_id']
if 'client_name' in config:
self['name'] = config['client_name']
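# Hedged illustration (not part of the original file): a minimal user config and
# the connection properties it produces (paths and names are made up).
#
#     CephClusterProperties({
#         'ceph_config': '/etc/ceph/ceph.conf',
#         'keyring': '/etc/ceph/ceph.client.admin.keyring',
#         'client_id': 'admin',
#     })
#     # -> {'conffile': '/etc/ceph/ceph.conf',
#     #     'conf': {'keyring': '/etc/ceph/ceph.client.admin.keyring'},
#     #     'rados_id': 'admin'}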
class CephClusterCommand(dict):
"""
Issue a ceph command on the given cluster and provide the returned json
"""
def __init__(self, cluster, **kwargs):
dict.__init__(self)
ret, buf, err = cluster.mon_command(json.dumps(kwargs), b'', timeout=5)
if ret != 0:
self['err'] = err
else:
self.update(json.loads(buf))
def find_host_for_osd(osd, osd_status):
""" find host for a given osd """
for obj in osd_status['nodes']:
if obj['type'] == 'host':
if osd in obj['children']:
return obj['name']
return 'unknown'
def get_unhealthy_osd_details(osd_status):
""" get all unhealthy osds from osd status """
unhealthy_osds = list()
for obj in osd_status['nodes']:
if obj['type'] == 'osd':
# if OSD does not exists (DNE in osd tree) skip this entry
if obj['exists'] == 0:
continue
if obj['status'] == 'down' or obj['reweight'] == 0.0:
# It is possible to have one host in more than one branch in the tree.
# Add each unhealthy OSD only once in the list
if obj['status'] == 'down':
status = 'down'
else:
status = 'out'
entry = {
'name': obj['name'],
'status': status,
'host': find_host_for_osd(obj['id'], osd_status)
}
if entry not in unhealthy_osds:
unhealthy_osds.append(entry)
return unhealthy_osds
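# Minimal shape of the "osd tree" data consumed above (values are illustrative):
#
#   osd_status = {'nodes': [
#       {'type': 'host', 'name': 'node1', 'children': [0]},
#       {'type': 'osd', 'id': 0, 'name': 'osd.0', 'exists': 1,
#        'status': 'down', 'reweight': 1.0},
#   ]}
#   get_unhealthy_osd_details(osd_status)
#   # -> [{'name': 'osd.0', 'status': 'down', 'host': 'node1'}]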
class DashboardResource(ApiResource):
"""
Endpoint that shows overall cluster status
"""
endpoint = 'dashboard'
url_prefix = '/'
url_rules = {
'index': {
'rule': '/',
}
}
def __init__(self):
MethodView.__init__(self)
self.config = current_app.config['USER_CONFIG']
self.clusterprop = CephClusterProperties(self.config)
def get(self):
with Rados(**self.clusterprop) as cluster:
cluster_status = CephClusterCommand(cluster, prefix='status', format='json')
if 'err' in cluster_status:
abort(500, cluster_status['err'])
# ceph >= 15.2.5
if 'osdmap' not in cluster_status['osdmap']:
# osdmap has been converted to depth-1 dict
cluster_status['osdmap']['osdmap'] = cluster_status['osdmap'].copy()
monitor_status = CephClusterCommand(cluster, prefix='quorum_status', format='json')
cluster_status['monmap'] = monitor_status['monmap']
# check for unhealthy osds and get additional osd infos from cluster
total_osds = cluster_status['osdmap']['osdmap']['num_osds']
            in_osds = cluster_status['osdmap']['osdmap']['num_in_osds']
            up_osds = cluster_status['osdmap']['osdmap']['num_up_osds']
if up_osds < total_osds or in_osds < total_osds:
osd_status = CephClusterCommand(cluster, prefix='osd tree', format='json')
if 'err' in osd_status:
abort(500, osd_status['err'])
# find unhealthy osds in osd tree
cluster_status['osdmap']['details'] = get_unhealthy_osd_details(osd_status)
if request.mimetype == 'application/json':
return jsonify(cluster_status)
else:
return render_template('status.html', data=cluster_status, config=self.config)
|
{
"content_hash": "d39d8162d3522784270f49b05d0ac8fc",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 99,
"avg_line_length": 32.8978102189781,
"alnum_prop": 0.557577102285334,
"repo_name": "Crapworks/ceph-dash",
"id": "4d9e8677de5b27547a0d622322de92fdbf05a26d",
"size": "4554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/dashboard/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "783"
},
{
"name": "Dockerfile",
"bytes": "532"
},
{
"name": "HTML",
"bytes": "8957"
},
{
"name": "JavaScript",
"bytes": "173003"
},
{
"name": "Python",
"bytes": "14817"
},
{
"name": "Shell",
"bytes": "1141"
}
],
"symlink_target": ""
}
|
import argparse
import logging
from dvc.command import completion
from dvc.command.base import CmdBaseNoRepo, append_doc_link
from dvc.command.ls.ls_colors import LsColors
from dvc.exceptions import DvcException
from dvc.ui import ui
logger = logging.getLogger(__name__)
def _prettify(entries, with_color=False):
if with_color:
ls_colors = LsColors()
fmt = ls_colors.format
else:
def fmt(entry):
return entry["path"]
return [fmt(entry) for entry in entries]
class CmdList(CmdBaseNoRepo):
def run(self):
from dvc.repo import Repo
try:
entries = Repo.ls(
self.args.url,
self.args.path,
rev=self.args.rev,
recursive=self.args.recursive,
dvc_only=self.args.dvc_only,
)
if self.args.json:
ui.write_json(entries)
elif entries:
entries = _prettify(entries, with_color=True)
ui.write("\n".join(entries))
return 0
except DvcException:
logger.exception(f"failed to list '{self.args.url}'")
return 1
def add_parser(subparsers, parent_parser):
LIST_HELP = (
"List repository contents, including files"
" and directories tracked by DVC and by Git."
)
list_parser = subparsers.add_parser(
"list",
aliases=["ls"],
parents=[parent_parser],
description=append_doc_link(LIST_HELP, "list"),
help=LIST_HELP,
formatter_class=argparse.RawTextHelpFormatter,
)
list_parser.add_argument("url", help="Location of DVC repository to list")
list_parser.add_argument(
"-R",
"--recursive",
action="store_true",
help="Recursively list files.",
)
list_parser.add_argument(
"--dvc-only", action="store_true", help="Show only DVC outputs."
)
list_parser.add_argument(
"--json",
"--show-json",
action="store_true",
help="Show output in JSON format.",
)
list_parser.add_argument(
"--rev",
nargs="?",
help="Git revision (e.g. SHA, branch, tag)",
metavar="<commit>",
)
list_parser.add_argument(
"path",
nargs="?",
help="Path to directory within the repository to list outputs for",
).complete = completion.DIR
list_parser.set_defaults(func=CmdList)
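# Rough programmatic sketch of what CmdList.run does (the repository URL is
# illustrative; Repo.ls receives the same keyword arguments as above):
#
#   from dvc.repo import Repo
#   entries = Repo.ls("https://github.com/iterative/example-get-started",
#                     None, rev=None, recursive=False, dvc_only=False)
#   print("\n".join(_prettify(entries)))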
|
{
"content_hash": "565d2c48e15e8d1a44ee70f6691166e9",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 28.011363636363637,
"alnum_prop": 0.5768762677484787,
"repo_name": "dmpetrov/dataversioncontrol",
"id": "d8bfe02413e2799dce5835cc9cbcdd7be09f4c32",
"size": "2465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dvc/command/ls/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127601"
},
{
"name": "Shell",
"bytes": "1677"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='django-simple-captcha',
version=__import__('captcha').get_version(),
description='A very simple, yet powerful, Django captcha application',
author='Marco Bonetti',
author_email='mbonetti@gmail.com',
url='http://code.google.com/p/django-simple-captcha/',
license='MIT',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP',
'Framework :: Django',
],
include_package_data=True,
zip_safe=False,
install_requires=['setuptools'],
    requires=['PIL (>=1.1.6)']
)
|
{
"content_hash": "efec9509b41c7d7589ece0a4be280b0d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 34.57692307692308,
"alnum_prop": 0.6151279199110122,
"repo_name": "andela-bojengwa/django-simple-captcha",
"id": "3d69000df4756572e1c09fd9c6f8c9102702502a",
"size": "899",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27671"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, division, absolute_import, print_function)
import gc
import sys
from types import FrameType
from itertools import chain
# From http://code.activestate.com/recipes/523004-find-cyclical-references/
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
'''Find reference cycles
:param list objects:
A list of objects to find cycles in. It is often useful to pass in
gc.garbage to find the cycles that are preventing some objects from
being garbage collected.
:param file outstream:
The stream for output.
:param bool show_progress:
If True, print the number of objects reached as they are found.
'''
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
written = False
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
written = True
break
if key is next:
outstream.write("[key] = %s" % repr(val))
written = True
break
elif isinstance(step, (list, tuple)):
for i, item in enumerate(step):
if item is next:
outstream.write("[%d]" % i)
written = True
elif getattr(type(step), '__getattribute__', None) in (object.__getattribute__, type.__getattribute__):
for attr in chain(dir(step), getattr(step, '__dict__', ())):
if getattr(step, attr, None) is next:
try:
outstream.write('%r.%s' % (step, attr))
except TypeError:
                            outstream.write('.%s' % (attr,))
written = True
break
if not written:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
try:
outstream.write('Cyclic reference: %r\n' % referent)
except TypeError:
try:
outstream.write('Cyclic reference: %i (%r)\n' % (id(referent), type(referent)))
except TypeError:
outstream.write('Cyclic reference: %i\n' % id(referent))
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + (obj,))
for obj in objects:
# We are not interested in non-powerline cyclic references
try:
if not type(obj).__module__.startswith('powerline'):
continue
except AttributeError:
continue
recurse(obj, obj, {}, ())
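# Hedged usage sketch (mirrors the docstring above; gc.DEBUG_SAVEALL makes the
# collector keep unreachable objects in gc.garbage so their cycles can be shown):
#
#   gc.set_debug(gc.DEBUG_SAVEALL)
#   gc.collect()
#   print_cycles(gc.garbage, show_progress=True)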
|
{
"content_hash": "74278077d6cd7651991fe5a2116f8067",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 106,
"avg_line_length": 31.083333333333332,
"alnum_prop": 0.6578418230563002,
"repo_name": "magus424/powerline",
"id": "478b9ed8e1e05734251d7f7a39d9d764102ee11f",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "powerline/lib/debug.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3149"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Python",
"bytes": "580149"
},
{
"name": "Shell",
"bytes": "36311"
},
{
"name": "VimL",
"bytes": "10739"
}
],
"symlink_target": ""
}
|
"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
try:
from urllib.parse import quote, urlsplit, urlunsplit
except ImportError: # Python 2
from urllib import quote
from urlparse import urlsplit, urlunsplit
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.text import normalize_newlines
from .html_parser import HTMLParser, HTMLParseError
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if isinstance(text, SafeData):
return text
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
six.iteritems(kwargs)])
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
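# Illustrative behaviour (a quick sketch, not an exhaustive spec):
#   linebreaks('para one\n\nline one\nline two')
#   # -> '<p>para one</p>\n\n<p>line one<br />line two</p>'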
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
s = MLStripper()
try:
s.feed(value)
s.close()
except HTMLParseError:
return value
else:
return s.get_data()
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def fix_ampersands(value):
"""Returns the given HTML with all unencoded ampersands encoded correctly."""
    return unencoded_ampersands_re.sub('&amp;', force_text(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
# An URL is considered unquoted if it contains no % characters or
# contains a % not followed by two hexadecimal digits. See #9655.
if '%' not in url or unquoted_percents_re.search(url):
# See http://bugs.python.org/issue2637
url = quote(force_bytes(url), safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
will truncated to trim_url_limit-3 characters and appended with an elipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % middle)
elif not ':' in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
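# Illustrative behaviour (hedged; exact markup depends on the flags passed):
#   urlize('See www.example.com for details.', nofollow=True)
#   # -> 'See <a href="http://www.example.com" rel="nofollow">www.example.com</a> for details.'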
def clean_html(text):
"""
Clean the given HTML. Specifically, do the following:
* Convert <b> and <i> to <strong> and <em>.
* Encode all ampersands correctly.
* Remove all "target" attributes from <a> tags.
* Remove extraneous HTML, such as presentational tags that open and
immediately close and <br clear="all">.
* Convert hard-coded bullets into HTML unordered lists.
* Remove stuff like "<p> </p>", but only if it's at the
bottom of the text.
"""
from django.utils.text import normalize_newlines
text = normalize_newlines(force_text(text))
text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
text = fix_ampersands(text)
# Remove all target="" attributes from <a> tags.
text = link_target_attribute_re.sub('\\1', text)
# Trim stupid HTML such as <br clear="all">.
text = html_gunk_re.sub('', text)
# Convert hard-coded bullets into HTML unordered lists.
def replace_p_tags(match):
s = match.group().replace('</p>', '</li>')
for d in DOTS:
s = s.replace('<p>%s' % d, '<li>')
return '<ul>\n%s\n</ul>' % s
text = hard_coded_bullets_re.sub(replace_p_tags, text)
# Remove stuff like "<p> </p>", but only if it's at the bottom
# of the text.
text = trailing_empty_content_re.sub('', text)
return text
clean_html = allow_lazy(clean_html, six.text_type)
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
|
{
"content_hash": "0952c6329dcbf233610a8be967b775e3",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 147,
"avg_line_length": 39.236421725239616,
"alnum_prop": 0.5897728198029476,
"repo_name": "makinacorpus/django",
"id": "4893b6b18a759c407317416716d15b2b8b499c7f",
"size": "12281",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/utils/html.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98175"
},
{
"name": "Python",
"bytes": "8391980"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
import cassiopeia.type.dto.common
import cassiopeia.type.core.common
if cassiopeia.type.dto.common.sqlalchemy_imported:
import sqlalchemy
import sqlalchemy.orm
@cassiopeia.type.core.common.inheritdocs
class RunePages(cassiopeia.type.dto.common.CassiopeiaDto):
"""
pages list<RunePage> collection of rune pages associated with the summoner
summonerId int summoner ID
"""
def __init__(self, dictionary):
self.pages = [(RunePage(p) if not isinstance(p, RunePage) else p) for p in dictionary.get("pages", []) if p]
self.summonerId = dictionary.get("summonerId", 0)
@property
def rune_ids(self):
"""Gets all rune IDs contained in this object"""
ids = set()
for p in self.pages:
ids = ids | p.rune_ids
return ids
@cassiopeia.type.core.common.inheritdocs
class RunePage(cassiopeia.type.dto.common.CassiopeiaDto):
"""
current bool indicates if the page is the current page
id int rune page ID
name str rune page name
slots list<RuneSlot> collection of rune slots associated with the rune page
"""
def __init__(self, dictionary):
self.current = dictionary.get("current", False)
self.id = dictionary.get("id", 0)
self.name = dictionary.get("name", "")
self.slots = [(RuneSlot(s) if not isinstance(s, RuneSlot) else s) for s in dictionary.get("slots", []) if s]
@property
def rune_ids(self):
"""Gets all rune IDs contained in this object"""
ids = set()
for s in self.slots:
if s.runeId:
ids.add(s.runeId)
return ids
@cassiopeia.type.core.common.inheritdocs
class RuneSlot(cassiopeia.type.dto.common.CassiopeiaDto):
"""
runeId int rune ID associated with the rune slot. For static information correlating to rune IDs, please refer to the LoL Static Data API.
runeSlotId int rune slot ID.
"""
def __init__(self, dictionary):
self.runeId = dictionary.get("runeId", 0)
self.runeSlotId = dictionary.get("runeSlotId", 0)
@cassiopeia.type.core.common.inheritdocs
class MasteryPages(cassiopeia.type.dto.common.CassiopeiaDto):
"""
pages list<MasteryPage> collection of mastery pages associated with the summoner
summonerId int summoner ID
"""
def __init__(self, dictionary):
self.pages = [(MasteryPage(p) if not isinstance(p, MasteryPage) else p) for p in dictionary.get("pages", []) if p]
self.summonerId = dictionary.get("summonerId", 0)
@property
def mastery_ids(self):
"""Gets all mastery IDs contained in this object"""
ids = set()
for p in self.pages:
ids = ids | p.mastery_ids
return ids
@cassiopeia.type.core.common.inheritdocs
class MasteryPage(cassiopeia.type.dto.common.CassiopeiaDto):
"""
current bool indicates if the mastery page is the current mastery page
id int mastery page ID
masteries list<MasteryDto> collection of masteries associated with the mastery page
name str mastery page name.
"""
def __init__(self, dictionary):
self.current = dictionary.get("current", False)
self.id = dictionary.get("id", 0)
self.masteries = [(Mastery(s) if not isinstance(s, Mastery) else s) for s in dictionary.get("masteries", []) if s]
self.name = dictionary.get("name", "")
@property
def mastery_ids(self):
"""Gets all mastery IDs contained in this object"""
ids = set()
for m in self.masteries:
if m.id:
ids.add(m.id)
return ids
@cassiopeia.type.core.common.inheritdocs
class Mastery(cassiopeia.type.dto.common.CassiopeiaDto):
"""
id int mastery ID. For static information correlating to masteries, please refer to the LoL Static Data API.
rank int mastery rank (i.e. the number of points put into this mastery)
"""
def __init__(self, dictionary):
self.id = dictionary.get("id", 0)
self.rank = dictionary.get("rank", 0)
@cassiopeia.type.core.common.inheritdocs
class Summoner(cassiopeia.type.dto.common.CassiopeiaDto):
"""
id int summoner ID
name str summoner name
profileIconId int ID of the summoner icon associated with the summoner
revisionDate int date summoner was last modified specified as epoch milliseconds. The following events will update this timestamp: profile icon change, playing the tutorial or advanced tutorial, finishing a game, summoner name change.
summonerLevel int summoner level associated with the summoner
"""
def __init__(self, dictionary):
self.id = dictionary.get("id", 0)
self.name = dictionary.get("name", "")
self.profileIconId = dictionary.get("profileIconId", 0)
self.revisionDate = dictionary.get("revisionDate", 0)
self.summonerLevel = dictionary.get("summonerLevel", 0)
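# Hedged example of constructing these DTOs from raw API dictionaries
# (field values are purely illustrative):
#
#   pages = RunePages({'summonerId': 123, 'pages': [
#       {'id': 1, 'name': 'Page 1', 'current': True,
#        'slots': [{'runeId': 5317, 'runeSlotId': 1}]},
#   ]})
#   pages.rune_ids  # -> {5317}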
###############################
# Dynamic SQLAlchemy bindings #
###############################
def _sa_bind_rune_page():
global RunePage
@cassiopeia.type.core.common.inheritdocs
class RunePage(RunePage, cassiopeia.type.dto.common.BaseDB):
__tablename__ = "RunePage"
current = sqlalchemy.Column(sqlalchemy.Boolean)
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(50))
slots = sqlalchemy.orm.relationship("cassiopeia.type.dto.summoner.RuneSlot", cascade="all, delete-orphan, delete, merge", passive_deletes=True)
def _sa_bind_rune_slot():
global RuneSlot
@cassiopeia.type.core.common.inheritdocs
class RuneSlot(RuneSlot, cassiopeia.type.dto.common.BaseDB):
__tablename__ = "RuneSlot"
runeId = sqlalchemy.Column(sqlalchemy.Integer)
runeSlotId = sqlalchemy.Column(sqlalchemy.Integer)
_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_page_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey("RunePage.id", ondelete="CASCADE"))
def _sa_bind_mastery_page():
global MasteryPage
@cassiopeia.type.core.common.inheritdocs
class MasteryPage(MasteryPage, cassiopeia.type.dto.common.BaseDB):
__tablename__ = "MasteryPage"
current = sqlalchemy.Column(sqlalchemy.Boolean)
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
masteries = sqlalchemy.orm.relationship("cassiopeia.type.dto.summoner.Mastery", cascade="all, delete-orphan, delete, merge", passive_deletes=True)
name = sqlalchemy.Column(sqlalchemy.String(50))
def _sa_bind_mastery():
global Mastery
@cassiopeia.type.core.common.inheritdocs
class Mastery(Mastery, cassiopeia.type.dto.common.BaseDB):
__tablename__ = "MasterySlot"
id = sqlalchemy.Column(sqlalchemy.Integer)
rank = sqlalchemy.Column(sqlalchemy.Integer)
_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_page_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey("MasteryPage.id", ondelete="CASCADE"))
def _sa_bind_summoner():
global Summoner
@cassiopeia.type.core.common.inheritdocs
class Summoner(Summoner, cassiopeia.type.dto.common.BaseDB):
__tablename__ = "Summoner"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(30))
profileIconId = sqlalchemy.Column(sqlalchemy.Integer)
revisionDate = sqlalchemy.Column(sqlalchemy.BigInteger)
summonerLevel = sqlalchemy.Column(sqlalchemy.Integer)
def _sa_bind_all():
_sa_bind_rune_page()
_sa_bind_rune_slot()
_sa_bind_mastery_page()
_sa_bind_mastery()
_sa_bind_summoner()
|
{
"content_hash": "865a6587d960d2220a0de83d5610f5b2",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 245,
"avg_line_length": 39.1078431372549,
"alnum_prop": 0.6504136375031336,
"repo_name": "MakersF/cassiopeia",
"id": "8103358f3648c4e348de91f154a424f105f314a5",
"size": "7978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cassiopeia/type/dto/summoner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "602362"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from tempest import clients
from tempest import config
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
CONF_FLAVORS = None
CONF_IMAGES = None
CONF_NETWORKS = []
CONF_PRIV_NETWORK_NAME = None
CONF_PUB_NETWORK = None
CONF_PUB_ROUTER = None
CONF_TENANTS = None
CONF_USERS = None
IS_CEILOMETER = None
IS_CINDER = None
IS_GLANCE = None
IS_HEAT = None
IS_NEUTRON = None
IS_NOVA = None
def init_conf():
global CONF_FLAVORS
global CONF_IMAGES
global CONF_NETWORKS
global CONF_PRIV_NETWORK
global CONF_PRIV_NETWORK_NAME
global CONF_PUB_NETWORK
global CONF_PUB_ROUTER
global CONF_TENANTS
global CONF_USERS
global IS_CEILOMETER
global IS_CINDER
global IS_GLANCE
global IS_HEAT
global IS_NEUTRON
global IS_NOVA
IS_CEILOMETER = CONF.service_available.ceilometer
IS_CINDER = CONF.service_available.cinder
IS_GLANCE = CONF.service_available.glance
IS_HEAT = CONF.service_available.heat
IS_NEUTRON = CONF.service_available.neutron
IS_NOVA = CONF.service_available.nova
CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
CONF_PUB_NETWORK = CONF.network.public_network_id
CONF_PUB_ROUTER = CONF.network.public_router_id
CONF_TENANTS = [CONF.identity.admin_tenant_name,
CONF.identity.tenant_name,
CONF.identity.alt_tenant_name]
CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
CONF.identity.alt_username]
if IS_NEUTRON:
CONF_PRIV_NETWORK = _get_priv_net_id(CONF.compute.fixed_network_name,
CONF.identity.tenant_name)
CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
def _get_priv_net_id(prv_net_name, tenant_name):
am = clients.AdminManager()
net_cl = am.network_client
id_cl = am.identity_client
networks = net_cl.list_networks()
tenant = id_cl.get_tenant_by_name(tenant_name)
t_id = tenant['id']
n_id = None
for net in networks['networks']:
if (net['tenant_id'] == t_id and net['name'] == prv_net_name):
n_id = net['id']
break
return n_id
class BaseService(object):
def __init__(self, kwargs):
self.client = None
for key, value in kwargs.items():
setattr(self, key, value)
def _filter_by_tenant_id(self, item_list):
if (item_list is None
or len(item_list) == 0
or not hasattr(self, 'tenant_id')
or self.tenant_id is None
or 'tenant_id' not in item_list[0]):
return item_list
return [item for item in item_list
if item['tenant_id'] == self.tenant_id]
def list(self):
pass
def delete(self):
pass
def dry_run(self):
pass
def save_state(self):
pass
def run(self):
if self.is_dry_run:
self.dry_run()
elif self.is_save_state:
self.save_state()
else:
self.delete()
class SnapshotService(BaseService):
def __init__(self, manager, **kwargs):
super(SnapshotService, self).__init__(kwargs)
self.client = manager.snapshots_client
def list(self):
client = self.client
snaps = client.list_snapshots()
LOG.debug("List count, %s Snapshots" % len(snaps))
return snaps
def delete(self):
snaps = self.list()
client = self.client
for snap in snaps:
try:
client.delete_snapshot(snap['id'])
except Exception as e:
LOG.exception("Delete Snapshot exception: %s" % e)
pass
def dry_run(self):
snaps = self.list()
self.data['snapshots'] = snaps
class ServerService(BaseService):
def __init__(self, manager, **kwargs):
super(ServerService, self).__init__(kwargs)
self.client = manager.servers_client
def list(self):
client = self.client
servers_body = client.list_servers()
servers = servers_body['servers']
LOG.debug("List count, %s Servers" % len(servers))
return servers
def delete(self):
client = self.client
servers = self.list()
for server in servers:
try:
client.delete_server(server['id'])
except Exception as e:
LOG.exception("Delete Server exception: %s" % e)
pass
def dry_run(self):
servers = self.list()
self.data['servers'] = servers
class ServerGroupService(ServerService):
def list(self):
client = self.client
sgs = client.list_server_groups()
LOG.debug("List count, %s Server Groups" % len(sgs))
return sgs
def delete(self):
client = self.client
sgs = self.list()
for sg in sgs:
try:
client.delete_server_group(sg['id'])
except Exception as e:
LOG.exception("Delete Server Group exception: %s" % e)
pass
def dry_run(self):
sgs = self.list()
self.data['server_groups'] = sgs
class StackService(BaseService):
def __init__(self, manager, **kwargs):
super(StackService, self).__init__(kwargs)
self.client = manager.orchestration_client
def list(self):
client = self.client
stacks = client.list_stacks()
LOG.debug("List count, %s Stacks" % len(stacks))
return stacks
def delete(self):
client = self.client
stacks = self.list()
for stack in stacks:
try:
client.delete_stack(stack['id'])
except Exception as e:
LOG.exception("Delete Stack exception: %s " % e)
pass
def dry_run(self):
stacks = self.list()
self.data['stacks'] = stacks
class KeyPairService(BaseService):
def __init__(self, manager, **kwargs):
super(KeyPairService, self).__init__(kwargs)
self.client = manager.keypairs_client
def list(self):
client = self.client
keypairs = client.list_keypairs()
LOG.debug("List count, %s Keypairs" % len(keypairs))
return keypairs
def delete(self):
client = self.client
keypairs = self.list()
for k in keypairs:
try:
name = k['keypair']['name']
client.delete_keypair(name)
except Exception as e:
LOG.exception("Delete Keypairs exception: %s" % e)
pass
def dry_run(self):
keypairs = self.list()
self.data['keypairs'] = keypairs
class SecurityGroupService(BaseService):
def __init__(self, manager, **kwargs):
super(SecurityGroupService, self).__init__(kwargs)
self.client = manager.security_groups_client
def list(self):
client = self.client
secgrps = client.list_security_groups()
secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
LOG.debug("List count, %s Security Groups" % len(secgrp_del))
return secgrp_del
def delete(self):
client = self.client
secgrp_del = self.list()
for g in secgrp_del:
try:
client.delete_security_group(g['id'])
except Exception as e:
LOG.exception("Delete Security Groups exception: %s" % e)
def dry_run(self):
secgrp_del = self.list()
self.data['security_groups'] = secgrp_del
class FloatingIpService(BaseService):
def __init__(self, manager, **kwargs):
super(FloatingIpService, self).__init__(kwargs)
self.client = manager.floating_ips_client
def list(self):
client = self.client
floating_ips = client.list_floating_ips()
LOG.debug("List count, %s Floating IPs" % len(floating_ips))
return floating_ips
def delete(self):
client = self.client
floating_ips = self.list()
for f in floating_ips:
try:
client.delete_floating_ip(f['id'])
except Exception as e:
LOG.exception("Delete Floating IPs exception: %s" % e)
pass
def dry_run(self):
floating_ips = self.list()
self.data['floating_ips'] = floating_ips
class VolumeService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeService, self).__init__(kwargs)
self.client = manager.volumes_client
def list(self):
client = self.client
vols = client.list_volumes()
LOG.debug("List count, %s Volumes" % len(vols))
return vols
def delete(self):
client = self.client
vols = self.list()
for v in vols:
try:
client.delete_volume(v['id'])
except Exception as e:
LOG.exception("Delete Volume exception: %s" % e)
pass
def dry_run(self):
vols = self.list()
self.data['volumes'] = vols
class VolumeQuotaService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeQuotaService, self).__init__(kwargs)
self.client = manager.volume_quotas_client
def delete(self):
client = self.client
try:
client.delete_quota_set(self.tenant_id)
except Exception as e:
LOG.exception("Delete Volume Quotas exception: %s" % e)
pass
def dry_run(self):
quotas = self.client.show_quota_usage(self.tenant_id)
self.data['volume_quotas'] = quotas
class NovaQuotaService(BaseService):
def __init__(self, manager, **kwargs):
super(NovaQuotaService, self).__init__(kwargs)
self.client = manager.quotas_client
self.limits_client = manager.limits_client
def delete(self):
client = self.client
try:
client.delete_quota_set(self.tenant_id)
except Exception as e:
LOG.exception("Delete Quotas exception: %s" % e)
pass
def dry_run(self):
client = self.limits_client
quotas = client.get_absolute_limits()
self.data['compute_quotas'] = quotas
# Begin network service classes
class NetworkService(BaseService):
def __init__(self, manager, **kwargs):
super(NetworkService, self).__init__(kwargs)
self.client = manager.network_client
def _filter_by_conf_networks(self, item_list):
if not item_list or not all(('network_id' in i for i in item_list)):
return item_list
return [item for item in item_list if item['network_id']
not in CONF_NETWORKS]
def list(self):
client = self.client
networks = client.list_networks()
networks = self._filter_by_tenant_id(networks['networks'])
# filter out networks declared in tempest.conf
if self.is_preserve:
networks = [network for network in networks
if network['id'] not in CONF_NETWORKS]
        LOG.debug("List count, %s Networks" % len(networks))
return networks
def delete(self):
client = self.client
networks = self.list()
for n in networks:
try:
client.delete_network(n['id'])
except Exception as e:
LOG.exception("Delete Network exception: %s" % e)
pass
def dry_run(self):
networks = self.list()
self.data['networks'] = networks
class NetworkIpSecPolicyService(NetworkService):
def list(self):
client = self.client
ipsecpols = client.list_ipsecpolicies()
ipsecpols = ipsecpols['ipsecpolicies']
ipsecpols = self._filter_by_tenant_id(ipsecpols)
LOG.debug("List count, %s IP Security Policies" % len(ipsecpols))
return ipsecpols
def delete(self):
client = self.client
ipsecpols = self.list()
for ipsecpol in ipsecpols:
try:
client.delete_ipsecpolicy(ipsecpol['id'])
except Exception as e:
                LOG.exception("Delete IP Security Policy exception: %s" % e)
pass
def dry_run(self):
ipsecpols = self.list()
self.data['ip_security_policies'] = ipsecpols
class NetworkFwPolicyService(NetworkService):
def list(self):
client = self.client
fwpols = client.list_firewall_policies()
fwpols = fwpols['firewall_policies']
fwpols = self._filter_by_tenant_id(fwpols)
LOG.debug("List count, %s Firewall Policies" % len(fwpols))
return fwpols
def delete(self):
client = self.client
fwpols = self.list()
for fwpol in fwpols:
try:
client.delete_firewall_policy(fwpol['id'])
except Exception as e:
LOG.exception("Delete Firewall Policy exception: %s" % e)
pass
def dry_run(self):
fwpols = self.list()
self.data['firewall_policies'] = fwpols
class NetworkFwRulesService(NetworkService):
def list(self):
client = self.client
fwrules = client.list_firewall_rules()
fwrules = fwrules['firewall_rules']
fwrules = self._filter_by_tenant_id(fwrules)
LOG.debug("List count, %s Firewall Rules" % len(fwrules))
return fwrules
def delete(self):
client = self.client
fwrules = self.list()
for fwrule in fwrules:
try:
client.delete_firewall_rule(fwrule['id'])
except Exception as e:
LOG.exception("Delete Firewall Rule exception: %s" % e)
pass
def dry_run(self):
fwrules = self.list()
self.data['firewall_rules'] = fwrules
class NetworkIkePolicyService(NetworkService):
def list(self):
client = self.client
ikepols = client.list_ikepolicies()
ikepols = ikepols['ikepolicies']
ikepols = self._filter_by_tenant_id(ikepols)
LOG.debug("List count, %s IKE Policies" % len(ikepols))
return ikepols
def delete(self):
client = self.client
ikepols = self.list()
for ikepol in ikepols:
try:
                client.delete_ikepolicy(ikepol['id'])
except Exception as e:
LOG.exception("Delete IKE Policy exception: %s" % e)
pass
def dry_run(self):
ikepols = self.list()
self.data['ike_policies'] = ikepols
class NetworkVpnServiceService(NetworkService):
def list(self):
client = self.client
vpnsrvs = client.list_vpnservices()
vpnsrvs = vpnsrvs['vpnservices']
vpnsrvs = self._filter_by_tenant_id(vpnsrvs)
LOG.debug("List count, %s VPN Services" % len(vpnsrvs))
return vpnsrvs
def delete(self):
client = self.client
vpnsrvs = self.list()
for vpnsrv in vpnsrvs:
try:
client.delete_vpnservice(vpnsrv['id'])
except Exception as e:
LOG.exception("Delete VPN Service exception: %s" % e)
pass
def dry_run(self):
vpnsrvs = self.list()
self.data['vpn_services'] = vpnsrvs
class NetworkFloatingIpService(NetworkService):
def list(self):
client = self.client
flips = client.list_floatingips()
flips = flips['floatingips']
flips = self._filter_by_tenant_id(flips)
LOG.debug("List count, %s Network Floating IPs" % len(flips))
return flips
def delete(self):
client = self.client
flips = self.list()
for flip in flips:
try:
client.delete_floatingip(flip['id'])
except Exception as e:
LOG.exception("Delete Network Floating IP exception: %s" % e)
pass
def dry_run(self):
flips = self.list()
self.data['floating_ips'] = flips
class NetworkRouterService(NetworkService):
def list(self):
client = self.client
routers = client.list_routers()
routers = routers['routers']
routers = self._filter_by_tenant_id(routers)
if self.is_preserve:
routers = [router for router in routers
if router['id'] != CONF_PUB_ROUTER]
LOG.debug("List count, %s Routers" % len(routers))
return routers
def delete(self):
client = self.client
routers = self.list()
for router in routers:
try:
rid = router['id']
ports = client.list_router_interfaces(rid)
ports = ports['ports']
for port in ports:
subid = port['fixed_ips'][0]['subnet_id']
client.remove_router_interface_with_subnet_id(rid, subid)
client.delete_router(rid)
except Exception as e:
LOG.exception("Delete Router exception: %s" % e)
pass
def dry_run(self):
routers = self.list()
self.data['routers'] = routers
class NetworkHealthMonitorService(NetworkService):
def list(self):
client = self.client
hms = client.list_health_monitors()
hms = hms['health_monitors']
hms = self._filter_by_tenant_id(hms)
LOG.debug("List count, %s Health Monitors" % len(hms))
return hms
def delete(self):
client = self.client
hms = self.list()
for hm in hms:
try:
client.delete_health_monitor(hm['id'])
except Exception as e:
LOG.exception("Delete Health Monitor exception: %s" % e)
pass
def dry_run(self):
hms = self.list()
self.data['health_monitors'] = hms
class NetworkMemberService(NetworkService):
def list(self):
client = self.client
members = client.list_members()
members = members['members']
members = self._filter_by_tenant_id(members)
LOG.debug("List count, %s Members" % len(members))
return members
def delete(self):
client = self.client
members = self.list()
for member in members:
try:
client.delete_member(member['id'])
except Exception as e:
LOG.exception("Delete Member exception: %s" % e)
pass
def dry_run(self):
members = self.list()
self.data['members'] = members
class NetworkVipService(NetworkService):
def list(self):
client = self.client
vips = client.list_vips()
vips = vips['vips']
vips = self._filter_by_tenant_id(vips)
LOG.debug("List count, %s VIPs" % len(vips))
return vips
def delete(self):
client = self.client
vips = self.list()
for vip in vips:
try:
client.delete_vip(vip['id'])
except Exception as e:
LOG.exception("Delete VIP exception: %s" % e)
pass
def dry_run(self):
vips = self.list()
self.data['vips'] = vips
class NetworkPoolService(NetworkService):
def list(self):
client = self.client
pools = client.list_pools()
pools = pools['pools']
pools = self._filter_by_tenant_id(pools)
LOG.debug("List count, %s Pools" % len(pools))
return pools
def delete(self):
client = self.client
pools = self.list()
for pool in pools:
try:
client.delete_pool(pool['id'])
except Exception as e:
LOG.exception("Delete Pool exception: %s" % e)
pass
def dry_run(self):
pools = self.list()
self.data['pools'] = pools
class NetworkMeteringLabelRuleService(NetworkService):
def list(self):
client = self.client
rules = client.list_metering_label_rules()
rules = rules['metering_label_rules']
rules = self._filter_by_tenant_id(rules)
LOG.debug("List count, %s Metering Label Rules" % len(rules))
return rules
def delete(self):
client = self.client
rules = self.list()
for rule in rules:
try:
client.delete_metering_label_rule(rule['id'])
except Exception as e:
LOG.exception("Delete Metering Label Rule exception: %s" % e)
pass
def dry_run(self):
rules = self.list()
self.data['rules'] = rules
class NetworkMeteringLabelService(NetworkService):
def list(self):
client = self.client
labels = client.list_metering_labels()
labels = labels['metering_labels']
labels = self._filter_by_tenant_id(labels)
LOG.debug("List count, %s Metering Labels" % len(labels))
return labels
def delete(self):
client = self.client
labels = self.list()
for label in labels:
try:
client.delete_metering_label(label['id'])
except Exception as e:
LOG.exception("Delete Metering Label exception: %s" % e)
pass
def dry_run(self):
labels = self.list()
self.data['labels'] = labels
class NetworkPortService(NetworkService):
def list(self):
client = self.client
ports = client.list_ports()
ports = ports['ports']
ports = self._filter_by_tenant_id(ports)
if self.is_preserve:
ports = self._filter_by_conf_networks(ports)
LOG.debug("List count, %s Ports" % len(ports))
return ports
def delete(self):
client = self.client
ports = self.list()
for port in ports:
try:
client.delete_port(port['id'])
except Exception as e:
LOG.exception("Delete Port exception: %s" % e)
pass
def dry_run(self):
ports = self.list()
self.data['ports'] = ports
class NetworkSubnetService(NetworkService):
def list(self):
client = self.client
subnets = client.list_subnets()
subnets = subnets['subnets']
subnets = self._filter_by_tenant_id(subnets)
if self.is_preserve:
subnets = self._filter_by_conf_networks(subnets)
LOG.debug("List count, %s Subnets" % len(subnets))
return subnets
def delete(self):
client = self.client
subnets = self.list()
for subnet in subnets:
try:
client.delete_subnet(subnet['id'])
except Exception as e:
LOG.exception("Delete Subnet exception: %s" % e)
pass
def dry_run(self):
subnets = self.list()
self.data['subnets'] = subnets
# Telemetry services
class TelemetryAlarmService(BaseService):
def __init__(self, manager, **kwargs):
super(TelemetryAlarmService, self).__init__(kwargs)
self.client = manager.telemetry_client
def list(self):
client = self.client
alarms = client.list_alarms()
LOG.debug("List count, %s Alarms" % len(alarms))
return alarms
def delete(self):
client = self.client
alarms = self.list()
for alarm in alarms:
try:
client.delete_alarm(alarm['id'])
except Exception as e:
LOG.exception("Delete Alarms exception: %s" % e)
pass
def dry_run(self):
alarms = self.list()
self.data['alarms'] = alarms
# begin global services
class FlavorService(BaseService):
def __init__(self, manager, **kwargs):
super(FlavorService, self).__init__(kwargs)
self.client = manager.flavors_client
def list(self):
client = self.client
flavors = client.list_flavors({"is_public": None})
if not self.is_save_state:
# recreate list removing saved flavors
flavors = [flavor for flavor in flavors if flavor['id']
not in self.saved_state_json['flavors'].keys()]
if self.is_preserve:
flavors = [flavor for flavor in flavors
if flavor['id'] not in CONF_FLAVORS]
LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
return flavors
def delete(self):
client = self.client
flavors = self.list()
for flavor in flavors:
try:
client.delete_flavor(flavor['id'])
except Exception as e:
LOG.exception("Delete Flavor exception: %s" % e)
pass
def dry_run(self):
flavors = self.list()
self.data['flavors'] = flavors
def save_state(self):
flavors = self.list()
self.data['flavors'] = {}
for flavor in flavors:
self.data['flavors'][flavor['id']] = flavor['name']
class ImageService(BaseService):
def __init__(self, manager, **kwargs):
super(ImageService, self).__init__(kwargs)
self.client = manager.images_client
def list(self):
client = self.client
images = client.list_images({"all_tenants": True})
if not self.is_save_state:
images = [image for image in images if image['id']
not in self.saved_state_json['images'].keys()]
if self.is_preserve:
images = [image for image in images
if image['id'] not in CONF_IMAGES]
LOG.debug("List count, %s Images after reconcile" % len(images))
return images
def delete(self):
client = self.client
images = self.list()
for image in images:
try:
client.delete_image(image['id'])
except Exception as e:
LOG.exception("Delete Image exception: %s" % e)
pass
def dry_run(self):
images = self.list()
self.data['images'] = images
def save_state(self):
images = self.list()
self.data['images'] = {}
for image in images:
self.data['images'][image['id']] = image['name']
class IdentityService(BaseService):
def __init__(self, manager, **kwargs):
super(IdentityService, self).__init__(kwargs)
self.client = manager.identity_client
class UserService(IdentityService):
def list(self):
client = self.client
users = client.get_users()
if not self.is_save_state:
users = [user for user in users if user['id']
not in self.saved_state_json['users'].keys()]
if self.is_preserve:
users = [user for user in users if user['name']
not in CONF_USERS]
elif not self.is_save_state: # Never delete admin user
users = [user for user in users if user['name'] !=
CONF.identity.admin_username]
LOG.debug("List count, %s Users after reconcile" % len(users))
return users
def delete(self):
client = self.client
users = self.list()
for user in users:
try:
client.delete_user(user['id'])
except Exception as e:
LOG.exception("Delete User exception: %s" % e)
pass
def dry_run(self):
users = self.list()
self.data['users'] = users
def save_state(self):
users = self.list()
self.data['users'] = {}
for user in users:
self.data['users'][user['id']] = user['name']
class RoleService(IdentityService):
def list(self):
client = self.client
try:
roles = client.list_roles()
# reconcile roles with saved state and never list admin role
if not self.is_save_state:
roles = [role for role in roles if
(role['id'] not in
self.saved_state_json['roles'].keys()
and role['name'] != CONF.identity.admin_role)]
LOG.debug("List count, %s Roles after reconcile" % len(roles))
return roles
except Exception as ex:
LOG.exception("Cannot retrieve Roles, exception: %s" % ex)
return []
def delete(self):
client = self.client
roles = self.list()
for role in roles:
try:
client.delete_role(role['id'])
except Exception as e:
LOG.exception("Delete Role exception: %s" % e)
pass
def dry_run(self):
roles = self.list()
self.data['roles'] = roles
def save_state(self):
roles = self.list()
self.data['roles'] = {}
for role in roles:
self.data['roles'][role['id']] = role['name']
class TenantService(IdentityService):
def list(self):
client = self.client
tenants = client.list_tenants()
if not self.is_save_state:
tenants = [tenant for tenant in tenants if (tenant['id']
not in self.saved_state_json['tenants'].keys()
and tenant['name'] != CONF.identity.admin_tenant_name)]
if self.is_preserve:
tenants = [tenant for tenant in tenants if tenant['name']
not in CONF_TENANTS]
LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
return tenants
def delete(self):
client = self.client
tenants = self.list()
for tenant in tenants:
try:
client.delete_tenant(tenant['id'])
except Exception as e:
LOG.exception("Delete Tenant exception: %s" % e)
pass
def dry_run(self):
tenants = self.list()
self.data['tenants'] = tenants
def save_state(self):
tenants = self.list()
self.data['tenants'] = {}
for tenant in tenants:
self.data['tenants'][tenant['id']] = tenant['name']
class DomainService(BaseService):
def __init__(self, manager, **kwargs):
super(DomainService, self).__init__(kwargs)
self.client = manager.identity_v3_client
def list(self):
client = self.client
domains = client.list_domains()
if not self.is_save_state:
domains = [domain for domain in domains if domain['id']
not in self.saved_state_json['domains'].keys()]
LOG.debug("List count, %s Domains after reconcile" % len(domains))
return domains
def delete(self):
client = self.client
domains = self.list()
for domain in domains:
try:
client.update_domain(domain['id'], enabled=False)
client.delete_domain(domain['id'])
except Exception as e:
LOG.exception("Delete Domain exception: %s" % e)
pass
def dry_run(self):
domains = self.list()
self.data['domains'] = domains
def save_state(self):
domains = self.list()
self.data['domains'] = {}
for domain in domains:
self.data['domains'][domain['id']] = domain['name']
def get_tenant_cleanup_services():
tenant_services = []
if IS_CEILOMETER:
tenant_services.append(TelemetryAlarmService)
if IS_NOVA:
tenant_services.append(ServerService)
tenant_services.append(KeyPairService)
tenant_services.append(SecurityGroupService)
tenant_services.append(ServerGroupService)
if not IS_NEUTRON:
tenant_services.append(FloatingIpService)
tenant_services.append(NovaQuotaService)
if IS_HEAT:
tenant_services.append(StackService)
if IS_NEUTRON:
if test.is_extension_enabled('vpnaas', 'network'):
tenant_services.append(NetworkIpSecPolicyService)
tenant_services.append(NetworkIkePolicyService)
tenant_services.append(NetworkVpnServiceService)
if test.is_extension_enabled('fwaas', 'network'):
tenant_services.append(NetworkFwPolicyService)
tenant_services.append(NetworkFwRulesService)
if test.is_extension_enabled('lbaas', 'network'):
tenant_services.append(NetworkHealthMonitorService)
tenant_services.append(NetworkMemberService)
tenant_services.append(NetworkVipService)
tenant_services.append(NetworkPoolService)
if test.is_extension_enabled('metering', 'network'):
tenant_services.append(NetworkMeteringLabelRuleService)
tenant_services.append(NetworkMeteringLabelService)
tenant_services.append(NetworkRouterService)
tenant_services.append(NetworkFloatingIpService)
tenant_services.append(NetworkPortService)
tenant_services.append(NetworkSubnetService)
tenant_services.append(NetworkService)
if IS_CINDER:
tenant_services.append(SnapshotService)
tenant_services.append(VolumeService)
tenant_services.append(VolumeQuotaService)
return tenant_services
def get_global_cleanup_services():
global_services = []
if IS_NOVA:
global_services.append(FlavorService)
if IS_GLANCE:
global_services.append(ImageService)
global_services.append(UserService)
global_services.append(TenantService)
global_services.append(DomainService)
global_services.append(RoleService)
return global_services
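# Hedged driver sketch (the kwargs mirror the attributes the BaseService
# subclasses above read -- is_dry_run, is_save_state, is_preserve, data,
# saved_state_json, tenant_id; obtaining the manager is left to the caller):
#
#   def dry_run_tenant_services(mgr, tenant_id):
#       init_conf()
#       kwargs = {'data': {}, 'is_dry_run': True, 'is_save_state': False,
#                 'is_preserve': True, 'saved_state_json': {},
#                 'tenant_id': tenant_id}
#       for svc_class in get_tenant_cleanup_services():
#           svc_class(mgr, **kwargs).run()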
|
{
"content_hash": "b907b4654316a1b1b3c7bab937eb32d9",
"timestamp": "",
"source": "github",
"line_count": 1118,
"max_line_length": 78,
"avg_line_length": 30.292486583184257,
"alnum_prop": 0.571411698703753,
"repo_name": "eggmaster/tempest",
"id": "eb6f1437ddcb392dfb15343971e86ad4512032b3",
"size": "34492",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/cmd/cleanup_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2724691"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
import subprocess
subprocess.call(['pwd'])
|
{
"content_hash": "f34007fbaf07948ad7374f20eb839667",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 24,
"avg_line_length": 21.5,
"alnum_prop": 0.7674418604651163,
"repo_name": "Knln/COMP2041",
"id": "ec34dca28d488c1da41a887c444834b6f4d9c580",
"size": "67",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pwd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "496"
},
{
"name": "Perl",
"bytes": "19980"
},
{
"name": "Python",
"bytes": "4083"
},
{
"name": "Shell",
"bytes": "6721"
}
],
"symlink_target": ""
}
|
from datapackage_pipelines.wrapper import process
import datetime, logging
kns_knessetdates = []
def process_row(row, row_index, spec, resource_index, parameters, stats):
if spec['name'] == 'kns_knessetdates':
kns_knessetdates.append((row['KnessetNum'], row['Assembly'], row['Plenum'],
row['PlenumStart'], row['PlenumFinish']))
elif spec['name'] == parameters['resource']:
if row_index == 0:
for i, v in enumerate(sorted(kns_knessetdates, key=lambda k: k[3])):
kns_knessetdates[i] = v
event_date = parameters.get('event-date')
event_time = parameters.get('event-time')
event_datetime = parameters.get('event-datetime')
try:
if event_date and event_time:
event_date = row[event_date]
if isinstance(event_date, datetime.datetime):
event_date = event_date.date()
event_datetime = '{} {}'.format(event_date,
row[event_time] if row[event_time] else '00:00')
event_datetime = datetime.datetime.strptime(event_datetime, "%Y-%m-%d %H:%M")
elif event_datetime:
event_datetime = datetime.datetime.strptime(row[event_datetime], "%Y-%m-%d %H:%M:%S")
assert event_datetime
except Exception:
logging.info(spec)
logging.info(row)
raise
knesset_field = parameters.get('knesset', 'knesset')
plenum_field = parameters.get('plenum', 'plenum')
assembly_field = parameters.get('assembly', 'assembly')
pagra_field = parameters.get('pagra', 'pagra')
last_knesset, last_assembly, last_plenum = None, None, None
updated = False
for knesset, assembly, plenum, plenum_start, plenum_finish in kns_knessetdates:
if event_datetime < plenum_start:
updated = True
if not pagra_field:
if plenum_field:
row[plenum_field] = plenum
if assembly_field:
row[assembly_field] = assembly
row[knesset_field] = knesset
else:
if plenum_field:
row[plenum_field] = last_plenum
if assembly_field:
row[assembly_field] = last_assembly
row.update(**{knesset_field: last_knesset,
pagra_field: True})
break
elif not plenum_finish or event_datetime <= plenum_finish:
updated = True
if assembly_field:
row[assembly_field] = assembly
if plenum_field:
row[plenum_field] = plenum
row[knesset_field] = knesset
if pagra_field:
row[pagra_field] = False
break
last_knesset, last_assembly, last_plenum = knesset, assembly, plenum
if not updated:
logging.warning('failed to update plenum/assembly for event_datetime: {}'.format(event_datetime))
if assembly_field:
row[assembly_field] = ''
if plenum_field:
row[plenum_field] = ''
if pagra_field:
row[pagra_field] = ''
return row
def modify_datapackage(datapackage, parameters, stats):
for resource in datapackage['resources']:
if resource['name'] == parameters['resource']:
knesset_field = parameters.get('knesset', 'knesset')
plenum_field = parameters.get('plenum', 'plenum')
assembly_field = parameters.get('assembly', 'assembly')
pagra_field = parameters.get('pagra', 'pagra')
existing_fields = {field['name']: field for field in resource['schema']['fields']}
for new_field_name in (knesset_field, plenum_field, assembly_field, pagra_field):
if not new_field_name:
continue
new_field_type = 'boolean' if new_field_name == pagra_field else 'integer'
if new_field_name in existing_fields:
existing_fields[new_field_name]['type'] = new_field_type
else:
resource['schema']['fields'].append({'name': new_field_name,
'type': new_field_type})
return datapackage
if __name__ == '__main__':
process(modify_datapackage, process_row)
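# Illustrative sketch (not part of the pipeline): the core of process_row above is an
# interval lookup over plenum date ranges sorted by start date - events that fall before
# the next plenum's start are attributed to the previous plenum and flagged as recess
# ("pagra"). The tuple layout and dates below are hypothetical examples, not real data.
import datetime

def lookup_plenum(event_dt, knessetdates):
    last_knesset = None
    for knesset, start, finish in knessetdates:
        if event_dt < start:
            # between plenums: belongs to the previous Knesset, during recess
            return last_knesset, True
        if not finish or event_dt <= finish:
            return knesset, False
        last_knesset = knesset
    return None, None

ranges = [(20, datetime.datetime(2015, 3, 31), datetime.datetime(2015, 7, 30)),
          (20, datetime.datetime(2015, 10, 11), None)]
print(lookup_plenum(datetime.datetime(2015, 8, 15), ranges))  # -> (20, True): recess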
|
{
"content_hash": "4694575fde0634a66ba41d5459e44467",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 109,
"avg_line_length": 46.06,
"alnum_prop": 0.531480677377334,
"repo_name": "hasadna/knesset-data-pipelines",
"id": "fbda6a4462eb04f2b62a3f68e2ed71cabbcb6167",
"size": "4606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datapackage_pipelines_knesset/processors/add_event_plenum_assembly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1469"
},
{
"name": "Jupyter Notebook",
"bytes": "4163927"
},
{
"name": "Python",
"bytes": "294483"
},
{
"name": "Shell",
"bytes": "1601"
}
],
"symlink_target": ""
}
|
from ..registry_tools import iso_register
from .core import UnitedStates
@iso_register('US-NH')
class NewHampshire(UnitedStates):
"""New Hampshire"""
include_thanksgiving_friday = True
martin_luther_king_label = "Martin Luther King, Jr. Civil Rights Day"
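# Usage sketch (not part of the original module; assumes workalendar's standard
# Calendar API, i.e. holidays() and is_working_day(); the dates are illustrative):
import datetime
from workalendar.usa import NewHampshire

cal = NewHampshire()
for day, label in cal.holidays(2020):
    print(day, label)  # includes "Martin Luther King, Jr. Civil Rights Day"
print(cal.is_working_day(datetime.date(2020, 11, 27)))  # False: Thanksgiving Friday is a holiday here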
|
{
"content_hash": "d0453270732787ad4318faff8f01b974",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.7360594795539034,
"repo_name": "novapost/workalendar",
"id": "51739ad9daaae38ec70ec38ceb29935897d8019e",
"size": "269",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "workalendar/usa/new_hampshire.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "268634"
}
],
"symlink_target": ""
}
|
from muntjac.api import \
(VerticalLayout, Form, HorizontalLayout, Button, Alignment,
ComboBox, PasswordField)
from muntjac.ui.themes import BaseTheme
from muntjac.ui import button
from muntjac.data.validator import IValidator, InvalidValueException
from muntjac.demo.sampler.ExampleUtil import ExampleUtil
from muntjac.ui.window import Notification
from muntjac.ui.default_field_factory import DefaultFieldFactory
from muntjac.data.validators.string_length_validator import \
StringLengthValidator
class FormPojoExample(VerticalLayout):
_COMMON_FIELD_WIDTH = '12em'
def __init__(self):
super(FormPojoExample, self).__init__()
self._person = Person() # a person POJO
personItem = BeanItem(self._person) # item from POJO
# Create the Form
personForm = Form()
personForm.setCaption('Personal details')
personForm.setWriteThrough(False) # we want explicit 'apply'
personForm.setInvalidCommitted(False) # no invalid values in datamodel
# FieldFactory for customizing the fields and adding validators
personForm.setFormFieldFactory(PersonFieldFactory(self))
personForm.setItemDataSource(personItem) # bind to POJO via BeanItem
# Determines which properties are shown, and in which order:
personForm.setVisibleItemProperties(['firstName', 'lastName',
'countryCode', 'password', 'birthdate', 'shoesize', 'uuid'])
# Add form to layout
self.addComponent(personForm)
# The cancel / apply buttons
buttons = HorizontalLayout()
buttons.setSpacing(True)
discardChanges = Button('Discard changes', DiscardListener(personForm))
discardChanges.setStyleName(BaseTheme.BUTTON_LINK)
buttons.addComponent(discardChanges)
buttons.setComponentAlignment(discardChanges, Alignment.MIDDLE_LEFT)
aply = Button('Apply', ApplyListener(personForm))
buttons.addComponent(aply)
personForm.getFooter().addComponent(buttons)
personForm.getFooter().setMargin(False, False, True, True)
# button for showing the internal state of the POJO
l = InternalStateListener(self)
showPojoState = Button('Show POJO internal state', l)
self.addComponent(showPojoState)
def showPojoState(self):
n = Notification('POJO state', Notification.TYPE_TRAY_NOTIFICATION)
n.setPosition(Notification.POSITION_CENTERED)
n.setDescription('First name: ' + self._person.getFirstName()
+ '<br/>Last name: ' + self._person.getLastName()
+ '<br/>Country: ' + self._person.getCountryCode()
+ '<br/>Birthdate: ' + self._person.getBirthdate()
+ '<br/>Shoe size: ' + self._person.getShoesize()
+ '<br/>Password: ' + self._person.getPassword()
+ '<br/>UUID: ' + self._person.getUuid())
self.getWindow().showNotification(n)
class DiscardListener(button.IClickListener):
def __init__(self, personForm):
self._personForm = personForm
def buttonClick(self, event):
self._personForm.discard()
class ApplyListener(button.IClickListener):
def __init__(self, personForm):
self._personForm = personForm
def buttonClick(self, event):
try:
            self._personForm.commit()

except Exception:
pass # Ignored, we'll let the Form handle the errors
class InternalStateListener(button.IClickListener):
def __init__(self, c):
self._c = c
def buttonClick(self, event):
self._c.showPojoState()
class PersonFieldFactory(DefaultFieldFactory):
def __init__(self, c):
self._c = c
super(PersonFieldFactory, self).__init__()
self.countries = ComboBox('Country')
self.countries.setWidth(self._c._COMMON_FIELD_WIDTH)
self.countries.setContainerDataSource(
ExampleUtil.getISO3166Container())
self.countries.setItemCaptionPropertyId(
ExampleUtil.iso3166_PROPERTY_NAME)
self.countries.setItemIconPropertyId(
ExampleUtil.iso3166_PROPERTY_FLAG)
self.countries.setFilteringMode(ComboBox.FILTERINGMODE_STARTSWITH)
def createField(self, item, propertyId, uiContext):
if 'countryCode' == propertyId:
# filtering ComboBox w/ country names
return self.countries
elif 'password' == propertyId:
# Create a password field so the password is not shown
f = self.createPasswordField(propertyId)
else:
# Use the super class to create a suitable field base on the
# property type.
f = super(PersonFieldFactory, self).createField(item,
propertyId, uiContext)
if 'firstName' == propertyId:
tf = f
tf.setRequired(True)
tf.setRequiredError('Please enter a First Name')
tf.setWidth(self._c._COMMON_FIELD_WIDTH)
tf.addValidator(StringLengthValidator(
'First Name must be 3-25 characters', 3, 25, False))
elif 'lastName' == propertyId:
tf = f
tf.setRequired(True)
tf.setRequiredError('Please enter a Last Name')
tf.setWidth(self._c._COMMON_FIELD_WIDTH)
tf.addValidator(StringLengthValidator(
'Last Name must be 3-50 characters', 3, 50, False))
elif 'password' == propertyId:
pf = f
pf.setRequired(True)
pf.setRequiredError('Please enter a password')
pf.setWidth('10em')
pf.addValidator(StringLengthValidator(
'Password must be 6-20 characters', 6, 20, False))
elif 'shoesize' == propertyId:
tf = f
tf.setNullRepresentation('')
tf.setNullSettingAllowed(True)
tf.addValidator(IntegerValidator('Shoe size must be an Integer'))
tf.setWidth('2em')
elif 'uuid' == propertyId:
tf = f
tf.setWidth('20em')
return f
def createPasswordField(self, propertyId):
pf = PasswordField()
pf.setCaption(DefaultFieldFactory.createCaptionByPropertyId(propertyId))
return pf
class Person(object):
def __init__(self):
self._uuid = '3856c3da-ea56-4717-9f58-85f6c5f560a5'
self._firstName = ''
self._lastName = ''
self._birthdate = None
self._shoesize = 42
self._password = ''
self._countryCode = ''
def getFirstName(self):
return self._firstName
def setFirstName(self, firstName):
self._firstName = firstName
def getLastName(self):
return self._lastName
def setLastName(self, lastName):
self._lastName = lastName
def getBirthdate(self):
return self._birthdate
def setBirthdate(self, birthdate):
self._birthdate = birthdate
def getShoesize(self):
return self._shoesize
def setShoesize(self, shoesize):
self._shoesize = shoesize
def getPassword(self):
return self._password
def setPassword(self, password):
self._password = password
def getUuid(self):
return self._uuid
def getCountryCode(self):
return self._countryCode
def setCountryCode(self, countryCode):
self._countryCode = countryCode
class IntegerValidator(IValidator):
def __init__(self, message):
self._message = message
def isValid(self, value):
if (value is None) or (not isinstance(value, str)):
return False
try:
int(value)
except Exception:
return False
return True
def validate(self, value):
if not self.isValid(value):
raise InvalidValueException(self._message)
|
{
"content_hash": "5564fa6b4026a76fcea49e3387f81638",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 32.207317073170735,
"alnum_prop": 0.6290546510160293,
"repo_name": "rwl/muntjac",
"id": "37749fb621015e9b6c9806b9870966fedd09574f",
"size": "7924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muntjac/demo/sampler/features/form/FormPojoExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8602"
},
{
"name": "Java",
"bytes": "2243"
},
{
"name": "JavaScript",
"bytes": "32438"
},
{
"name": "Python",
"bytes": "3212361"
}
],
"symlink_target": ""
}
|
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
def __init__(self, master, im, value=128):
Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
self.canvas.pack()
scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale, length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
# uncomment the following line for instant feedback (might
# be too slow on some platforms)
# self.redraw()
def update_scale(self, value):
self.value = eval(value)
self.redraw()
def redraw(self, event=None):
# create overlay (note the explicit conversion to mode "1")
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
# update canvas
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
tags="overlay")
# --------------------------------------------------------------------
# main
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
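# Minimal non-GUI sketch of the same thresholding step used in UI.redraw() above
# (not part of the original viewer; the file paths are placeholders):
from PIL import Image

src = Image.open("input.png").convert("L")
threshold = 128
mask = src.point(lambda v: v >= threshold, "1")  # same point()/mode "1" conversion
mask.save("mask.png")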
|
{
"content_hash": "45f4c5c76ed61b5dda357ed9ba5bc02b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 74,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.5674846625766872,
"repo_name": "neerajvashistha/pa-dude",
"id": "adba3e9e9adf9430138ef3910a702d00a7f1375c",
"size": "1809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/thresholder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "359307"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "114504"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "216904"
},
{
"name": "JavaScript",
"bytes": "1323680"
},
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "31341230"
},
{
"name": "Self",
"bytes": "40307"
},
{
"name": "Shell",
"bytes": "5427"
},
{
"name": "TeX",
"bytes": "96790"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
"""Tests for the toolchain sub-system"""
import sys
import os
from string import printable
from copy import deepcopy
from mock import MagicMock, patch
from hypothesis import given, settings
from hypothesis.strategies import text, lists, fixed_dictionaries, booleans
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..",
".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAIN_CLASSES, LEGACY_TOOLCHAIN_NAMES,\
Resources, TOOLCHAIN_PATHS
from tools.targets import TARGET_MAP
def test_instantiation():
"""Test that all exported toolchain may be instantiated"""
for name, tc_class in TOOLCHAIN_CLASSES.items():
cls = tc_class(TARGET_MAP["K64F"])
assert name == cls.name or\
name == LEGACY_TOOLCHAIN_NAMES[cls.name]
ALPHABET = [char for char in printable if char not in [u'.', u'/']]
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text())}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
def test_toolchain_profile_c(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
C compiler"""
filename = deepcopy(source_file)
filename[-1] += ".c"
to_compile = os.path.join(*filename)
with patch('os.mkdir') as _mkdir:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(TARGET_MAP["K64F"], build_profile=profile)
toolchain.inc_md5 = ""
toolchain.build_dir = ""
compile_command = toolchain.compile_command(to_compile,
to_compile + ".o", [])
for parameter in profile['c'] + profile['common']:
assert any(parameter in cmd for cmd in compile_command), \
"Toolchain %s did not propigate arg %s" % (toolchain.name,
parameter)
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text())}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
def test_toolchain_profile_cpp(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
C++ compiler"""
filename = deepcopy(source_file)
filename[-1] += ".cpp"
to_compile = os.path.join(*filename)
with patch('os.mkdir') as _mkdir:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(TARGET_MAP["K64F"], build_profile=profile)
toolchain.inc_md5 = ""
toolchain.build_dir = ""
compile_command = toolchain.compile_command(to_compile,
to_compile + ".o", [])
for parameter in profile['cxx'] + profile['common']:
assert any(parameter in cmd for cmd in compile_command), \
"Toolchain %s did not propigate arg %s" % (toolchain.name,
parameter)
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text())}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
def test_toolchain_profile_asm(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
Assembler"""
filename = deepcopy(source_file)
filename[-1] += ".s"
to_compile = os.path.join(*filename)
with patch('os.mkdir') as _mkdir:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(TARGET_MAP["K64F"], build_profile=profile)
toolchain.inc_md5 = ""
toolchain.build_dir = ""
compile_command = toolchain.compile_command(to_compile,
to_compile + ".o", [])
if not compile_command:
assert compile_command, to_compile
for parameter in profile['asm']:
assert any(parameter in cmd for cmd in compile_command), \
"Toolchain %s did not propigate arg %s" % (toolchain.name,
parameter)
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(TARGET_MAP["K64F"])
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
@given(lists(text(alphabet=ALPHABET, min_size=1), min_size=1))
def test_detect_duplicates(filenames):
c_sources = [os.path.join(name, "dupe.c") for name in filenames]
s_sources = [os.path.join(name, "dupe.s") for name in filenames]
cpp_sources = [os.path.join(name, "dupe.cpp") for name in filenames]
with MagicMock() as notify:
toolchain = TOOLCHAIN_CLASSES["ARM"](TARGET_MAP["K64F"], notify=notify)
res = Resources()
res.c_sources = c_sources
res.s_sources = s_sources
res.cpp_sources = cpp_sources
assert res.detect_duplicates(toolchain) == 1,\
"Not Enough duplicates found"
_, (notification, _), _ = notify.mock_calls[1]
assert "dupe.o" in notification["message"]
assert "dupe.s" in notification["message"]
assert "dupe.c" in notification["message"]
assert "dupe.cpp" in notification["message"]
@given(text(alphabet=ALPHABET + ["/"], min_size=1))
@given(booleans())
@given(booleans())
@settings(max_examples=20)
def test_path_specified_gcc(gcc_loc, exists_at_loc, exists_in_path):
with patch('tools.toolchains.gcc.exists') as _exists:
with patch('tools.toolchains.gcc.find_executable') as _find:
_exists.return_value = exists_at_loc
_find.return_value = exists_in_path
TOOLCHAIN_PATHS['GCC_ARM'] = gcc_loc
toolchain_class = TOOLCHAIN_CLASSES["GCC_ARM"]
found_p = toolchain_class.check_executable()
assert found_p == (exists_at_loc or exists_in_path)
if exists_at_loc:
assert TOOLCHAIN_PATHS['GCC_ARM'] == gcc_loc
elif exists_in_path:
assert TOOLCHAIN_PATHS['GCC_ARM'] == ''
|
{
"content_hash": "9dd1da189ddd5bd5007aabe18abac9e7",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 43.50344827586207,
"alnum_prop": 0.5829105897273303,
"repo_name": "fanghuaqi/mbed",
"id": "01a684fae8aa883256c087c3ed0e278f440851ed",
"size": "6308",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tools/test/toolchains/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5511173"
},
{
"name": "C",
"bytes": "155789688"
},
{
"name": "C++",
"bytes": "7813784"
},
{
"name": "CMake",
"bytes": "27635"
},
{
"name": "HTML",
"bytes": "1531100"
},
{
"name": "Makefile",
"bytes": "131050"
},
{
"name": "Objective-C",
"bytes": "169382"
},
{
"name": "Python",
"bytes": "18259"
},
{
"name": "Shell",
"bytes": "24790"
},
{
"name": "XSLT",
"bytes": "11192"
}
],
"symlink_target": ""
}
|
'''
Conversion of basis sets to deMon2K format
'''
from .. import lut, manip, sort, misc, printing
def write_demon2k(basis):
'''Converts a basis set to deMon2K format
'''
if 'gto_spherical' in basis['function_types']:
s = '# This basis set uses spherical components\n\n'
else:
s = '# This basis set uses cartesian components\n\n'
basis = manip.uncontract_spdf(basis, 0, True)
basis = manip.uncontract_general(basis, False)
basis = sort.sort_basis(basis, False)
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
# Electron Basis
if electron_elements:
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, True)
elname = lut.element_name_from_Z(z).upper()
cont_string = misc.contraction_string(data)
# Need the start of electron shells if there are ECPs
ecp_electrons = data.get('ecp_electrons', 0)
shells_start = lut.electron_shells_start(ecp_electrons)
shells_start = list(shells_start)
s += 'O-{} {} ({})\n'.format(elname, sym.upper(), basis['name'])
s += '# {}\n'.format(cont_string)
nshells = len(data['electron_shells'])
s += ' {}\n'.format(nshells)
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
nprim = len(exponents)
# We removed spdf already
assert len(shell['angular_momentum']) == 1
am = shell['angular_momentum'][0]
# shells_start has starting principal quantum numbers for all AM
pqn = shells_start[am]
shells_start[am] += 1
s += ' {} {} {}\n'.format(pqn, am, nprim)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=False)
# Write out ECP
if ecp_elements:
s += '\n\nECP\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, normalize=True)
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += '{} nelec {}\n'.format(sym, data['ecp_electrons'])
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am).upper()
if am[0] == max_ecp_am:
s += '{} ul\n'.format(sym)
else:
s += '{} {}\n'.format(sym, amchar)
point_places = [0, 9, 32]
s += printing.write_matrix([rexponents, gexponents, *coefficients], point_places, convert_exp=False)
s += 'END\n'
return s
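# Usage sketch (not part of the writer module; assumes basis_set_exchange's public
# get_basis() helper and that 'demon2k' is registered as an output format, as this
# writer suggests):
import basis_set_exchange as bse

print(bse.get_basis('sto-3g', elements=[1, 8], fmt='demon2k'))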
|
{
"content_hash": "e536dced2ab34c90570b22ccd844900a",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 116,
"avg_line_length": 36.854166666666664,
"alnum_prop": 0.5398530243075184,
"repo_name": "MOLSSI-BSE/basis_set_exchange",
"id": "0d5e485bb9c75b9b0973f15e447f0e1a2428fbf4",
"size": "3538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basis_set_exchange/writers/demon2k.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "513196"
}
],
"symlink_target": ""
}
|
"""
remove everything above the first msgid line (PIs destroyed by gettext),
prepend some processing instructions to the .po file so it can be put
onto the moinmaster wiki and processed by the gettext parser
"""
master_url = "http://master19.moinmo.in/?action=xmlrpc2"
user = "ThomasWaldmann" # must be a known Wiki account
import sys, os
import xmlrpclib
password = os.environ.get("PASS", "")
sys.path.insert(0, '../..')
def run():
excluded = ["en", ] # languages managed in repository, not in wiki
lang = sys.argv[1]
data = sys.stdin.read()
if lang in excluded:
f = open("%s.MoinMoin.po" % lang, "w")
f.write(data)
f.close()
sys.exit(0)
data = data.decode('utf-8')
cutpos = data.index(u"msgid")
data = data[cutpos:] # remove comments at top
data = u"""\
## Please edit system and help pages ONLY in the master wiki!
## For more information, please see MoinMoin:MoinDev/Translation.
##master-page:None
##master-date:None
#acl -All:write Default
#format gettext
#language %s
#
# MoinMoin %s system text translation
#
%s""" % (lang, lang, data)
pagename = "MoinI18n/%s" % lang
pagedata = data.encode('utf-8')
wiki = xmlrpclib.ServerProxy(master_url)
token = wiki.getAuthToken(user, password)
mc = xmlrpclib.MultiCall(wiki)
mc.applyAuthToken(token)
mc.WhoAmI() # then we see in the result if auth worked correctly!
mc.putPage(pagename, pagedata)
mc.deleteAuthToken(token)
result = mc()
print "Page: %s rc=%r" % (pagename, list(result))
if __name__ == "__main__":
run()
|
{
"content_hash": "24c5c4e07d249429186463c028b139cf",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 26.203125,
"alnum_prop": 0.6249254621347644,
"repo_name": "Glottotopia/aagd",
"id": "fdf16d999707612ba13e4127bd0e2e7327a56a4a",
"size": "1696",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/i18n/tools/po2wiki.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
}
|
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import zutonggu213
class TestZutonggu213Functions(unittest.TestCase):
def setUp(self):
pass
def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "493f938bd99b77a3dafcd58873a8ef86",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 50,
"avg_line_length": 13.45,
"alnum_prop": 0.6356877323420075,
"repo_name": "sinotradition/meridian",
"id": "8b1c4f8d326382c1facd32168e04084a95f25532",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meridian/tst/acupoints/test_zutonggu213.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "239622"
}
],
"symlink_target": ""
}
|
import datetime
from unittest import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
from magnum.api.controllers.v1 import nodegroup as api_nodegroup
from magnum.conductor import api as rpcapi
import magnum.conf
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.db import utils as db_utils
from magnum.tests.unit.objects import utils as obj_utils
CONF = magnum.conf.CONF
class TestNodegroupObject(base.TestCase):
def test_nodegroup_init(self):
nodegroup_dict = apiutils.nodegroup_post_data()
del nodegroup_dict['node_count']
del nodegroup_dict['min_node_count']
del nodegroup_dict['max_node_count']
nodegroup = api_nodegroup.NodeGroup(**nodegroup_dict)
self.assertEqual(1, nodegroup.node_count)
self.assertEqual(0, nodegroup.min_node_count)
self.assertIsNone(nodegroup.max_node_count)
class NodeGroupControllerTest(api_base.FunctionalTest):
headers = {"Openstack-Api-Version": "container-infra latest"}
def _add_headers(self, kwargs):
if 'headers' not in kwargs:
kwargs['headers'] = self.headers
def get_json(self, *args, **kwargs):
self._add_headers(kwargs)
return super(NodeGroupControllerTest, self).get_json(*args, **kwargs)
def post_json(self, *args, **kwargs):
self._add_headers(kwargs)
return super(NodeGroupControllerTest, self).post_json(*args, **kwargs)
def delete(self, *args, **kwargs):
self._add_headers(kwargs)
return super(NodeGroupControllerTest, self).delete(*args, **kwargs)
def patch_json(self, *args, **kwargs):
self._add_headers(kwargs)
return super(NodeGroupControllerTest, self).patch_json(*args, **kwargs)
class TestListNodegroups(NodeGroupControllerTest):
_expanded_attrs = ["id", "project_id", "docker_volume_size", "labels",
"node_addresses", "links"]
_nodegroup_attrs = ["uuid", "name", "flavor_id", "node_count", "role",
"is_default", "image_id", "min_node_count",
"max_node_count"]
def setUp(self):
super(TestListNodegroups, self).setUp()
obj_utils.create_test_cluster_template(self.context)
self.cluster_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(
self.context, uuid=self.cluster_uuid)
self.cluster = objects.Cluster.get_by_uuid(self.context,
self.cluster_uuid)
def _test_list_nodegroups(self, cluster_id, filters=None, expected=None):
url = '/clusters/%s/nodegroups' % cluster_id
if filters is not None:
filter_list = ['%s=%s' % (k, v) for k, v in filters.items()]
url += '?' + '&'.join(f for f in filter_list)
response = self.get_json(url)
if expected is None:
expected = []
ng_uuids = [ng['uuid'] for ng in response['nodegroups']]
self.assertEqual(expected, ng_uuids)
for ng in response['nodegroups']:
self._verify_attrs(self._nodegroup_attrs, ng)
self._verify_attrs(self._expanded_attrs, ng, positive=False)
def test_get_all(self):
expected = [ng.uuid for ng in self.cluster.nodegroups]
self._test_list_nodegroups(self.cluster_uuid, expected=expected)
def test_get_all_by_name(self):
expected = [ng.uuid for ng in self.cluster.nodegroups]
self._test_list_nodegroups(self.cluster.name, expected=expected)
def test_get_all_by_name_non_default_ngs(self):
db_utils.create_test_nodegroup(cluster_id=self.cluster_uuid,
name='non_default_ng')
expected = [ng.uuid for ng in self.cluster.nodegroups]
self._test_list_nodegroups(self.cluster.name, expected=expected)
def test_get_all_with_pagination_marker(self):
worker_ng_uuid = self.cluster.default_ng_worker.uuid
master_ng_uuid = self.cluster.default_ng_master.uuid
# First make sure that the api returns 1 ng and since they
# are sorted by id, the ng should be the default-worker
url = '/clusters/%s/nodegroups?limit=1' % (self.cluster_uuid)
response = self.get_json(url)
self.assertEqual(1, len(response['nodegroups']))
self.assertEqual(worker_ng_uuid, response['nodegroups'][0]['uuid'])
marker = "marker=%s" % worker_ng_uuid
self.assertIn(marker, response['next'])
# Now using the next url make sure that we get the default-master
next_url = response['next'].split('v1')[1]
response = self.get_json(next_url)
self.assertEqual(1, len(response['nodegroups']))
self.assertEqual(master_ng_uuid, response['nodegroups'][0]['uuid'])
marker = "marker=%s" % master_ng_uuid
self.assertIn(marker, response['next'])
# Now we should not get any other entry since the cluster only has two
# nodegroups and the marker is set at the default-master.
next_url = response['next'].split('v1')[1]
response = self.get_json(next_url)
self.assertEqual(0, len(response['nodegroups']))
self.assertNotIn('next', response)
def test_get_all_by_role(self):
filters = {'role': 'master'}
expected = [self.cluster.default_ng_master.uuid]
self._test_list_nodegroups(self.cluster.name, filters=filters,
expected=expected)
filters = {'role': 'worker'}
expected = [self.cluster.default_ng_worker.uuid]
self._test_list_nodegroups(self.cluster.name, filters=filters,
expected=expected)
def test_get_all_by_non_existent_role(self):
filters = {'role': 'non-existent'}
self._test_list_nodegroups(self.cluster.name, filters=filters)
@mock.patch("magnum.common.policy.enforce")
@mock.patch("magnum.common.context.make_context")
def test_get_all_as_admin(self, mock_context, mock_policy):
temp_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(self.context, uuid=temp_uuid,
project_id=temp_uuid)
self.context.is_admin = True
self.context.all_tenants = True
cluster = objects.Cluster.get_by_uuid(self.context, temp_uuid)
expected = [ng.uuid for ng in cluster.nodegroups]
self._test_list_nodegroups(cluster.uuid, expected=expected)
def test_get_all_non_existent_cluster(self):
response = self.get_json('/clusters/not-here/nodegroups',
expect_errors=True)
self.assertEqual(404, response.status_code)
def test_get_one(self):
worker = self.cluster.default_ng_worker
url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, worker.uuid)
response = self.get_json(url)
self.assertEqual(worker.name, response['name'])
self._verify_attrs(self._nodegroup_attrs, response)
self._verify_attrs(self._expanded_attrs, response)
self.assertEqual({}, response['labels_overridden'])
self.assertEqual({}, response['labels_skipped'])
self.assertEqual({}, response['labels_added'])
def test_get_one_non_default(self):
self.cluster.labels = {'label1': 'value1', 'label2': 'value2'}
self.cluster.save()
ng_name = 'non_default_ng'
ng_labels = {
'label1': 'value3', 'label2': 'value2', 'label4': 'value4'
}
db_utils.create_test_nodegroup(cluster_id=self.cluster.uuid,
name=ng_name, labels=ng_labels)
url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, ng_name)
response = self.get_json(url)
self._verify_attrs(self._nodegroup_attrs, response)
self._verify_attrs(self._expanded_attrs, response)
self.assertEqual(ng_labels, response['labels'])
overridden_labels = {'label1': 'value1'}
self.assertEqual(overridden_labels, response['labels_overridden'])
self.assertEqual({'label4': 'value4'}, response['labels_added'])
self.assertEqual({}, response['labels_skipped'])
def test_get_one_non_default_skipped_labels(self):
self.cluster.labels = {'label1': 'value1', 'label2': 'value2'}
self.cluster.save()
ng_name = 'non_default_ng'
ng_labels = {'label1': 'value3', 'label4': 'value4'}
db_utils.create_test_nodegroup(cluster_id=self.cluster.uuid,
name=ng_name, labels=ng_labels)
url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, ng_name)
response = self.get_json(url)
self._verify_attrs(self._nodegroup_attrs, response)
self._verify_attrs(self._expanded_attrs, response)
self.assertEqual(ng_labels, response['labels'])
overridden_labels = {'label1': 'value1'}
self.assertEqual(overridden_labels, response['labels_overridden'])
self.assertEqual({'label4': 'value4'}, response['labels_added'])
self.assertEqual({'label2': 'value2'}, response['labels_skipped'])
def test_get_one_non_existent_ng(self):
url = '/clusters/%s/nodegroups/not-here' % self.cluster.uuid
response = self.get_json(url, expect_errors=True)
self.assertEqual(404, response.status_code)
@mock.patch("magnum.common.policy.enforce")
@mock.patch("magnum.common.context.make_context")
def test_get_one_as_admin(self, mock_context, mock_policy):
temp_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(self.context, uuid=temp_uuid,
project_id=temp_uuid)
self.context.is_admin = True
self.context.all_tenants = True
cluster = objects.Cluster.get_by_uuid(self.context, temp_uuid)
worker = cluster.default_ng_worker
url = '/clusters/%s/nodegroups/%s' % (cluster.uuid, worker.uuid)
response = self.get_json(url)
self.assertEqual(worker.name, response['name'])
self._verify_attrs(self._nodegroup_attrs, response)
self._verify_attrs(self._expanded_attrs, response)
def test_get_one_wrong_microversion(self):
headers = {"Openstack-Api-Version": "container-infra 1.8"}
worker = self.cluster.default_ng_worker
url = '/clusters/%s/nodegroups/%s' % (self.cluster.uuid, worker.uuid)
response = self.get_json(url, headers=headers, expect_errors=True)
self.assertEqual(406, response.status_code)
def test_get_all_wrong_microversion(self):
headers = {"Openstack-Api-Version": "container-infra 1.8"}
url = '/clusters/%s/nodegroups/' % (self.cluster.uuid)
response = self.get_json(url, headers=headers, expect_errors=True)
self.assertEqual(406, response.status_code)
class TestPost(NodeGroupControllerTest):
def setUp(self):
super(TestPost, self).setUp()
self.cluster_template = obj_utils.create_test_cluster_template(
self.context)
self.cluster = obj_utils.create_test_cluster(self.context)
self.cluster.refresh()
p = mock.patch.object(rpcapi.API, 'nodegroup_create_async')
self.mock_ng_create = p.start()
self.mock_ng_create.side_effect = self._simulate_nodegroup_create
self.addCleanup(p.stop)
self.url = "/clusters/%s/nodegroups" % self.cluster.uuid
def _simulate_nodegroup_create(self, cluster, nodegroup):
nodegroup.create()
return nodegroup
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
self.assertFalse(response.json['is_default'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_without_node_count(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data()
del ng_dict['node_count']
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
# Verify node_count defaults to 1
self.assertEqual(1, response.json['node_count'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_with_zero_nodes(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data()
ng_dict['node_count'] = 0
ng_dict['min_node_count'] = 0
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
# Verify node_count is set to zero
self.assertEqual(0, response.json['node_count'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_with_max_node_count(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data(max_node_count=5)
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertEqual(5, response.json['max_node_count'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_with_role(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data(role='test-role')
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertEqual('test-role', response.json['role'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_with_labels(self, mock_utcnow):
labels = {'label1': 'value1'}
ng_dict = apiutils.nodegroup_post_data(labels=labels)
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertEqual(labels, response.json['labels'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_with_image_id(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data(image_id='test_image')
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertEqual('test_image', response.json['image_id'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_with_flavor(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data(flavor_id='test_flavor')
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertEqual('test_flavor', response.json['flavor_id'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_only_name(self, mock_utcnow):
ng_dict = {'name': 'test_ng'}
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertEqual('worker', response.json['role'])
self.assertEqual(self.cluster_template.image_id,
response.json['image_id'])
self.assertEqual(self.cluster.flavor_id, response.json['flavor_id'])
self.assertEqual(self.cluster.uuid, response.json['cluster_id'])
self.assertEqual(self.cluster.project_id, response.json['project_id'])
self.assertEqual(self.cluster.labels, response.json['labels'])
self.assertEqual('worker', response.json['role'])
self.assertEqual(0, response.json['min_node_count'])
self.assertEqual(1, response.json['node_count'])
self.assertIsNone(response.json['max_node_count'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_invalid_node_count(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data(node_count=7, max_node_count=5)
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(self.url, ng_dict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_int)
ng_dict = apiutils.nodegroup_post_data(node_count=2, min_node_count=3)
response = self.post_json(self.url, ng_dict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_int)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_master_ng(self, mock_utcnow):
ng_dict = apiutils.nodegroup_post_data(role='master')
response = self.post_json(self.url, ng_dict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_ng_same_name(self, mock_utcnow):
existing_name = self.cluster.default_ng_master.name
ng_dict = apiutils.nodegroup_post_data(name=existing_name)
response = self.post_json(self.url, ng_dict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_int)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_ng_wrong_microversion(self, mock_utcnow):
headers = {"Openstack-Api-Version": "container-infra 1.8"}
ng_dict = apiutils.nodegroup_post_data(name="new_ng")
response = self.post_json(self.url, ng_dict, headers=headers,
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(406, response.status_int)
def test_create_ng_cluster_no_api_address(self):
# Remove the api address from the cluster and make sure
# that the request is not accepted.
self.cluster.api_address = None
self.cluster.save()
ng_dict = apiutils.nodegroup_post_data()
response = self.post_json(self.url, ng_dict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_int)
def test_create_ng_with_labels(self):
cluster_labels = {'label1': 'value1', 'label2': 'value2'}
self.cluster.labels = cluster_labels
self.cluster.save()
ng_labels = {'label3': 'value3'}
ng_dict = apiutils.nodegroup_post_data(labels=ng_labels)
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
(cluster, ng), _ = self.mock_ng_create.call_args
self.assertEqual(ng_labels, ng.labels)
def test_create_ng_with_merge_labels(self):
cluster_labels = {'label1': 'value1', 'label2': 'value2'}
self.cluster.labels = cluster_labels
self.cluster.save()
ng_labels = {'label1': 'value3', 'label4': 'value4'}
ng_dict = apiutils.nodegroup_post_data(labels=ng_labels,
merge_labels=True)
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
(cluster, ng), _ = self.mock_ng_create.call_args
expected_labels = cluster.labels
expected_labels.update(ng_labels)
self.assertEqual(expected_labels, ng.labels)
def test_create_ng_with_merge_labels_no_labels(self):
cluster_labels = {'label1': 'value1', 'label2': 'value2'}
self.cluster.labels = cluster_labels
self.cluster.save()
ng_dict = apiutils.nodegroup_post_data(merge_labels=True)
ng_dict.pop('labels')
response = self.post_json(self.url, ng_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
(cluster, ng), _ = self.mock_ng_create.call_args
self.assertEqual(cluster.labels, ng.labels)
class TestDelete(NodeGroupControllerTest):
def setUp(self):
super(TestDelete, self).setUp()
self.cluster_template = obj_utils.create_test_cluster_template(
self.context)
self.cluster = obj_utils.create_test_cluster(self.context)
self.cluster.refresh()
self.nodegroup = obj_utils.create_test_nodegroup(
self.context, cluster_id=self.cluster.uuid, is_default=False)
p = mock.patch.object(rpcapi.API, 'nodegroup_delete_async')
self.mock_ng_delete = p.start()
self.mock_ng_delete.side_effect = self._simulate_nodegroup_delete
self.addCleanup(p.stop)
self.url = "/clusters/%s/nodegroups/" % self.cluster.uuid
def _simulate_nodegroup_delete(self, cluster, nodegroup):
nodegroup.destroy()
def test_delete_nodegroup(self):
response = self.delete(self.url + self.nodegroup.uuid)
self.assertEqual(204, response.status_int)
response = self.get_json(self.url + self.nodegroup.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIsNotNone(response.json['errors'])
def test_delete_nodegroup_by_name(self):
response = self.delete(self.url + self.nodegroup.name)
self.assertEqual(204, response.status_int)
response = self.get_json(self.url + self.nodegroup.name,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIsNotNone(response.json['errors'])
def test_delete_not_found(self):
uuid = uuidutils.generate_uuid()
response = self.delete(self.url + uuid, expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIsNotNone(response.json['errors'])
def test_delete_by_name_not_found(self):
response = self.delete(self.url + "not-there", expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIsNotNone(response.json['errors'])
def test_delete_default_nodegroup(self):
response = self.delete(self.url + self.cluster.default_ng_master.uuid,
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIsNotNone(response.json['errors'])
@mock.patch("magnum.common.policy.enforce")
@mock.patch("magnum.common.context.make_context")
def test_delete_nodegroup_as_admin(self, mock_context, mock_policy):
cluster_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(self.context, uuid=cluster_uuid,
project_id='fake', name='test-fake')
ng_uuid = uuidutils.generate_uuid()
obj_utils.create_test_nodegroup(self.context, uuid=ng_uuid,
cluster_id=cluster_uuid,
is_default=False,
project_id='fake', id=50)
self.context.is_admin = True
url = '/clusters/%s/nodegroups/%s' % (cluster_uuid, ng_uuid)
response = self.delete(url)
self.assertEqual(204, response.status_int)
def test_delete_wrong_microversion(self):
headers = {"Openstack-Api-Version": "container-infra 1.8"}
response = self.delete(self.url + self.nodegroup.uuid, headers=headers,
expect_errors=True)
self.assertEqual(406, response.status_int)
class TestPatch(NodeGroupControllerTest):
def setUp(self):
super(TestPatch, self).setUp()
self.cluster_template = obj_utils.create_test_cluster_template(
self.context)
self.cluster = obj_utils.create_test_cluster(self.context)
self.cluster.refresh()
self.nodegroup = obj_utils.create_test_nodegroup(
self.context, cluster_id=self.cluster.uuid, is_default=False,
min_node_count=2, max_node_count=5, node_count=2)
p = mock.patch.object(rpcapi.API, 'nodegroup_update_async')
self.mock_ng_update = p.start()
self.mock_ng_update.side_effect = self._simulate_nodegroup_update
self.addCleanup(p.stop)
self.url = "/clusters/%s/nodegroups/" % self.cluster.uuid
def _simulate_nodegroup_update(self, cluster, nodegroup):
nodegroup.save()
return nodegroup
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok(self, mock_utcnow):
max_node_count = 4
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json(self.url + self.nodegroup.uuid,
[{'path': '/max_node_count',
'value': max_node_count,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json(self.url + self.nodegroup.uuid)
self.assertEqual(max_node_count, response['max_node_count'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name(self, mock_utcnow):
max_node_count = 4
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/max_node_count',
'value': max_node_count,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json(self.url + self.nodegroup.uuid)
self.assertEqual(max_node_count, response['max_node_count'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
def test_replace_node_count_failed(self):
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/node_count',
'value': 3,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertIsNotNone(response.json['errors'])
def test_replace_max_node_count_failed(self):
        # min_node_count equals 2. Verify that if the max_node_count
        # is less than the min the patch fails
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/max_node_count',
'value': 1,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
self.assertIsNotNone(response.json['errors'])
def test_replace_min_node_count_failed(self):
        # node_count equals 2. Verify that if the min_node_count
        # is greater than the node_count the patch fails
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/min_node_count',
'value': 3,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
self.assertIsNotNone(response.json['errors'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_remove_ok(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/max_node_count',
'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json(self.url + self.nodegroup.uuid)
self.assertIsNone(response['max_node_count'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_remove_min_node_count(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/min_node_count',
'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json(self.url + self.nodegroup.uuid)
# Removing the min_node_count just restores the default value
self.assertEqual(0, response['min_node_count'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_remove_internal_attr(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/node_count',
'op': 'remove'}], expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertIsNotNone(response.json['errors'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_remove_non_existent_property(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/not_there',
'op': 'remove'}], expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertIsNotNone(response.json['errors'])
@mock.patch("magnum.common.policy.enforce")
@mock.patch("magnum.common.context.make_context")
def test_update_nodegroup_as_admin(self, mock_context, mock_policy):
cluster_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(self.context, uuid=cluster_uuid,
project_id='fake', name='test-fake')
ng_uuid = uuidutils.generate_uuid()
obj_utils.create_test_nodegroup(self.context, uuid=ng_uuid,
cluster_id=cluster_uuid,
is_default=False,
project_id='fake', id=50)
self.context.is_admin = True
url = '/clusters/%s/nodegroups/%s' % (cluster_uuid, ng_uuid)
response = self.patch_json(url,
[{'path': '/max_node_count',
'value': 4,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
def test_replace_wrong_microversion(self):
headers = {"Openstack-Api-Version": "container-infra 1.8"}
response = self.patch_json(self.url + self.nodegroup.name,
[{'path': '/max_node_count',
'value': 4,
'op': 'replace'}], headers=headers,
expect_errors=True)
self.assertEqual(406, response.status_code)
class TestNodeGroupPolicyEnforcement(NodeGroupControllerTest):
def setUp(self):
super(TestNodeGroupPolicyEnforcement, self).setUp()
obj_utils.create_test_cluster_template(self.context)
self.cluster_uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(
self.context, uuid=self.cluster_uuid)
self.cluster = objects.Cluster.get_by_uuid(self.context,
self.cluster_uuid)
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({rule: "project:non_fake"})
response = func(*arg, **kwarg)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])
def test_policy_disallow_get_all(self):
self._common_policy_check(
"nodegroup:get_all", self.get_json,
'/clusters/%s/nodegroups' % self.cluster_uuid, expect_errors=True)
def test_policy_disallow_get_one(self):
worker = self.cluster.default_ng_worker
self._common_policy_check(
"nodegroup:get", self.get_json,
'/clusters/%s/nodegroups/%s' % (self.cluster.uuid, worker.uuid),
expect_errors=True)
|
{
"content_hash": "c08339b2bcea1dd94f1a696183ee5ae4",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 79,
"avg_line_length": 47.3218085106383,
"alnum_prop": 0.6204687236553701,
"repo_name": "openstack/magnum",
"id": "a6f73d54b2c9389cdb2c6d9a44dc5dd730f72427",
"size": "36249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/tests/unit/api/controllers/v1/test_nodegroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8788"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "2302791"
},
{
"name": "Shell",
"bytes": "547968"
}
],
"symlink_target": ""
}
|
import hashlib
import binascii
import random
import time
from Crypto.Cipher import AES
from Crypto.Util import Counter
rng = random.SystemRandom()
def init_key_generation(keylengthbits):
if keylengthbits < 8:
keylengthbits = 8
elif keylengthbits % 8 != 0:
keylengthbits += ( 8 - keylengthbits % 8)
key = ""
iters = keylengthbits / 8
while iters > 0:
key += format(rng.randint(0,255), '02x')
iters -= 1
return key
def do_xor_on_hex(hexstring1, hexstring2):
v1 = 0
v2 = 0
index = 0
hexstr1nums = []
hexstr2nums = []
finalnums = []
xorlen = len(hexstring1)
finalxor = ""
if xorlen != len(hexstring2) or xorlen % 2 != 0:
print "ERROR!"
return None
while v1 <= (xorlen - 2):
hexstr1nums.append(int(hexstring1[(v1):(v1+2)],16))
v1 += 2
while v2 <= (xorlen - 2):
hexstr2nums.append(int(hexstring2[(v2):(v2+2)],16))
v2 += 2
while index < (xorlen / 2):
finalnums.append(hexstr1nums[index] ^ hexstr2nums[index])
index += 1
for i in finalnums:
finalxor += format(i, '02x')
return finalxor
def generate_header_file(masterpassword, flen, fname, hver):
filelength = str(flen)
headername = str(fname) + ".header"
headerversion = format(hver, '02x')
if len(headerversion) != 2:
print "BAD HVER, ABORT"
return None
headercontents = ""
salt_to_use = init_key_generation(128)
#print "Salt used: " + salt_to_use
master_key = init_key_generation(512)
#print "Master key: " + master_key
encrypted_key = do_xor_on_hex(master_key, binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterpassword, salt_to_use, 100000)))
#print "Encrypted key: " + encrypted_key
headerfile = open(headername, "wb")
headercontents = headerversion + salt_to_use + encrypted_key + filelength
headerfile.write(headercontents)
headerfile.close()
return master_key, salt_to_use
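# Header layout written by generate_header_file above (all fields hex-encoded ASCII):
#   chars 0-1    : header version ('01' for hver=1)
#   chars 2-33   : 128-bit salt (32 hex chars)
#   chars 34-161 : 512-bit master key XORed with PBKDF2-HMAC-SHA512(password, salt, 100000 rounds)
#   chars 162-   : decimal length of the hex-encoded plaintext
# read_header_file below parses exactly these offsets.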
def read_header_file(masterpassword, fname):
headername = str(fname) + ".header"
headerfile = open(headername, "rb")
totalheader = headerfile.read()
header_version = int(totalheader[0:2],16)
header_salt = totalheader[2:34]
header_encrypted_key = totalheader[34:162]
header_master_key = do_xor_on_hex(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterpassword, header_salt, 100000)), header_encrypted_key)
header_length = totalheader[162:]
print "Salt used: " + header_salt
print "Master key: " + header_master_key
print "Encrypted key: " + header_encrypted_key
print "File length: " + header_length
headerfile.close()
return header_master_key, header_length, header_version, header_salt
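# --- Editor's note (illustrative, inferred from the slices above; not part of the original script) ---
# The .header file appears to be one ASCII string laid out as:
#   chars [0:2]    header version (one byte, as two hex chars)
#   chars [2:34]   128-bit salt (32 hex chars)
#   chars [34:162] 512-bit master key, XOR-encrypted with PBKDF2(password, salt)
#   chars [162:]   decimal length of the hex-encoded plaintext
# This summary is reconstructed from the slice offsets, not documentation shipped with the script.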
def edit_header_file(oldpassword, newpassword, fname):
headername = str(fname) + ".header"
headerfile = open(headername, "rb")
totalheader = headerfile.read()
headerfile.close()
newheadercontents = ""
header_version = totalheader[0:2]
header_salt = totalheader[2:34]
header_encrypted_key = totalheader[34:162]
header_master_key = do_xor_on_hex(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', oldpassword, header_salt, 100000)), header_encrypted_key)
header_new_encrypted_key = do_xor_on_hex(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', newpassword, header_salt, 100000)), header_master_key)
header_length = totalheader[162:]
newheadercontents = header_version + header_salt + header_new_encrypted_key + header_length
headerfile = open(headername, "wb")
headerfile.write(newheadercontents)
headerfile.close()
return "Done"
def hex_transpose(hexstr):
v1 = 0
newhex1 = ""
newhex2 = ""
hexlen = len(hexstr)
while v1 < (hexlen):
newhex1 += hexstr[v1+1] + hexstr[v1]
v1 += 2
newhex2 = newhex1[(hexlen/2):] + newhex1[0:(hexlen/2)]
return newhex2
def advance_cipher(inithash):
new_hash = hashlib.sha512(inithash).hexdigest()
transposed_hash = hex_transpose(new_hash)
hash_of_hash = hashlib.sha512(transposed_hash).hexdigest()
return new_hash, hash_of_hash
def advance_cipher_2(inithash, ptextfb):
new_hash = hashlib.sha512(inithash).hexdigest()
transposed_hash = hex_transpose(new_hash) + hex_transpose(ptextfb)
hash_of_hash = hashlib.sha512(transposed_hash).hexdigest()
return new_hash, hash_of_hash
def encrypt_file_1(filename, masterpassword):
output_filename = filename + ".crypt"
file_to_encrypt = open(filename, "rb")
file_to_output = open(output_filename, "wb")
file_to_output_hex = ""
current_key_to_xor = ""
startlen = 0
file_to_encrypt_bin = file_to_encrypt.read()
file_to_encrypt_hex = binascii.hexlify(file_to_encrypt_bin)
file_length = len(file_to_encrypt_hex)
masterkey, uss = generate_header_file(masterpassword, file_length, filename, 1)
file_padding = 128 - (file_length % 128)
while file_padding > 0:
if file_padding >= 2:
file_to_encrypt_hex += format(rng.randint(0,255), '02x')
file_padding -= 2
else:
file_to_encrypt_hex += "0"
file_padding -= 1
file_checksum = hashlib.sha512(file_to_encrypt_hex[0:file_length]).hexdigest()
file_to_encrypt_hex += file_checksum
file_length = len(file_to_encrypt_hex)
file_to_encrypt.close()
print "Times to iterate (W/chk): " + str(file_length / 128)
print "Encrypted file checksum: ", file_checksum
times_to_iterate = file_length / 128
times_to_iterate_total = times_to_iterate
current_key = masterkey
chunk_list = []
while times_to_iterate > 0:
#print "START KEY: ", current_key
current_key, current_key_to_xor = advance_cipher(current_key)
#print "KEY AFTER CA: ", current_key
#print "KEY TO XOR: ", current_key_to_xor
chunk_list.append(do_xor_on_hex(file_to_encrypt_hex[startlen:startlen+128],current_key_to_xor))
startlen += 128
times_to_iterate -= 1
if times_to_iterate % 1000 == 0:
print "Encryption Progress: ", (times_to_iterate_total - times_to_iterate) / float(times_to_iterate_total) * 100.0, "%"
#print file_to_output_hex
file_to_output_hex = "".join(chunk_list)
file_to_output.write(binascii.unhexlify(file_to_output_hex))
file_to_output.close()
dtk, dtl, dtv, uss = read_header_file(masterpassword, filename)
if dtv == 1:
is_correct = decrypt_file_1(filename, True, dtk, dtl)
if is_correct == "File decrypted, checksum OK":
return "Encryption Done and Verified"
else:
return "ERROR!"
else:
return "BAD HEADER ERROR!"
def decrypt_file_1(filename, testmode, decryption_master_key, decryption_length):
filename_to_decrypt = filename + ".crypt"
file_to_decrypt = open(filename_to_decrypt, "rb")
file_to_decrypt_bin = file_to_decrypt.read()
file_to_decrypt_hex = binascii.hexlify(file_to_decrypt_bin)
file_to_decrypt.close()
file_to_decrypt_output_hex = ""
real_file_to_decrypt_output_hex = ""
decrypt_checksum = ""
checksum_ok = False
current_key_to_xor_decrypt = ""
startlen_decrypt = 0
times_to_iterate_decrypt = len(file_to_decrypt_hex) / 128
times_to_iterate_decrypt_total = times_to_iterate_decrypt
current_key_decrypt = decryption_master_key
chunk_list_decrypt = []
while times_to_iterate_decrypt > 0:
current_key_decrypt, current_key_to_xor_decrypt = advance_cipher(current_key_decrypt)
chunk_list_decrypt.append(do_xor_on_hex(file_to_decrypt_hex[startlen_decrypt:startlen_decrypt+128],current_key_to_xor_decrypt))
startlen_decrypt += 128
times_to_iterate_decrypt -= 1
if times_to_iterate_decrypt % 1000 == 0:
print "Decryption Progress: ", (times_to_iterate_decrypt_total - times_to_iterate_decrypt) / float(times_to_iterate_decrypt_total) * 100.0, "%"
file_to_decrypt_output_hex = "".join(chunk_list_decrypt)
decrypt_checksum = file_to_decrypt_output_hex[-128:]
real_file_to_decrypt_output_hex = file_to_decrypt_output_hex[0:int(decryption_length)]
print "Decrypted file checksum (read): ", decrypt_checksum
print "Decrypted file checksum (calculated): ", hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest()
if decrypt_checksum == hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest():
checksum_ok = True
if testmode == False:
file_to_decrypt_output = open(filename, "wb")
file_to_decrypt_output.write(binascii.unhexlify(real_file_to_decrypt_output_hex))
file_to_decrypt_output.close()
if checksum_ok == True:
return "File decrypted, checksum OK"
else:
return "Wrong key, corrupted file or not a valid container"
def encrypt_file_2(filename, masterpassword):
output_filename = filename + ".crypt"
file_to_encrypt = open(filename, "rb")
file_to_output = open(output_filename, "wb")
file_to_output_hex = ""
current_key_to_xor = ""
startlen = 0
file_to_encrypt_bin = file_to_encrypt.read()
file_to_encrypt_hex = binascii.hexlify(file_to_encrypt_bin)
file_length = len(file_to_encrypt_hex)
masterkey, iv = generate_header_file(masterpassword, file_length, filename, 2)
file_padding = 128 - (file_length % 128)
while file_padding > 0:
if file_padding >= 2:
file_to_encrypt_hex += format(rng.randint(0,255), '02x')
file_padding -= 2
else:
file_to_encrypt_hex += "0"
file_padding -= 1
file_checksum = hashlib.sha512(file_to_encrypt_hex[0:file_length]).hexdigest()
file_to_encrypt_hex += file_checksum
file_length = len(file_to_encrypt_hex)
file_to_encrypt.close()
print "Times to iterate (W/chk): " + str(file_length / 128)
print "Encrypted file checksum: ", file_checksum
times_to_iterate = file_length / 128
times_to_iterate_total = times_to_iterate
current_key = masterkey
iv_hash = hashlib.sha512(iv).hexdigest()
current_plaintext_hash_feedback = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterkey, iv_hash, 100000))
chunk_list = []
while times_to_iterate > 0:
#print "START KEY: ", current_key
current_key, current_key_to_xor = advance_cipher_2(current_key, current_plaintext_hash_feedback)
#print "KEY AFTER CA: ", current_key
#print "KEY TO XOR: ", current_key_to_xor
current_plaintext_chunk = file_to_encrypt_hex[startlen:startlen+128]
current_plaintext_hash_feedback = hashlib.sha512(current_plaintext_chunk).hexdigest()
chunk_list.append(do_xor_on_hex(file_to_encrypt_hex[startlen:startlen+128],current_key_to_xor))
startlen += 128
times_to_iterate -= 1
if times_to_iterate % 1000 == 0:
print "Encryption Progress: ", (times_to_iterate_total - times_to_iterate) / float(times_to_iterate_total) * 100.0, "%"
#print file_to_output_hex
file_to_output_hex = "".join(chunk_list)
file_to_output.write(binascii.unhexlify(file_to_output_hex))
file_to_output.close()
dtk, dtl, dtv, div = read_header_file(masterpassword, filename)
if dtv == 2:
is_correct = decrypt_file_2(filename, True, dtk, dtl, div)
if is_correct == "File decrypted, checksum OK":
return "Encryption Done and Verified"
else:
return "ERROR!"
else:
return "BAD HEADER ERROR!"
def decrypt_file_2(filename, testmode, decryption_master_key, decryption_length, decryption_iv):
filename_to_decrypt = filename + ".crypt"
file_to_decrypt = open(filename_to_decrypt, "rb")
file_to_decrypt_bin = file_to_decrypt.read()
file_to_decrypt_hex = binascii.hexlify(file_to_decrypt_bin)
file_to_decrypt.close()
file_to_decrypt_output_hex = ""
real_file_to_decrypt_output_hex = ""
decrypt_checksum = ""
checksum_ok = False
current_key_to_xor_decrypt = ""
startlen_decrypt = 0
times_to_iterate_decrypt = len(file_to_decrypt_hex) / 128
times_to_iterate_decrypt_total = times_to_iterate_decrypt
current_key_decrypt = decryption_master_key
decryption_iv_hash = hashlib.sha512(decryption_iv).hexdigest()
current_plaintext_hash_feedback_decipher = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', decryption_master_key, decryption_iv_hash, 100000))
chunk_list_decrypt = []
while times_to_iterate_decrypt > 0:
current_key_decrypt, current_key_to_xor_decrypt = advance_cipher_2(current_key_decrypt, current_plaintext_hash_feedback_decipher)
current_deciphered_chunk = do_xor_on_hex(file_to_decrypt_hex[startlen_decrypt:startlen_decrypt+128],current_key_to_xor_decrypt)
chunk_list_decrypt.append(current_deciphered_chunk)
current_plaintext_hash_feedback_decipher = hashlib.sha512(current_deciphered_chunk).hexdigest()
startlen_decrypt += 128
times_to_iterate_decrypt -= 1
if times_to_iterate_decrypt % 1000 == 0:
print "Decryption Progress: ", (times_to_iterate_decrypt_total - times_to_iterate_decrypt) / float(times_to_iterate_decrypt_total) * 100.0, "%"
file_to_decrypt_output_hex = "".join(chunk_list_decrypt)
decrypt_checksum = file_to_decrypt_output_hex[-128:]
real_file_to_decrypt_output_hex = file_to_decrypt_output_hex[0:int(decryption_length)]
print "Decrypted file checksum (read): ", decrypt_checksum
print "Decrypted file checksum (calculated): ", hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest()
if decrypt_checksum == hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest():
checksum_ok = True
if testmode == False:
file_to_decrypt_output = open(filename, "wb")
file_to_decrypt_output.write(binascii.unhexlify(real_file_to_decrypt_output_hex))
file_to_decrypt_output.close()
if checksum_ok == True:
return "File decrypted, checksum OK"
else:
return "Wrong key, corrupted file or not a valid container"
def encrypt_file_3(filename, masterpassword, encmethod):
output_filename = filename + ".crypt"
file_to_encrypt = open(filename, "rb")
file_to_output = open(output_filename, "wb")
file_to_output_hex = ""
startlen = 0
file_to_encrypt_bin = file_to_encrypt.read()
file_to_encrypt_hex = binascii.hexlify(file_to_encrypt_bin)
file_length = len(file_to_encrypt_hex)
masterkey, iv = generate_header_file(masterpassword, file_length, filename, encmethod)
file_padding = 32 - (file_length % 32)
while file_padding > 0:
if file_padding >= 2:
file_to_encrypt_hex += format(rng.randint(0,255), '02x')
file_padding -= 2
else:
file_to_encrypt_hex += "0"
file_padding -= 1
file_checksum = hashlib.sha512(file_to_encrypt_hex[0:file_length]).hexdigest()
file_to_encrypt_hex += file_checksum
file_length = len(file_to_encrypt_hex)
file_to_encrypt.close()
print "Times to iterate (W/chk): " + str(file_length / 32)
print "Encrypted file checksum: ", file_checksum
times_to_iterate = file_length / 32
times_to_iterate_total = times_to_iterate
current_key = hashlib.sha256(masterkey).digest()
iv_hash = hashlib.sha512(iv+masterkey).hexdigest()
real_iv_to_use = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterkey, iv_hash, 10))
real_iv_to_use = binascii.unhexlify(real_iv_to_use[:32])
chunk_list = []
if encmethod == 3:
cipher = AES.new(current_key, AES.MODE_CBC, real_iv_to_use)
elif encmethod == 4:
icv = int(binascii.hexlify(real_iv_to_use),16)
ctr = Counter.new(128, initial_value=icv)
cipher = AES.new(current_key, AES.MODE_CTR, counter=ctr)
elif encmethod == 5:
cipher = AES.new(current_key, AES.MODE_CFB, real_iv_to_use)
elif encmethod == 6:
cipher = AES.new(current_key, AES.MODE_OFB, real_iv_to_use)
while times_to_iterate > 0:
current_plaintext_chunk = binascii.unhexlify(file_to_encrypt_hex[startlen:startlen+32])
chunk_list.append(binascii.hexlify(cipher.encrypt(current_plaintext_chunk)))
startlen += 32
times_to_iterate -= 1
if times_to_iterate % 15000 == 0:
print "Encryption Progress: ", (times_to_iterate_total - times_to_iterate) / float(times_to_iterate_total) * 100.0, "%"
#print file_to_output_hex
file_to_output_hex = "".join(chunk_list)
file_to_output.write(binascii.unhexlify(file_to_output_hex))
file_to_output.close()
dtk, dtl, dtv, div = read_header_file(masterpassword, filename)
if dtv == 3 or dtv == 4 or dtv == 5 or dtv == 6:
is_correct = decrypt_file_3(filename, True, dtk, dtl, div, dtv)
if is_correct == "File decrypted, checksum OK":
return "Encryption Done and Verified"
else:
return "ERROR!"
else:
return "BAD HEADER ERROR!"
def decrypt_file_3(filename, testmode, decryption_master_key, decryption_length, decryption_iv, encmethod):
filename_to_decrypt = filename + ".crypt"
file_to_decrypt = open(filename_to_decrypt, "rb")
file_to_decrypt_bin = file_to_decrypt.read()
file_to_decrypt_hex = binascii.hexlify(file_to_decrypt_bin)
file_to_decrypt.close()
file_to_decrypt_output_hex = ""
real_file_to_decrypt_output_hex = ""
decrypt_checksum = ""
checksum_ok = False
startlen_decrypt = 0
times_to_iterate_decrypt = len(file_to_decrypt_hex) / 32
times_to_iterate_decrypt_total = times_to_iterate_decrypt
current_key = hashlib.sha256(decryption_master_key).digest()
iv_hash = hashlib.sha512(decryption_iv+decryption_master_key).hexdigest()
real_iv_to_use = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', decryption_master_key, iv_hash, 10))
real_iv_to_use = binascii.unhexlify(real_iv_to_use[:32])
chunk_list_decrypt = []
if encmethod == 3:
cipher = AES.new(current_key, AES.MODE_CBC, real_iv_to_use)
elif encmethod == 4:
icv = int(binascii.hexlify(real_iv_to_use),16)
ctr = Counter.new(128, initial_value=icv)
cipher = AES.new(current_key, AES.MODE_CTR, counter=ctr)
elif encmethod == 5:
cipher = AES.new(current_key, AES.MODE_CFB, real_iv_to_use)
elif encmethod == 6:
cipher = AES.new(current_key, AES.MODE_OFB, real_iv_to_use)
while times_to_iterate_decrypt > 0:
current_deciphered_chunk = cipher.decrypt(binascii.unhexlify(file_to_decrypt_hex[startlen_decrypt:startlen_decrypt+32]))
chunk_list_decrypt.append(binascii.hexlify(current_deciphered_chunk))
startlen_decrypt += 32
times_to_iterate_decrypt -= 1
if times_to_iterate_decrypt % 15000 == 0:
print "Decryption Progress: ", (times_to_iterate_decrypt_total - times_to_iterate_decrypt) / float(times_to_iterate_decrypt_total) * 100.0, "%"
file_to_decrypt_output_hex = "".join(chunk_list_decrypt)
decrypt_checksum = file_to_decrypt_output_hex[-128:]
real_file_to_decrypt_output_hex = file_to_decrypt_output_hex[0:int(decryption_length)]
print "Decrypted file checksum (read): ", decrypt_checksum
print "Decrypted file checksum (calculated): ", hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest()
if decrypt_checksum == hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest():
checksum_ok = True
if testmode == False:
file_to_decrypt_output = open(filename, "wb")
file_to_decrypt_output.write(binascii.unhexlify(real_file_to_decrypt_output_hex))
file_to_decrypt_output.close()
if checksum_ok == True:
return "File decrypted, checksum OK"
else:
return "Wrong key, corrupted file or not a valid container"
print "Encryption Test Program r3.0"
print "by fabrizziop"
print "MIT licence"
what_to_do = int(raw_input("1: Encrypt, 2: Decrypt , 3: Change Password: "))
if what_to_do == 1:
mpas = str(raw_input("Master Password: "))
fnm = str(raw_input("File Name: "))
print "Methods:"
print "For AES methods, key is SHA-256 of master key, IV is 100k rounds SHA-512-HMAC PKBDF2 of master key and SHA-512 of master key+salt"
print "1: SHA512 stream, transpose, SHA512 again, then XOR"
print "2: SHA512 stream, transpose, append transposed SHA512 of plaintext chunk, SHA512 again, then XOR"
print "3: AES-256-CBC"
print "4: AES-256-CTR"
print "5: AES-256-CFB"
print "6: AES-256-OFB"
method = int(raw_input("Pick a method: "))
if method == 1:
print encrypt_file_1(fnm, mpas)
elif method == 2:
print encrypt_file_2(fnm, mpas)
elif method == 3 or method == 4 or method == 5 or method == 6:
print encrypt_file_3(fnm, mpas, method)
elif what_to_do == 2:
mpas = str(raw_input("Master Password: "))
fnm = str(raw_input("File Name: "))
dmk, dl, dv, dciv = read_header_file(mpas, fnm)
if dv == 1:
print "Method: SHA512 stream, transpose, SHA512 again, then XOR"
print decrypt_file_1(fnm, False, dmk, dl)
elif dv == 2:
print "Method: SHA512 stream, transpose, append transposed SHA512 of plaintext chunk, SHA512 again, then XOR"
print decrypt_file_2(fnm, False, dmk, dl, dciv)
elif dv == 3:
print "3: AES-256-CBC"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
elif dv == 4:
print "3: AES-256-CTR"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
elif dv == 5:
print "3: AES-256-CFB"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
elif dv == 6:
print "3: AES-256-OFB"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
else:
print "FILE NOT COMPATIBLE"
elif what_to_do == 3:
opas = str(raw_input("Old Password: "))
npas = str(raw_input("New Password: "))
fnm = str(raw_input("File Name: "))
print edit_header_file(opas, npas, fnm)
time.sleep(3)
|
{
"content_hash": "444e62f5463b40ef7815b55d6e56bc62",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 155,
"avg_line_length": 45.78958333333333,
"alnum_prop": 0.6548978570453615,
"repo_name": "fabrizziop/test-encryption",
"id": "224aaedcd1d3a7be415e2a1ef786fbf1f8861ef6",
"size": "21979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "encryptionr30.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51201"
}
],
"symlink_target": ""
}
|
import argparse
import unittest
from gluon import current
from gluon.storage import Storage
current.data = Storage()
# @ToDo: Load these only when running Selenium tests
# (shouldn't be required for Smoke tests)
# (means removing the *)
from selenium import webdriver
from tests.asset import *
from tests.inv import *
from tests.member import *
from tests.org import *
from tests.project import *
from tests.staff import *
from tests.volunteer import *
from tests.helpers import *
from tests.event import *
def loadAllTests():
# Run the file private/templates/<current_template>/tests.py to get tests list.
path = os.path.join(request.folder,
"private", "templates",
settings.get_template(),
"tests.py")
if os.path.exists(path):
settings.exec_template(path)
else:
# Fallback to the default template tests.
path = os.path.join(request.folder,
"private", "templates",
"default",
"tests.py")
settings.exec_template(path)
tests_list = current.selenium_tests
loadTests = unittest.TestLoader().loadTestsFromTestCase
# Initialise the suite with the first test.
exec("suite = loadTests(%s)" % tests_list[0])
# Shortcut
addTests = suite.addTests
# Add all tests to the suite.
for i in range(1, len(tests_list)):
exec("addTests(loadTests(%s))" % tests_list[i])
return suite
# Set up the command line arguments
desc = "Script to run the Sahana Eden test suite."
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("-C", "--class",
help = "Name of class to run")
method_desc = """Name of method to run, this is used in conjunction with the
class argument or with the name of the class followed by the name of the method
separated with a period, class.method.
"""
parser.add_argument("-M",
"--method",
"--test",
help = method_desc)
parser.add_argument("-A",
"--auth",
help = "web2py default argument feed")
parser.add_argument("-V", "--verbose",
type = int,
default = 2,
help = "The level of verbose reporting")
parser.add_argument("--nohtml",
action='store_const',
const=True,
help = "Disable HTML reporting.")
parser.add_argument("--html-path",
help = "Path where the HTML report will be saved.",
default = "")
parser.add_argument("--html-name-date",
action='store_const',
const=True,
help = "Include just the date in the name of the HTML report.")
suite_desc = """This will execute a standard testing schedule. The valid values
are smoke, quick, complete and full. If a method or class option is selected
then the suite will be ignored.
The suite options can be described as follows:
smoke: This will run the broken link test
quick: This will run all the tests marked as essential
complete: This will run all tests except those marked as long
full: This will run all tests
"""
parser.add_argument("--suite",
help = suite_desc,
choices = ["smoke", "roles", "quick", "complete", "full"],
default = "quick")
parser.add_argument("--link-depth",
type = int,
default = 16,
help = "The recursive depth when looking for links")
desc = """This will record the timings in a spreadsheet file. The data
will be accumulated over time, holding a maximum of 100 results; the file will
be rotated automatically. This will hold details for another program to analyse.
The file will be written to the same location as the HTML report.
"""
parser.add_argument("-r",
"--record-timings",
action='store_const',
const=True,
help = desc)
up_desc = """The user name and password, separated by a /. Multiple user name
and passwords can be added by separating them with a comma. If multiple user
name and passwords are provided then the same test will be run sequentially
using the given user in each case.
"""
parser.add_argument("--user-password",
default = "admin@example.com/testing",
help = up_desc)
parser.add_argument("--keep-browser-open",
help = "Keep the browser open once the tests have finished running",
action='store_const',
const = True)
parser.add_argument("--browser",
help = "Set the browser to use (Firefox/Chrome)",
action = "store",
default = "Firefox")
desc = """Run the smoke tests even if debug is set to true.
With debug on it can add up to a second per link, and given that a full run
of the smoke tests will include thousands of links, the difference of having
this setting on can be measured in hours.
"""
parser.add_argument("--force-debug",
action='store_const',
const=True,
help = desc)
desc = """Set a threshold in seconds.
If in the smoke tests it takes longer than this to get the link then it will be reported.
"""
parser.add_argument("--threshold",
type = int,
default = 10,
help = desc)
desc = """Smoke test report only.
Don't actually run the smoke tests but rebuild the smoke test report.
"""
parser.add_argument("--smoke-report",
action='store_const',
const=True,
help = desc)
argsObj = parser.parse_args()
args = argsObj.__dict__
active_driver = {'firefox': webdriver.Firefox,
'chrome': webdriver.Chrome}[args['browser'].lower()]
# Read Settings
settings = current.deployment_settings
public_url = settings.get_base_public_url()
base_url = "%s/%s" % (public_url, current.request.application)
system_name = settings.get_system_name()
# Store these to be available to modules
config = current.test_config = Storage()
config.system_name = system_name
config.timeout = 5 # seconds
config.url = base_url
base_dir = os.path.join(os.getcwd(), "applications", current.request.application)
test_dir = os.path.join(base_dir, "modules", "tests")
config.base_dir = base_dir
if not args["suite"] == "smoke" and settings.get_ui_navigate_away_confirm():
print "The tests will fail unless you have settings.ui.navigate_away_confirm = False in models/000_config.py"
exit()
if args["suite"] == "smoke" or args["suite"] == "complete":
if settings.get_base_debug() and not args["force_debug"]:
print "settings.base.debug is set to True in 000_config.py, either set it to False or use the --force-debug switch"
exit()
config.record_timings = args["record_timings"]
if config.record_timings:
path = args["html_path"]
config.record_timings_filename = os.path.join(path, "Sahana-Eden-record-timings.xls")
config.record_summary_filename = os.path.join(path, "Sahana-Eden-record-summary.xls")
config.verbose = args["verbose"]
browser_open = False
# @todo test with invalid class and methods passed as CLA
if args["method"]:
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
if args["class"]:
name = "%s.%s" % (args["class"], args["method"])
else:
name = args["method"]
suite = unittest.TestLoader().loadTestsFromName(args["method"],
globals()[args["class"]]
)
elif args["class"]:
# Run a single Selenium test
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = unittest.TestLoader().loadTestsFromTestCase(globals()[args["class"]])
elif args["suite"] == "smoke":
# Run Smoke tests
try:
from tests.smoke import *
broken_links = BrokenLinkTest()
broken_links.setReportOnly( args["smoke_report"])
broken_links.setDepth(args["link_depth"])
broken_links.setThreshold(args["threshold"])
broken_links.setUser(args["user_password"])
suite = unittest.TestSuite()
suite.addTest(broken_links)
except NameError as msg:
from s3 import s3_debug
s3_debug("%s, unable to run the smoke tests." % msg)
pass
elif args["suite"] == "roles":
# Run Roles tests
from tests.roles.test_roles import *
suite = test_roles()
elif args["suite"] == "complete":
# Run all Selenium Tests & Smoke Tests
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = loadAllTests()
try:
from tests.smoke import *
broken_links = BrokenLinkTest()
broken_links.setReportOnly( args["smoke_report"])
broken_links.setDepth(args["link_depth"])
broken_links.setThreshold(args["threshold"])
broken_links.setUser(args["user_password"])
suite.addTest(broken_links)
except NameError as msg:
from s3 import s3_debug
s3_debug("%s, unable to run the smoke tests." % msg)
pass
else:
# Run all Selenium Tests
browser = config.browser = active_driver()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = loadAllTests()
config.html = False
if args["nohtml"]:
unittest.TextTestRunner(verbosity=config.verbose).run(suite)
else:
try:
path = args["html_path"]
if args["html_name_date"]:
filename = "Sahana-Eden-%s.html" % current.request.now.date()
else:
filename = "Sahana-Eden-%s.html" % current.request.now
# Windows compatibility
filename = filename.replace(":", "-")
fullname = os.path.join(path, filename)
fp = open(fullname, "wb")
config.html = True
from tests.runner import EdenHTMLTestRunner
runner = EdenHTMLTestRunner(stream = fp,
title = "Sahana Eden",
verbosity = config.verbose,
)
runner.run(suite)
except ImportError:
config.html = False
unittest.TextTestRunner(verbosity=config.verbose).run(suite)
# Cleanup
if browser_open and not args["keep_browser_open"]:
browser.close()
# END =========================================================================
|
{
"content_hash": "7a3e5b67e1874f6a4a54e04f0bd7a98c",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 123,
"avg_line_length": 36.9041095890411,
"alnum_prop": 0.6055122494432071,
"repo_name": "sahildua2305/eden",
"id": "00219cfe12ab690e2e6d4a823e79677ddfda407e",
"size": "11030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/tests/suite.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
from itertools import chain
from six.moves import range, reduce
import os
import re
import numpy as np
import tensorflow as tf
import codecs
import sys
from tqdm import tqdm
import pickle
import logging
class Dataset():
def __init__(self, data='data/tasks_1-20_v1-2/en/',ts_num=1):
self._data = get_train_test(data,ts_num)
self.len_train = len(self._data['train']['S'])
self.len_val = len(self._data['val']['S'])
self.len_test = len(self._data['test']['S'])
def get_minibatches(self,n, minibatch_size, shuffle=False):
idx_list = np.arange(0, n, minibatch_size)
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
for idx in idx_list:
minibatches.append(np.arange(idx, min(idx + minibatch_size, n)))
return minibatches
def gen_examples(self,batch_size,tip):
"""
Divide examples into batches of size `batch_size`.
"""
minibatches = self.get_minibatches(len(self._data[tip]['S']), batch_size)
all_ex = []
for minibatch in minibatches:
mb_x1 = [self._data[tip]['S'][t] for t in minibatch]
mb_x2 = [self._data[tip]['Q'][t] for t in minibatch]
mb_y = [self._data[tip]['A'][t] for t in minibatch]
all_ex.append((np.array(mb_x1), np.array(mb_x2), np.array(mb_y)))
return all_ex
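# --- Illustrative sketch (not part of the original module) ---
# get_minibatches only produces index arrays; gen_examples then gathers the
# stories/questions/answers for each index batch. For example (assuming a
# Dataset instance `ds`):
#   ds.get_minibatches(10, 4) -> [array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([8, 9])]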
def get_train_test(which_task='data/tasks_1-20_v1-2/en/',task_num=1):
train, val, test = load_task(which_task,task_num)
data = train + test + val
vocab = sorted(reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q + a) for s, q, a in data)))
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([ len(s) for s, _, _ in data ]))
sentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data)))
query_size = max(map(len, (q for _, q, _ in data)))
if (task_num==3):
max_story_size = min(130, max_story_size)
else:
max_story_size = min(70, max_story_size)
    vocab_size = len(word_idx) + 1  # +1 for nil word
sentence_size = max(query_size, sentence_size) # for the position
sentence_size+=1
logging.info("Longest sentence length: "+ str( sentence_size))
logging.info("Longest story length: "+ str( max_story_size))
logging.info("Average story length: "+ str( mean_story_size))
logging.info("Training sample: "+ str(len(train)))
logging.info("Validation sample: "+ str(len(val)))
logging.info("Test sample: "+ str(len(test)))
logging.info("Vocab size : "+ str(vocab_size))
S, Q, A = vectorize_data(train, word_idx, sentence_size, max_story_size)
valS, valQ, valA = vectorize_data(val, word_idx, sentence_size, max_story_size)
testS, testQ, testA = vectorize_data(test, word_idx, sentence_size, max_story_size)
return {'train':{'S':S, 'Q':np.expand_dims(Q, axis=1), 'A':A},
'val':{'S':valS, 'Q':np.expand_dims(valQ, axis=1), 'A':valA},
'test':{'S':testS, 'Q':np.expand_dims(testQ, axis=1), 'A':testA},
'vocab':vocab,
'vocab_size':vocab_size,
'sent_len':sentence_size,
'sent_numb':max_story_size,
'word_idx':word_idx,
'len_training':len(train)}
def load_task(data_dir, task_id, only_supporting=False):
'''Load the nth task. There are 20 tasks in total.
Returns a tuple containing the training and testing data for the task.
'''
assert task_id > 0 and task_id < 21
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = 'qa{}_'.format(task_id)
train_file = [f for f in files if s in f and 'train' in f][0]
val_file = [f for f in files if s in f and 'valid.txt' in f][0]
test_file = [f for f in files if s in f and 'test' in f][0]
train_data = get_stories(train_file, only_supporting)
val_data = get_stories(val_file, only_supporting)
test_data = get_stories(test_file, only_supporting)
return train_data, val_data, test_data
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split('(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbI tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = str.lower(line)
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line: # question
q, a, supporting = line.split('\t')
q = tokenize(q)
#a = tokenize(a)
# answer is one vocab word even if it's actually multiple words
a = [a]
substory = None
# remove question marks
if q[-1] == "?":
q = q[:-1]
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else: # regular sentence
# remove periods
sent = tokenize(line)
if sent[-1] == ".":
sent = sent[:-1]
story.append(sent)
return data
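# --- Illustrative example (not part of the original module) ---
# parse_stories expects the raw bAbI format, where question lines carry the
# answer and supporting-fact ids after tabs (shown here as <TAB>):
#   1 Mary moved to the bathroom.
#   2 John went to the hallway.
#   3 Where is Mary?<TAB>bathroom<TAB>1
# which yields one (substory, question, [answer]) triple:
#   ([['mary','moved','to','the','bathroom'], ['john','went','to','the','hallway']],
#    ['where','is','mary'], ['bathroom'])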
def get_stories(f, only_supporting=False):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
with open(f) as f:
return parse_stories(f.readlines(), only_supporting=only_supporting)
def vectorize_data(data, word_idx, sentence_size, memory_size):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
    The answer array contains the vocabulary index of the (single-word) answer.
"""
S = []
Q = []
A = []
for story, query, answer in data:
ss = []
for i, sentence in enumerate(story, 1):
ls = max(0, sentence_size - len(sentence))
ss.append([word_idx[w] for w in sentence] + [0] * ls)
# take only the most recent sentences that fit in memory
ss = ss[::-1][:memory_size][::-1]
# Make the last word of each sentence the time 'word' which
# corresponds to vector of lookup table
#for i in range(len(ss)):
# ss[i][-1] = len(word_idx) - memory_size - i + len(ss)
# pad to memory_size
lm = max(0, memory_size - len(ss))
for _ in range(lm):
ss.append([0] * sentence_size)
lq = max(0, sentence_size - len(query))
q = [word_idx[w] for w in query] + [0] * lq
# y = np.zeros(len(word_idx) + 1) # 0 is reserved for nil word
# print(answer)
# for a in answer:
# y[word_idx[a]] = 1
S.append(ss)
Q.append(q)
A.append(word_idx[answer[0]])
return np.array(S), np.array(Q), np.array(A)
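# --- Illustrative sketch (not part of the original module) ---
# A tiny hand-checked example of the padding behaviour, assuming
# word_idx = {'mary': 1, 'moved': 2, 'where': 3, 'bathroom': 4},
# sentence_size=3 and memory_size=2:
#   vectorize_data([([['mary', 'moved']], ['where', 'mary'], ['bathroom'])], word_idx, 3, 2)
#   S -> [[[1, 2, 0], [0, 0, 0]]]   (sentence padded, one empty memory added)
#   Q -> [[3, 1, 0]]
#   A -> [4]                        (index of the answer word)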
def gen_embeddings(word_dict, dim, in_file=None, init=None):
"""
Generate an initial embedding matrix for `word_dict`.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
num_words = max(word_dict.values()) +1
# embeddings = np.zeros((num_words, dim))
embeddings = np.random.standard_normal(size=(num_words, dim))
logging.info('Embeddings: %d x %d' % (num_words, dim))
if in_file is not None:
logging.info('Loading embedding file: %s' % in_file)
pre_trained = 0
for line in open(in_file).readlines():
sp = line.split()
assert len(sp) == dim + 1
if sp[0] in word_dict:
pre_trained += 1
embeddings[word_dict[sp[0]]] = [float(x) for x in sp[1:]]
logging.info('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / num_words))
return embeddings
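# --- Illustrative note (not part of the original module) ---
# The optional embedding file is assumed to be GloVe-style text, one
# whitespace-separated line per word with exactly `dim` floats, e.g. for dim == 3:
#   the 0.418 0.24968 -0.41242
# Words missing from the file keep their random standard-normal rows.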
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries'):
# mean = tf.reduce_mean(var)
# tf.scalar_summary('mean/' + name, mean)
# with tf.name_scope('stddev'):
# stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
# tf.scalar_summary('stddev/' + name, stddev)
# tf.scalar_summary('max/' + name, tf.reduce_max(var))
# tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.summary.histogram(name, var)
|
{
"content_hash": "0aa36501585ceadf501f7b736d0bd4e7",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 114,
"avg_line_length": 37.31578947368421,
"alnum_prop": 0.579364218292286,
"repo_name": "andreamad8/QDREN",
"id": "5db8cef90ee691c29fadcc67224bbad81b6fe03f",
"size": "9217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bAbI/src/utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122584"
}
],
"symlink_target": ""
}
|
from functools import update_wrapper
from collections import Iterable
__author__ = 'y.gavenchuk aka murminathor'
__all__ = ['data_provider', ]
def _get_data_source(data_set_source):
if callable(data_set_source):
data_source = data_set_source()
elif isinstance(data_set_source, Iterable):
data_source = data_set_source
else:
raise TypeError(
"The '{0}' is unsupported type of data set source!".format(
type(data_set_source)
)
)
if not data_source:
raise ValueError("There's no data in current data set!")
return data_source
def data_provider(data_set_source):
"""
Data provider decorator, allows another callable to provide the data
    for the test. If data_set_source is empty or (if callable) returns an
    empty sequence, a ValueError exception will be raised.
:param collections.Iterable | callable data_set_source: data source
:raises: TypeError|ValueError
"""
def test_decorator(fn):
# next if statement added 'cause MagicMock in python2 raises
# AttributeError.
# See https://code.google.com/p/mock/issues/detail?id=67
if not hasattr(fn, '__name__'):
setattr(fn, '__name__', str(fn))
def repl(self, *args):
# The setUp method has been called already.
# And the tearDown cannot be called after last iteration
# The next code solves this contradiction
def _set_up():
"""
                Replace the local setUp placeholder with the original
                TestCase instance's setUp
"""
repl._setUp = self.setUp
def _tear_down():
"""
                Replace the local tearDown placeholder with the original
                TestCase instance's tearDown
"""
repl._tearDown = self.tearDown
repl._setUp = _set_up
repl._tearDown = _tear_down
data_source = _get_data_source(data_set_source)
step = 0
for i in data_source:
repl._tearDown()
repl._setUp()
try:
fn(self, *i)
step += 1
except AssertionError:
msg_tpl = "Step #{step}. Assertion error caught with " \
"data set {data_set}"
print(msg_tpl.format(step=step, data_set=i))
raise
return update_wrapper(repl, fn)
return test_decorator
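# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, assumed example of applying the decorator to a unittest.TestCase;
# the class, the data set and the `unittest` import are hypothetical here:
#
#   class AdditionTest(unittest.TestCase):
#       @data_provider([(1, 2, 3), (2, 3, 5)])
#       def test_add(self, a, b, expected):
#           self.assertEqual(a + b, expected)
#
# Each tuple is unpacked as positional arguments, and setUp/tearDown are
# re-run between iterations by the replaced hooks above.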
|
{
"content_hash": "66f5734d514f5c7a35496c2e6265bcd8",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 76,
"avg_line_length": 31.475609756097562,
"alnum_prop": 0.5393258426966292,
"repo_name": "ygavenchuk/test-tools",
"id": "fd298f9cd0393b8117ef7f720f70b26bc9efd897",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_tools/dprovider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14356"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payments', '0002_auto_20160718_2345'),
]
operations = [
migrations.CreateModel(
name='VitepayPayment',
fields=[
('payment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='payments.Payment')),
('language_code', models.CharField(default=b'en', max_length=10)),
('currency_code', models.CharField(default=b'XOF', max_length=10)),
('country_code', models.CharField(default=b'ML', max_length=10)),
('order_id', models.CharField(max_length=10, null=True)),
('description', models.CharField(max_length=500, null=True)),
('amount_100', models.IntegerField(null=True)),
('buyer_ip_adress', models.CharField(max_length=200, null=True)),
('return_url', models.CharField(max_length=500, null=True)),
('decline_url', models.CharField(max_length=500, null=True)),
('cancel_url', models.CharField(max_length=500, null=True)),
('callback_url', models.CharField(max_length=500, null=True)),
('email', models.CharField(max_length=500, null=True)),
('p_type', models.CharField(default=b'orange_money', max_length=500)),
('payment_url', models.CharField(max_length=500, null=True)),
],
options={
'ordering': ('-created', '-updated'),
'verbose_name': 'Vitepay Payment',
'verbose_name_plural': 'Vitepay Payments',
},
bases=('payments.payment',),
),
]
|
{
"content_hash": "525dde0f253272b38d3e6625eaef1232",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 194,
"avg_line_length": 45.833333333333336,
"alnum_prop": 0.5750649350649351,
"repo_name": "onepercentclub/bluebottle",
"id": "f709e9abdda917983a56be8d0236370b06343eef",
"size": "1997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/payments_vitepay/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
from var_plots import plot_input
plot_input()
|
{
"content_hash": "5efabdc020261e9c4a016b8baa8dcca6",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 15.666666666666666,
"alnum_prop": 0.7659574468085106,
"repo_name": "josef-pkt/statsmodels",
"id": "969f238eb309b780d9a944391f6f9230b5bf48db",
"size": "47",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "docs/source/plots/var_plot_input.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14428857"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25322"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
}
|
"""Tests that show Distribute Coordinator works with Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import sys
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import estimator_training as dc_training
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.eager import context
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.estimator import training as estimator_training
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import server_lib
BATCH_SIZE = 10
LABEL_DIMENSION = 2
DATA = np.linspace(
0., 2., BATCH_SIZE * LABEL_DIMENSION, dtype=np.float32).reshape(
BATCH_SIZE, LABEL_DIMENSION)
EVAL_NAME = "foo"
EXPORTER_NAME = "saved_model_exporter"
MAX_STEPS = 10
CHIEF = dc._TaskType.CHIEF
EVALUATOR = dc._TaskType.EVALUATOR
WORKER = dc._TaskType.WORKER
PS = dc._TaskType.PS
original_run_distribute_coordinator = dc.run_distribute_coordinator
# TODO(yuefengz): merge this method back to test_util.
def _create_local_cluster(num_workers,
num_ps,
has_eval=False,
protocol="grpc",
worker_config=None,
ps_config=None):
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
if has_eval:
cluster_dict["evaluator"] = ["localhost:%s" % portpicker.pick_unused_port()]
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
if has_eval:
evals = [
server_lib.Server(
cs,
job_name="evaluator",
protocol=protocol,
task_index=0,
config=worker_config,
start=True)
]
else:
evals = []
return workers, ps_servers, evals
def _create_in_process_cluster(num_workers, num_ps, has_eval=False):
"""Create an in-process cluster that consists of only standard server."""
# Leave some memory for cuda runtime.
if has_eval:
gpu_mem_frac = 0.7 / (num_workers + 1)
else:
gpu_mem_frac = 0.7 / num_workers
worker_config = config_pb2.ConfigProto()
worker_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac
# Enable collective ops which has no impact on non-collective ops.
# TODO(yuefengz, tucker): removing this after we move the initialization of
# collective mgr to the session level.
worker_config.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
ps_config = config_pb2.ConfigProto()
ps_config.device_count["GPU"] = 0
return _create_local_cluster(
num_workers,
num_ps=num_ps,
has_eval=has_eval,
worker_config=worker_config,
ps_config=ps_config,
protocol="grpc")
def _create_cluster_spec(has_chief=False,
num_workers=1,
num_ps=0,
has_eval=False):
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
cluster_spec = {}
if has_chief:
cluster_spec[CHIEF] = ["localhost:%s" % portpicker.pick_unused_port()]
if num_workers:
cluster_spec[WORKER] = [
"localhost:%s" % portpicker.pick_unused_port()
for _ in range(num_workers)
]
if num_ps:
cluster_spec[PS] = [
"localhost:%s" % portpicker.pick_unused_port() for _ in range(num_ps)
]
if has_eval:
cluster_spec[EVALUATOR] = ["localhost:%s" % portpicker.pick_unused_port()]
return cluster_spec
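# --- Illustrative note (not part of the original test) ---
# An assumed shape of the returned cluster spec, e.g. for
# _create_cluster_spec(has_chief=True, num_workers=2, num_ps=1, has_eval=True):
#   {"chief": ["localhost:12345"],
#    "worker": ["localhost:23456", "localhost:23457"],
#    "ps": ["localhost:34567"],
#    "evaluator": ["localhost:45678"]}
# with every port freshly picked by portpicker; the ports shown are made up.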
def _bytes_to_str(maybe_bytes):
if isinstance(maybe_bytes, six.string_types):
return maybe_bytes
else:
return str(maybe_bytes, "utf-8")
def _strip_protocol(target):
# cluster_spec expects "host:port" strings.
if "//" in target:
return target.split("//")[1]
else:
return target
class DistributeCoordinatorIntegrationTest(test.TestCase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers."""
cls._workers, cls._ps, cls._evals = _create_in_process_cluster(
num_workers=3, num_ps=2, has_eval=True)
cls._cluster_spec = {
"worker": [
_strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
],
"ps": [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps],
"evaluator": [
_strip_protocol(_bytes_to_str(e.target)) for e in cls._evals
]
}
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._event = threading.Event()
super(DistributeCoordinatorIntegrationTest, self).setUp()
def dataset_input_fn(self, x, y, batch_size, shuffle):
def input_fn():
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
if shuffle:
dataset = dataset.shuffle(batch_size)
dataset = dataset.repeat(100).batch(batch_size)
return dataset
return input_fn
def _get_exporter(self, name, fc):
feature_spec = feature_column.make_parse_example_spec(fc)
serving_input_receiver_fn = (
export_lib.build_parsing_serving_input_receiver_fn(feature_spec))
return exporter_lib.LatestExporter(
name, serving_input_receiver_fn=serving_input_receiver_fn)
def _extract_loss_and_global_step(self, event_folder):
"""Returns the loss and global step in last event."""
event_paths = glob.glob(os.path.join(event_folder, "events*"))
loss = None
global_step_count = None
for e in summary_iterator.summary_iterator(event_paths[-1]):
current_loss = None
for v in e.summary.value:
if v.tag == "loss":
current_loss = v.simple_value
# If loss is not found, global step is meaningless.
if current_loss is None:
continue
current_global_step = e.step
if global_step_count is None or current_global_step > global_step_count:
global_step_count = current_global_step
loss = current_loss
return (loss, global_step_count)
def _get_estimator(self,
train_distribute,
eval_distribute,
remote_cluster=None):
input_dimension = LABEL_DIMENSION
linear_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
dnn_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
return dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=LABEL_DIMENSION,
model_dir=self._model_dir,
dnn_optimizer=adagrad.AdagradOptimizer(0.001),
linear_optimizer=adagrad.AdagradOptimizer(0.001),
config=run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=train_distribute,
eval_distribute=eval_distribute,
remote_cluster=remote_cluster)))
def _complete_flow(self,
train_distribute,
eval_distribute,
remote_cluster=None):
estimator = self._get_estimator(train_distribute, eval_distribute,
remote_cluster)
input_dimension = LABEL_DIMENSION
train_input_fn = self.dataset_input_fn(
x={"x": DATA},
y=DATA,
batch_size=BATCH_SIZE // len(train_distribute.worker_devices),
shuffle=True)
if eval_distribute:
eval_batch_size = BATCH_SIZE // len(eval_distribute.worker_devices)
else:
eval_batch_size = BATCH_SIZE
eval_input_fn = self.dataset_input_fn(
x={"x": DATA}, y=DATA, batch_size=eval_batch_size, shuffle=False)
linear_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
dnn_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
feature_columns = linear_feature_columns + dnn_feature_columns
estimator_training.train_and_evaluate(
estimator,
estimator_training.TrainSpec(train_input_fn, max_steps=MAX_STEPS),
estimator_training.EvalSpec(
name=EVAL_NAME,
input_fn=eval_input_fn,
steps=None,
exporters=self._get_exporter(EXPORTER_NAME, feature_columns),
start_delay_secs=0,
throttle_secs=1))
return estimator
def _inspect_train_and_eval_events(self, estimator):
# Make sure nothing is stuck in limbo.
writer_cache.FileWriterCache.clear()
# Examine the training events. Use a range to check global step to avoid
# flakyness due to global step race condition.
training_loss, _ = self._extract_loss_and_global_step(self._model_dir)
self.assertIsNotNone(training_loss)
# Examine the eval events. The global step should be accurate.
eval_dir = os.path.join(self._model_dir, "eval_" + EVAL_NAME)
eval_loss, eval_global_step = self._extract_loss_and_global_step(
event_folder=eval_dir)
self.assertIsNotNone(eval_loss)
self.assertGreaterEqual(eval_global_step, MAX_STEPS)
# Examine the export folder.
export_dir = os.path.join(
os.path.join(self._model_dir, "export"), EXPORTER_NAME)
self.assertTrue(gfile.Exists(export_dir))
# Examine the ckpt for predict.
def predict_input_fn():
return dataset_ops.Dataset.from_tensor_slices({
"x": DATA
}).batch(BATCH_SIZE)
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in estimator.predict(predict_input_fn)
])
self.assertAllEqual((BATCH_SIZE, LABEL_DIMENSION), predicted_proba.shape)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
eval_distribute_cls=[
None, mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
required_gpus=1))
def test_complete_flow_standalone_client(self, train_distribute_cls,
eval_distribute_cls):
try:
train_distribute = train_distribute_cls(num_gpus=context.num_gpus())
except TypeError:
train_distribute = train_distribute_cls(num_gpus_per_worker=2)
if eval_distribute_cls:
eval_distribute = eval_distribute_cls()
else:
eval_distribute = None
estimator = self._complete_flow(
train_distribute, eval_distribute, remote_cluster=self._cluster_spec)
self._inspect_train_and_eval_events(estimator)
def _mock_run_distribute_coordinator(
self,
worker_fn,
strategy,
eval_fn,
eval_strategy,
mode=dc.CoordinatorMode.STANDALONE_CLIENT,
cluster_spec=None,
session_config=None):
    # Calls the original `run_distribute_coordinator` method but gets task config
# from environment variables and then signals the caller.
task_type = None
task_id = None
if not cluster_spec:
cluster_spec = None
tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
if not cluster_spec:
cluster_spec = tf_config.get("cluster", {})
task_env = tf_config.get("task", {})
if task_env:
task_type = task_env.get("type", task_type)
task_id = int(task_env.get("index", task_id))
self._event.set()
original_run_distribute_coordinator(
worker_fn,
strategy,
eval_fn,
eval_strategy,
mode=mode,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
session_config=session_config)
def _task_thread(self, train_distribute, eval_distribute):
with test.mock.patch.object(dc, "run_distribute_coordinator",
self._mock_run_distribute_coordinator):
self._complete_flow(train_distribute, eval_distribute)
def _run_task_in_thread(self, cluster_spec, task_type, task_id,
train_distribute, eval_distribute):
if task_type:
tf_config = {
"cluster": cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
else:
tf_config = {
"cluster": cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
self._event.clear()
t = threading.Thread(
target=self._task_thread, args=(train_distribute, eval_distribute))
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
t.start()
self._event.wait()
return t
def _run_multiple_tasks_in_threads(self, cluster_spec, train_distribute,
eval_distribute):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_task_in_thread(cluster_spec, task_type, task_id,
train_distribute, eval_distribute)
threads[task_type].append(t)
return threads
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
parameter_server_strategy.ParameterServerStrategy,
],
eval_distribute_cls=[
None, mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
required_gpus=1))
def test_complete_flow_indepedent_worker_between_graph(
self, train_distribute_cls, eval_distribute_cls):
train_distribute = train_distribute_cls(
num_gpus_per_worker=context.num_gpus())
if eval_distribute_cls:
eval_distribute = eval_distribute_cls()
else:
eval_distribute = None
cluster_spec = _create_cluster_spec(num_workers=3, num_ps=2, has_eval=True)
threads = self._run_multiple_tasks_in_threads(
cluster_spec, train_distribute, eval_distribute)
for task_type, ts in threads.items():
if task_type == PS:
continue
for t in ts:
t.join()
estimator = self._get_estimator(train_distribute, eval_distribute)
self._inspect_train_and_eval_events(estimator)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[mirrored_strategy.MirroredStrategy],
eval_distribute_cls=[None, mirrored_strategy.MirroredStrategy],
required_gpus=1))
def test_complete_flow_indepedent_worker_in_graph(self, train_distribute_cls,
eval_distribute_cls):
train_distribute = train_distribute_cls(num_gpus=context.num_gpus())
if eval_distribute_cls:
eval_distribute = eval_distribute_cls()
else:
eval_distribute = None
cluster_spec = _create_cluster_spec(num_workers=3, num_ps=2, has_eval=True)
threads = self._run_multiple_tasks_in_threads(
cluster_spec, train_distribute, eval_distribute)
threads[WORKER][0].join()
threads[EVALUATOR][0].join()
estimator = self._get_estimator(train_distribute, eval_distribute)
self._inspect_train_and_eval_events(estimator)
TF_CONFIG_WITH_CHIEF = {
"cluster": {
"chief": ["fake_chief"],
},
"task": {
"type": "chief",
"index": 0
}
}
TF_CONFIG_WITH_MASTER = {
"cluster": {
"master": ["fake_master"],
},
"task": {
"type": "master",
"index": 0
}
}
TF_CONFIG_WITHOUT_TASK = {"cluster": {"chief": ["fake_worker"]}}
class RunConfigTest(test.TestCase):
def test_previously_unexpected_cluster_spec(self):
with test.mock.patch.dict(
"os.environ", {"TF_CONFIG": json.dumps(TF_CONFIG_WITHOUT_TASK)}):
run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
def test_should_run_distribute_coordinator(self):
"""Tests that should_run_distribute_coordinator return a correct value."""
# We don't use distribute coordinator for local training.
self.assertFalse(
dc_training.should_run_distribute_coordinator(
run_config_lib.RunConfig()))
# When `train_distribute` is not specified, don't use distribute
# coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
self.assertFalse(
dc_training.should_run_distribute_coordinator(
run_config_lib.RunConfig()))
# When `train_distribute` is specified and TF_CONFIG is detected, use
# distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config_with_train_distribute = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
config_with_eval_distribute = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
eval_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
self.assertTrue(
dc_training.should_run_distribute_coordinator(
config_with_train_distribute))
self.assertFalse(
dc_training.should_run_distribute_coordinator(
config_with_eval_distribute))
# With a master in the cluster, don't run distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_MASTER)}):
config = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
self.assertFalse(dc_training.should_run_distribute_coordinator(config))
def test_init_run_config_duplicate_distribute(self):
with self.assertRaises(ValueError):
run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy()))
with self.assertRaises(ValueError):
run_config_lib.RunConfig(
eval_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
eval_distribute=mirrored_strategy.MirroredStrategy()))
def test_init_run_config_none_distribute_coordinator_mode(self):
# We don't use distribute coordinator for local training.
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
dc_training.init_run_config(config, {})
self.assertIsNone(config._distribute_coordinator_mode)
# With a master in the cluster, don't run distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_MASTER)}):
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
self.assertIsNone(config._distribute_coordinator_mode)
# When `train_distribute` is not specified, don't use distribute
# coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config = run_config_lib.RunConfig()
self.assertFalse(hasattr(config, "_distribute_coordinator_mode"))
def test_init_run_config_independent_worker(self):
# When `train_distribute` is specified and TF_CONFIG is detected, use
# distribute coordinator with INDEPENDENT_WORKER mode.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
self.assertEqual(config._distribute_coordinator_mode,
dc.CoordinatorMode.INDEPENDENT_WORKER)
def test_init_run_config_standalone_client(self):
# When `train_distribute` is specified, TF_CONFIG is detected and
    # `experimental.remote_cluster` is set, use distribute coordinator with
# STANDALONE_CLIENT mode.
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
remote_cluster={"chief": ["fake_worker"]}))
self.assertEqual(config._distribute_coordinator_mode,
dc.CoordinatorMode.STANDALONE_CLIENT)
if __name__ == "__main__":
with test.mock.patch.object(sys, "exit", os._exit):
test.main()
|
{
"content_hash": "56d7fb4cf57b26e9f65bbc52e83ee721",
"timestamp": "",
"source": "github",
"line_count": 645,
"max_line_length": 80,
"avg_line_length": 35.674418604651166,
"alnum_prop": 0.6452846588439809,
"repo_name": "AnishShah/tensorflow",
"id": "5348512016efc504f92e5a956d627698b93b209a",
"size": "23699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/estimator_training_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "337393"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48452986"
},
{
"name": "CMake",
"bytes": "195768"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1210238"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834103"
},
{
"name": "Jupyter Notebook",
"bytes": "2584246"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40782103"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "458367"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from django import forms
import voxel_globe.meta.models as models
class CreateSiteForm(forms.Form):
name = forms.CharField(label="Site Name")
south_d = forms.FloatField(label="South Latitude", help_text="degrees")
west_d = forms.FloatField(label="West Longitude", help_text="degrees")
bottom_d = forms.FloatField(label="Bottom Altitude", help_text="meters")
north_d = forms.FloatField(label="North Latitude", help_text="degrees")
east_d = forms.FloatField(label="East Longitude", help_text="degrees")
top_d = forms.FloatField(label="Top Altitude", help_text="meters")
south_d.widget.attrs['class'] = 'bbox degree'
west_d.widget.attrs['class'] = 'bbox degree'
bottom_d.widget.attrs['class'] = 'bbox degree'
north_d.widget.attrs['class'] = 'bbox degree'
east_d.widget.attrs['class'] = 'bbox degree'
top_d.widget.attrs['class'] = 'bbox degree'
|
{
"content_hash": "ecbe2f505cdbd3737d79e773f5f9411d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 45.73684210526316,
"alnum_prop": 0.714614499424626,
"repo_name": "VisionSystemsInc/voxel_globe",
"id": "3a908809f8916bde1c83ae449b5bd96efa7e75fa",
"size": "869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "voxel_globe/create_site/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10038"
},
{
"name": "HTML",
"bytes": "124988"
},
{
"name": "JavaScript",
"bytes": "296605"
},
{
"name": "Nginx",
"bytes": "2623"
},
{
"name": "Python",
"bytes": "377549"
},
{
"name": "Shell",
"bytes": "100713"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('admin_ddjj_app', '0024_auto_20150713_1554'),
]
operations = [
migrations.AlterField(
model_name='ddjj',
name='obs',
field=models.TextField(verbose_name='observaciones p\xfablicas', blank=True),
preserve_default=True,
),
]
|
{
"content_hash": "a83e82733314a2a124bc742b6fe7a340",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 89,
"avg_line_length": 23.894736842105264,
"alnum_prop": 0.6035242290748899,
"repo_name": "lanacioncom/ddjj_admin_lanacion",
"id": "a976274464b464a155141055cd58bc145cbef531",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin_ddjj_app/migrations/0025_auto_20150715_2023.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159"
},
{
"name": "HTML",
"bytes": "40878"
},
{
"name": "JavaScript",
"bytes": "8464"
},
{
"name": "Python",
"bytes": "115348"
},
{
"name": "Shell",
"bytes": "1344"
}
],
"symlink_target": ""
}
|
from tempest.api.compute import base
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
@classmethod
def setUpClass(cls):
# A network and a subnet will be created for these tests
cls.set_network_resources(network=True, subnet=True)
super(BaseSecurityGroupsTest, cls).setUpClass()
|
{
"content_hash": "7a7a58f4cbd1c1bc0f4964b346b7b7df",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 31.8,
"alnum_prop": 0.7327044025157232,
"repo_name": "ntymtsiv/tempest",
"id": "6838ce1e324aaff1fd37dc0a3142f1e874207623",
"size": "954",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/api/compute/security_groups/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2312198"
},
{
"name": "Shell",
"bytes": "9160"
}
],
"symlink_target": ""
}
|
import json
import webob
from lxml import etree
from nova.api.openstack import wsgi
from nova.api.openstack.compute.contrib import keypairs
from nova import db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
def fake_keypair(name):
return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
pass
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_get(context, user_id, name):
pass
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list(self):
req = webob.Request.blank('/v2/fake/os-keypairs')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_invalid_name(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
        # FIXME(ja): should we check that public_key was sent to create?
res_dict = json.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertFalse('private_key' in res_dict['keypair'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_get", db_key_pair_get)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeyPairNotFound()
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(fakes.wsgi_app())
print res
self.assertEqual(res.status_int, 404)
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertTrue(child.tag in exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertTrue(child.tag in kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
|
{
"content_hash": "ac0bae404c5424ac323817ee41c3742a",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 77,
"avg_line_length": 36.47659574468085,
"alnum_prop": 0.5748950069995333,
"repo_name": "gyang/nova",
"id": "fa962dd15eb66a38ce1080ad05a0781b6bea01a6",
"size": "9200",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_keypairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Tests for the compute pollsters.
"""
import mock
import time
from ceilometer.compute import pollsters
from ceilometer.compute import manager
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.tests import base as test_base
class TestPollsterBase(test_base.TestCase):
def setUp(self):
super(TestPollsterBase, self).setUp()
self.mox.StubOutWithMock(virt_inspector, 'get_hypervisor_inspector')
self.inspector = self.mox.CreateMock(virt_inspector.Inspector)
virt_inspector.get_hypervisor_inspector().AndReturn(self.inspector)
self.instance = mock.MagicMock()
self.instance.name = 'instance-00000001'
setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name',
self.instance.name)
self.instance.id = 1
self.instance.flavor = {'name': 'm1.small', 'id': 2}
class TestInstancePollster(TestPollsterBase):
def setUp(self):
super(TestInstancePollster, self).setUp()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_counters(self):
self.mox.ReplayAll()
mgr = manager.AgentManager()
pollster = pollsters.InstancePollster()
counters = list(pollster.get_counters(mgr, self.instance))
self.assertEquals(len(counters), 2)
self.assertEqual(counters[0].name, 'instance')
self.assertEqual(counters[1].name, 'instance:m1.small')
class TestDiskIOPollster(TestPollsterBase):
def setUp(self):
super(TestDiskIOPollster, self).setUp()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_counters(self):
disks = [
(virt_inspector.Disk(device='vda'),
virt_inspector.DiskStats(read_bytes=1L, read_requests=2L,
write_bytes=3L, write_requests=4L,
errors=-1L))
]
self.inspector.inspect_disks(self.instance.name).AndReturn(disks)
self.mox.ReplayAll()
mgr = manager.AgentManager()
pollster = pollsters.DiskIOPollster()
counters = list(pollster.get_counters(mgr, self.instance))
assert counters
self.assertEqual(set([c.name for c in counters]),
set(pollster.get_counter_names()))
def _verify_disk_metering(name, expected_volume):
match = [c for c in counters if c.name == name]
self.assertEquals(len(match), 1, 'missing counter %s' % name)
self.assertEquals(match[0].volume, expected_volume)
self.assertEquals(match[0].type, 'cumulative')
_verify_disk_metering('disk.read.requests', 2L)
_verify_disk_metering('disk.read.bytes', 1L)
_verify_disk_metering('disk.write.requests', 4L)
_verify_disk_metering('disk.write.bytes', 3L)
class TestNetPollster(TestPollsterBase):
def setUp(self):
super(TestNetPollster, self).setUp()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_counters(self):
vnic0 = virt_inspector.Interface(
name='vnet0',
fref='fa163e71ec6e',
mac='fa:16:3e:71:ec:6d',
parameters=dict(ip='10.0.0.2',
projmask='255.255.255.0',
projnet='proj1',
dhcp_server='10.0.0.1'))
stats0 = virt_inspector.InterfaceStats(rx_bytes=1L, rx_packets=2L,
tx_bytes=3L, tx_packets=4L)
vnic1 = virt_inspector.Interface(
name='vnet1',
fref='fa163e71ec6f',
mac='fa:16:3e:71:ec:6e',
parameters=dict(ip='192.168.0.3',
projmask='255.255.255.0',
projnet='proj2',
dhcp_server='10.0.0.2'))
stats1 = virt_inspector.InterfaceStats(rx_bytes=5L, rx_packets=6L,
tx_bytes=7L, tx_packets=8L)
vnics = [(vnic0, stats0), (vnic1, stats1)]
self.inspector.inspect_vnics(self.instance.name).AndReturn(vnics)
self.mox.ReplayAll()
mgr = manager.AgentManager()
pollster = pollsters.NetPollster()
counters = list(pollster.get_counters(mgr, self.instance))
assert counters
self.assertEqual(set([c.name for c in counters]),
set(pollster.get_counter_names()))
def _verify_vnic_metering(name, ip, expected_volume):
match = [c for c in counters if c.name == name and
c.resource_metadata['parameters']['ip'] == ip]
self.assertEquals(len(match), 1, 'missing counter %s' % name)
self.assertEquals(match[0].volume, expected_volume)
self.assertEquals(match[0].type, 'cumulative')
_verify_vnic_metering('network.incoming.bytes', '10.0.0.2', 1L)
_verify_vnic_metering('network.incoming.bytes', '192.168.0.3', 5L)
_verify_vnic_metering('network.outgoing.bytes', '10.0.0.2', 3L)
_verify_vnic_metering('network.outgoing.bytes', '192.168.0.3', 7L)
_verify_vnic_metering('network.incoming.packets', '10.0.0.2', 2L)
_verify_vnic_metering('network.incoming.packets', '192.168.0.3', 6L)
_verify_vnic_metering('network.outgoing.packets', '10.0.0.2', 4L)
_verify_vnic_metering('network.outgoing.packets', '192.168.0.3', 8L)
class TestCPUPollster(TestPollsterBase):
def setUp(self):
super(TestCPUPollster, self).setUp()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_counters(self):
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=1 * (10 ** 6), number=2))
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=3 * (10 ** 6), number=2))
# cpu_time resets on instance restart
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=2 * (10 ** 6), number=2))
self.mox.ReplayAll()
mgr = manager.AgentManager()
pollster = pollsters.CPUPollster()
def _verify_cpu_metering(zero, expected_time):
counters = list(pollster.get_counters(mgr, self.instance))
self.assertEquals(len(counters), 2)
self.assertEqual(set([c.name for c in counters]),
set(pollster.get_counter_names()))
assert counters[0].name == 'cpu_util'
assert (counters[0].volume == 0.0 if zero else
counters[0].volume > 0.0)
assert counters[1].name == 'cpu'
assert counters[1].volume == expected_time
# ensure elapsed time between polling cycles is non-zero
time.sleep(0.001)
_verify_cpu_metering(True, 1 * (10 ** 6))
_verify_cpu_metering(False, 3 * (10 ** 6))
_verify_cpu_metering(False, 2 * (10 ** 6))
|
{
"content_hash": "709508d914694077ee080bed54eca84e",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 76,
"avg_line_length": 41.36257309941521,
"alnum_prop": 0.6000282765446062,
"repo_name": "dreamhost/ceilometer",
"id": "34518546d1d77fb20975f4107ab3b40b44cdf006",
"size": "7825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/compute/test_pollsters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "368517"
},
{
"name": "Python",
"bytes": "993129"
}
],
"symlink_target": ""
}
|
import sys
import six
AUTH_TOKEN = "foobar"
AUTH_URL = "http://0.0.0.0"
class FakeStdout(object):
def __init__(self):
self.content = []
def write(self, text):
self.content.append(text)
def make_string(self):
result = ''
for line in self.content:
result = result + line
return result
class FakeApp(object):
def __init__(self, _stdout):
self.stdout = _stdout
self.client_manager = None
self.stdin = sys.stdin
self.stdout = _stdout or sys.stdout
self.stderr = sys.stderr
self.restapi = None
class FakeClientManager(object):
def __init__(self):
self.compute = None
self.identity = None
self.image = None
self.object = None
self.volume = None
self.network = None
self.auth_ref = None
class FakeModule(object):
def __init__(self, name, version):
self.name = name
self.__version__ = version
class FakeResource(object):
def __init__(self, manager, info, loaded=False):
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def _add_details(self, info):
for (k, v) in six.iteritems(info):
setattr(self, k, v)
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
|
{
"content_hash": "b58b7c22a355ed0adec92aaf4c69229f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 24,
"alnum_prop": 0.5435606060606061,
"repo_name": "openstack/python-congressclient",
"id": "94184276c65a68ca871bdb93c3e1cf3df2684e34",
"size": "2183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congressclient/tests/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "195"
},
{
"name": "Python",
"bytes": "126233"
}
],
"symlink_target": ""
}
|
import requests
import discord
def getSupportedCurrencies():
return {"AED": "United Arab Emirates Dirham", "AFN": "Afghan Afghani", "ALL": "Albanian Lek", "AMD": "Armenian Dram",
"ANG": "Netherlands Antillean Guilder", "AOA": "Angolan Kwanza", "ARS": "Argentine Peso", "AUD": "Australian Dollar",
"AWG": "Aruban Florin", "AZN": "Azerbaijani Manat", "BAM": "Bosnia-Herzegovina Convertible Mark", "BBD": "Barbadian Dollar",
"BDT": "Bangladeshi Taka", "BGN": "Bulgarian Lev", "BHD": "Bahraini Dinar", "BIF": "Burundian Franc", "BMD": "Bermudan Dollar",
"BND": "Brunei Dollar", "BOB": "Bolivian Boliviano", "BRL": "Brazilian Real", "BSD": "Bahamian Dollar", "BTC": "Bitcoin",
"BTN": "Bhutanese Ngultrum", "BWP": "Botswanan Pula", "BYR": "Belarusian Ruble", "BZD": "Belize Dollar", "CAD": "Canadian Dollar",
"CDF": "Congolese Franc", "CHF": "Swiss Franc", "CLF": "Chilean Unit of Account (UF)", "CLP": "Chilean Peso", "CNY": "Chinese Yuan",
"COP": "Colombian Peso", "CRC": "Costa Rican Col\u00f3n", "CUP": "Cuban Peso", "CVE": "Cape Verdean Escudo", "CZK": "Czech Republic Koruna",
"DJF": "Djiboutian Franc", "DKK": "Danish Krone", "DOP": "Dominican Peso", "DZD": "Algerian Dinar", "EEK": "Estonian Kroon",
"EGP": "Egyptian Pound", "ERN": "Eritrean Nnakfa", "ETB": "Ethiopian Birr", "EUR": "Euro", "FJD": "Fijian Dollar",
"FKP": "Falkland Islands Pound", "GBP": "British Pound Sterling", "GEL": "Georgian Lari", "GHS": "Ghanaian Cedi", "GIP": "Gibraltar Pound",
"GMD": "Gambian Dalasi", "GNF": "Guinean Franc", "GTQ": "Guatemalan Quetzal", "GYD": "Guyanaese Dollar", "HKD": "Hong Kong Dollar",
"HNL": "Honduran Lempira", "HRK": "Croatian Kuna", "HTG": "Haitian Gourde", "HUF": "Hungarian Forint", "IDR": "Indonesian Rupiah",
"ILS": "Israeli New Sheqel", "INR": "Indian Rupee", "IQD": "Iraqi Dinar", "IRR": "Iranian Rial", "ISK": "Icelandic Kr\u00f3na",
"JEP": "Jersey Pound", "JMD": "Jamaican Dollar", "JOD": "Jordanian Dinar", "JPY": "Japanese Yen", "KES": "Kenyan Shilling",
"KGS": "Kyrgystani Som", "KHR": "Cambodian Riel", "KMF": "Comorian Franc", "KPW": "North Korean Won", "KRW": "South Korean Won",
"KWD": "Kuwaiti Dinar", "KYD": "Cayman Islands Dollar", "KZT": "Kazakhstani Tenge", "LAK": "Laotian Kip", "LBP": "Lebanese Pound",
"LKR": "Sri Lankan Rupee", "LRD": "Liberian Dollar", "LSL": "Lesotho Loti", "LTL": "Lithuanian Litas", "LVL": "Latvian Lats",
"LYD": "Libyan Dinar", "MAD": "Moroccan Dirham", "MDL": "Moldovan Leu", "MGA": "Malagasy Ariary", "MKD": "Macedonian Denar",
"MMK": "Myanma Kyat", "MNT": "Mongolian Tugrik", "MOP": "Macanese Pataca", "MRO": "Mauritanian Ouguiya", "MTL": "Maltese Lira",
"MUR": "Mauritian Rupee", "MVR": "Maldivian Rufiyaa", "MWK": "Malawian Kwacha", "MXN": "Mexican Peso", "MYR": "Malaysian Ringgit",
"MZN": "Mozambican Metical", "NAD": "Namibian Dollar", "NGN": "Nigerian Naira", "NIO": "Nicaraguan C\u00f3rdoba", "NOK": "Norwegian Krone",
"NPR": "Nepalese Rupee", "NZD": "New Zealand Dollar", "OMR": "Omani Rial", "PAB": "Panamanian Balboa", "PEN": "Peruvian Nuevo Sol",
"PGK": "Papua New Guinean Kina", "PHP": "Philippine Peso", "PKR": "Pakistani Rupee", "PLN": "Polish Zloty", "PYG": "Paraguayan Guarani",
"QAR": "Qatari Rial", "RON": "Romanian Leu", "RSD": "Serbian Dinar", "RUB": "Russian Ruble", "RWF": "Rwandan Franc", "SAR": "Saudi Riyal",
"SBD": "Solomon Islands Dollar", "SCR": "Seychellois Rupee", "SDG": "Sudanese Pound", "SEK": "Swedish Krona", "SGD": "Singapore Dollar",
"SHP": "Saint Helena Pound", "SLL": "Sierra Leonean Leone", "SOS": "Somali Shilling", "SRD": "Surinamese Dollar",
"STD": "S\u00e3o Tom\u00e9 and Pr\u00edncipe Dobra", "SVC": "Salvadoran Col\u00f3n", "SYP": "Syrian Pound", "SZL": "Swazi Lilangeni",
"THB": "Thai Baht", "TJS": "Tajikistani Somoni", "TMT": "Turkmenistani Manat", "TND": "Tunisian Dinar", "TOP": "Tongan Pa?anga",
"TRY": "Turkish Lira", "TTD": "Trinidad and Tobago Dollar", "TWD": "New Taiwan Dollar", "TZS": "Tanzanian Shilling", "UAH": "Ukrainian Hryvnia",
"UGX": "Ugandan Shilling", "USD": "United States Dollar", "UYU": "Uruguayan Peso", "UZS": "Uzbekistan Som", "VEF": "Venezuelan Bol\u00edvar Fuerte",
"VND": "Vietnamese Dong", "VUV": "Vanuatu Vatu", "WST": "Samoan Tala", "XAF": "CFA Franc BEAC", "XAG": "Silver (troy ounce)",
"XAU": "Gold (troy ounce)", "XBT": "Bitcoin", "XCD": "East Caribbean Dollar", "XDR": "Special Drawing Rights", "XOF": "CFA Franc BCEAO",
"XPF": "CFP Franc", "YER": "Yemeni Rial", "ZAR": "South African Rand", "ZMK": "Zambian Kwacha (pre-2013)", "ZMW": "Zambian Kwacha",
"ZWL": "Zimbabwean Dollar"}
# Returns ticker data from the Coindesk BPI for the given currency pair
def getTickerData(currency):
url = "http://api.coindesk.com/v1/bpi/currentprice/" + currency + ".json"
header = {"User-Agent": "Mozilla/5.0"}
currencies = getSupportedCurrencies()
if currency in currencies:
ticker = requests.get(url, headers = header)
if ticker.status_code == 200:
ticker = ticker.json()
return ticker["bpi"][currency]
else:
return None
# Returns formatted market data for the bot to send
def getTickerMessage(ticker):
header = "Bitcoin (" + ticker["code"] + " - " + ticker["description"] + ") - CoinDesk Bitcoin Price Index"
data = "Current Price: `" + "{:.2f}".format(ticker["rate_float"]) + " " + ticker["code"] + "`"
embed = discord.Embed(title = header, description = data, color = 0xFF9900)
embed.set_footer(text = "For more information about Satoshi, type +help")
return embed
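# Illustrative sketch (not part of the original file): how the two helpers above
# are typically chained by the bot. The default currency code below is a
# hypothetical choice; any key returned by getSupportedCurrencies() works the
# same way.
def getPriceEmbed(currency = "USD"):
    ticker = getTickerData(currency)
    if ticker is None:
        return None
    return getTickerMessage(ticker)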
|
{
"content_hash": "b511d9cfae144db8a520d5f85ca04d25",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 153,
"avg_line_length": 89.87301587301587,
"alnum_prop": 0.6305192511480042,
"repo_name": "cmsart/Satoshi",
"id": "5cf1f11875144c9f1bb35efd06d7cbb7ec0ec63c",
"size": "5662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coindesk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17214"
}
],
"symlink_target": ""
}
|
import base64
import hashlib
import logging
import os
import random
import re
import sys
import time
import traceback
from functools import lru_cache, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
cast,
)
from django.conf import settings
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.core.cache.backends.base import BaseCache
from django.db.models import Q
from django.http import HttpRequest
from zerver.lib.utils import make_safe_digest, statsd, statsd_key
if TYPE_CHECKING:
# These modules have to be imported for type annotations but
# they cannot be imported at runtime due to cyclic dependency.
from zerver.models import Message, Realm, UserProfile
MEMCACHED_MAX_KEY_LENGTH = 250
FuncT = TypeVar('FuncT', bound=Callable[..., object])
logger = logging.getLogger()
class NotFoundInCache(Exception):
pass
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time() -> float:
return remote_cache_total_time
def get_remote_cache_requests() -> int:
return remote_cache_total_requests
def remote_cache_stats_start() -> None:
global remote_cache_time_start
remote_cache_time_start = time.time()
def remote_cache_stats_finish() -> None:
global remote_cache_total_time
global remote_cache_total_requests
global remote_cache_time_start
remote_cache_total_requests += 1
remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix() -> str:
if settings.CASPER_TESTS:
# This sets the prefix for the benefit of the Casper tests.
#
# Having a fixed key is OK since we don't support running
# multiple copies of the casper tests at the same time anyway.
return 'casper_tests:'
elif settings.TEST_SUITE:
# The Python tests overwrite KEY_PREFIX on each test, but use
# this codepath as well, just to save running the more complex
# code below for reading the normal key prefix.
return 'django_tests_unused:'
# directory `var` should exist in production
os.makedirs(os.path.join(settings.DEPLOY_ROOT, "var"), exist_ok=True)
filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
random_hash = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).digest()
prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
# This does close the underlying file
with os.fdopen(fd, 'w') as f:
f.write(prefix + "\n")
except OSError:
# The file already exists
tries = 1
while tries < 10:
with open(filename) as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
tries += 1
prefix = ''
time.sleep(0.5)
if not prefix:
print("Could not read remote cache key prefix file")
sys.exit(1)
return prefix
KEY_PREFIX: str = get_or_create_key_prefix()
def bounce_key_prefix_for_testing(test_name: str) -> None:
global KEY_PREFIX
KEY_PREFIX = test_name + ':' + str(os.getpid()) + ':'
# We are taking the hash of the KEY_PREFIX to decrease the size of the key.
# Memcached keys should have a length of less than 250.
KEY_PREFIX = hashlib.sha1(KEY_PREFIX.encode('utf-8')).hexdigest() + ":"
def get_cache_backend(cache_name: Optional[str]) -> BaseCache:
if cache_name is None:
return djcache
return caches[cache_name]
def get_cache_with_key(
keyfunc: Callable[..., str],
cache_name: Optional[str]=None,
) -> Callable[[FuncT], FuncT]:
"""
The main goal of this function getting value from the cache like in the "cache_with_key".
A cache value can contain any data including the "None", so
here used exception for case if value isn't found in the cache.
"""
def decorator(func: FuncT) -> FuncT:
@wraps(func)
def func_with_caching(*args: object, **kwargs: object) -> object:
key = keyfunc(*args, **kwargs)
try:
val = cache_get(key, cache_name=cache_name)
except InvalidCacheKeyException:
stack_trace = traceback.format_exc()
log_invalid_cache_keys(stack_trace, [key])
val = None
if val is not None:
return val[0]
raise NotFoundInCache()
return cast(FuncT, func_with_caching) # https://github.com/python/mypy/issues/1927
return decorator
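# Illustrative sketch (not part of the original module): unlike cache_with_key
# below, this decorator never runs the wrapped function; a cache miss raises
# NotFoundInCache for the caller to handle. The key function and fallback below
# are hypothetical examples.
@get_cache_with_key(lambda user_id: f"example_presence:{user_id}")
def _example_cached_presence(user_id: int) -> object:
    pass  # never executed: only the cache is consulted

def _example_presence_or_none(user_id: int) -> Optional[object]:
    try:
        return _example_cached_presence(user_id)
    except NotFoundInCache:
        return None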
def cache_with_key(
keyfunc: Callable[..., str], cache_name: Optional[str]=None,
timeout: Optional[int]=None, with_statsd_key: Optional[str]=None,
) -> Callable[[FuncT], FuncT]:
"""Decorator which applies Django caching to a function.
Decorator argument is a function which computes a cache key
from the original function's arguments. You are responsible
for avoiding collisions with other uses of this decorator or
other uses of caching."""
def decorator(func: FuncT) -> FuncT:
@wraps(func)
def func_with_caching(*args: object, **kwargs: object) -> object:
key = keyfunc(*args, **kwargs)
try:
val = cache_get(key, cache_name=cache_name)
except InvalidCacheKeyException:
stack_trace = traceback.format_exc()
log_invalid_cache_keys(stack_trace, [key])
return func(*args, **kwargs)
extra = ""
if cache_name == 'database':
extra = ".dbcache"
if with_statsd_key is not None:
metric_key = with_statsd_key
else:
metric_key = statsd_key(key)
status = "hit" if val is not None else "miss"
statsd.incr(f"cache{extra}.{metric_key}.{status}")
# Values are singleton tuples so that we can distinguish
# a result of None from a missing key.
if val is not None:
return val[0]
val = func(*args, **kwargs)
cache_set(key, val, cache_name=cache_name, timeout=timeout)
return val
return cast(FuncT, func_with_caching) # https://github.com/python/mypy/issues/1927
return decorator
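# Illustrative sketch (not part of the original module): a typical application
# of cache_with_key. The key function receives the same arguments as the
# wrapped function and must return a collision-free string; cache hits skip the
# function body entirely. Both names below are hypothetical examples, not real
# callers.
def _example_settings_cache_key(realm_id: int) -> str:
    return f"example_settings:{realm_id}"

@cache_with_key(_example_settings_cache_key, timeout=3600 * 24 * 7)
def _example_get_settings(realm_id: int) -> Dict[str, int]:
    return {"realm_id": realm_id}  # stands in for an expensive database query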
class InvalidCacheKeyException(Exception):
pass
def log_invalid_cache_keys(stack_trace: str, key: List[str]) -> None:
logger.warning(
"Invalid cache key used: %s\nStack trace: %s\n", key, stack_trace,
)
def validate_cache_key(key: str) -> None:
if not key.startswith(KEY_PREFIX):
key = KEY_PREFIX + key
# Theoretically memcached can handle non-ascii characters
# and only "control" characters are strictly disallowed, see:
# https://github.com/memcached/memcached/blob/master/doc/protocol.txt
    # However, limiting the characters we allow in keys simplifies things,
# and anyway we use make_safe_digest when forming some keys to ensure
# the resulting keys fit the regex below.
# The regex checks "all characters between ! and ~ in the ascii table",
# which happens to be the set of all "nice" ascii characters.
if not bool(re.fullmatch(r"([!-~])+", key)):
raise InvalidCacheKeyException("Invalid characters in the cache key: " + key)
if len(key) > MEMCACHED_MAX_KEY_LENGTH:
raise InvalidCacheKeyException(f"Cache key too long: {key} Length: {len(key)}")
def cache_set(key: str, val: Any, cache_name: Optional[str]=None, timeout: Optional[int]=None) -> None:
final_key = KEY_PREFIX + key
validate_cache_key(final_key)
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
cache_backend.set(final_key, (val,), timeout=timeout)
remote_cache_stats_finish()
def cache_get(key: str, cache_name: Optional[str]=None) -> Any:
final_key = KEY_PREFIX + key
validate_cache_key(final_key)
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.get(final_key)
remote_cache_stats_finish()
return ret
def cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str, Any]:
keys = [KEY_PREFIX + key for key in keys]
for key in keys:
validate_cache_key(key)
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return {key[len(KEY_PREFIX):]: value for key, value in ret.items()}
def safe_cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str, Any]:
"""Variant of cache_get_many that drops any keys that fail
validation, rather than throwing an exception visible to the
caller."""
try:
# Almost always the keys will all be correct, so we just try
# to do normal cache_get_many to avoid the overhead of
# validating all the keys here.
return cache_get_many(keys, cache_name)
except InvalidCacheKeyException:
stack_trace = traceback.format_exc()
good_keys, bad_keys = filter_good_and_bad_keys(keys)
log_invalid_cache_keys(stack_trace, bad_keys)
return cache_get_many(good_keys, cache_name)
def cache_set_many(items: Dict[str, Any], cache_name: Optional[str]=None,
timeout: Optional[int]=None) -> None:
new_items = {}
for key in items:
new_key = KEY_PREFIX + key
validate_cache_key(new_key)
new_items[new_key] = items[key]
items = new_items
remote_cache_stats_start()
get_cache_backend(cache_name).set_many(items, timeout=timeout)
remote_cache_stats_finish()
def safe_cache_set_many(items: Dict[str, Any], cache_name: Optional[str]=None,
timeout: Optional[int]=None) -> None:
"""Variant of cache_set_many that drops saving any keys that fail
validation, rather than throwing an exception visible to the
caller."""
try:
# Almost always the keys will all be correct, so we just try
# to do normal cache_set_many to avoid the overhead of
# validating all the keys here.
return cache_set_many(items, cache_name, timeout)
except InvalidCacheKeyException:
stack_trace = traceback.format_exc()
good_keys, bad_keys = filter_good_and_bad_keys(list(items.keys()))
log_invalid_cache_keys(stack_trace, bad_keys)
good_items = {key: items[key] for key in good_keys}
return cache_set_many(good_items, cache_name, timeout)
def cache_delete(key: str, cache_name: Optional[str]=None) -> None:
final_key = KEY_PREFIX + key
validate_cache_key(final_key)
remote_cache_stats_start()
get_cache_backend(cache_name).delete(final_key)
remote_cache_stats_finish()
def cache_delete_many(items: Iterable[str], cache_name: Optional[str]=None) -> None:
keys = [KEY_PREFIX + item for item in items]
for key in keys:
validate_cache_key(key)
remote_cache_stats_start()
get_cache_backend(cache_name).delete_many(keys)
remote_cache_stats_finish()
def filter_good_and_bad_keys(keys: List[str]) -> Tuple[List[str], List[str]]:
good_keys = []
bad_keys = []
for key in keys:
try:
validate_cache_key(key)
good_keys.append(key)
except InvalidCacheKeyException:
bad_keys.append(key)
return good_keys, bad_keys
# generic_bulk_cached_fetch and its helpers. We start by declaring
# a few type variables that help define its interface.
# Type for the cache's keys; will typically be int or str.
ObjKT = TypeVar('ObjKT')
# Type for items to be fetched from the database (e.g. a Django model object)
ItemT = TypeVar('ItemT')
# Type for items to be stored in the cache (e.g. a dictionary serialization).
# Will equal ItemT unless a cache_transformer is specified.
CacheItemT = TypeVar('CacheItemT')
# Type for compressed items for storage in the cache. For
# serializable objects, will be the object; if encoded, bytes.
CompressedItemT = TypeVar('CompressedItemT')
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
def generic_bulk_cached_fetch(
cache_key_function: Callable[[ObjKT], str],
query_function: Callable[[List[ObjKT]], Iterable[ItemT]],
object_ids: Sequence[ObjKT],
*,
extractor: Callable[[CompressedItemT], CacheItemT],
setter: Callable[[CacheItemT], CompressedItemT],
id_fetcher: Callable[[ItemT], ObjKT],
cache_transformer: Callable[[ItemT], CacheItemT],
) -> Dict[ObjKT, CacheItemT]:
if len(object_ids) == 0:
# Nothing to fetch.
return {}
cache_keys: Dict[ObjKT, str] = {}
for object_id in object_ids:
cache_keys[object_id] = cache_key_function(object_id)
cached_objects_compressed: Dict[str, Tuple[CompressedItemT]] = safe_cache_get_many(
[cache_keys[object_id] for object_id in object_ids],
)
cached_objects: Dict[str, CacheItemT] = {}
for (key, val) in cached_objects_compressed.items():
cached_objects[key] = extractor(cached_objects_compressed[key][0])
needed_ids = [object_id for object_id in object_ids if
cache_keys[object_id] not in cached_objects]
# Only call query_function if there are some ids to fetch from the database:
if len(needed_ids) > 0:
db_objects = query_function(needed_ids)
else:
db_objects = []
items_for_remote_cache: Dict[str, Tuple[CompressedItemT]] = {}
for obj in db_objects:
key = cache_keys[id_fetcher(obj)]
item = cache_transformer(obj)
items_for_remote_cache[key] = (setter(item),)
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
safe_cache_set_many(items_for_remote_cache)
return {object_id: cached_objects[cache_keys[object_id]] for object_id in object_ids
if cache_keys[object_id] in cached_objects}
def transformed_bulk_cached_fetch(
cache_key_function: Callable[[ObjKT], str],
query_function: Callable[[List[ObjKT]], Iterable[ItemT]],
object_ids: Sequence[ObjKT],
*,
id_fetcher: Callable[[ItemT], ObjKT],
cache_transformer: Callable[[ItemT], CacheItemT],
) -> Dict[ObjKT, CacheItemT]:
return generic_bulk_cached_fetch(
cache_key_function,
query_function,
object_ids,
extractor=lambda obj: obj,
setter=lambda obj: obj,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
)
def bulk_cached_fetch(
cache_key_function: Callable[[ObjKT], str],
query_function: Callable[[List[ObjKT]], Iterable[ItemT]],
object_ids: Sequence[ObjKT],
*,
id_fetcher: Callable[[ItemT], ObjKT],
) -> Dict[ObjKT, ItemT]:
return transformed_bulk_cached_fetch(
cache_key_function,
query_function,
object_ids,
id_fetcher=id_fetcher,
cache_transformer=lambda obj: obj,
)
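# Illustrative sketch (not part of the original module): the simplest caller
# shape for bulk_cached_fetch. The wiring below is a hypothetical example;
# real callers pass one of the *_cache_key helpers defined later in this
# module together with a Django queryset-based query function.
def _example_bulk_fetch_users(user_ids: List[int]) -> Dict[int, 'UserProfile']:
    from zerver.models import UserProfile  # local import, as elsewhere in this module

    return bulk_cached_fetch(
        user_profile_by_id_cache_key,
        lambda ids: UserProfile.objects.filter(id__in=ids),
        user_ids,
        id_fetcher=lambda user: user.id,
    )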
def preview_url_cache_key(url: str) -> str:
return f"preview_url:{make_safe_digest(url)}"
def display_recipient_cache_key(recipient_id: int) -> str:
return f"display_recipient_dict:{recipient_id}"
def display_recipient_bulk_get_users_by_id_cache_key(user_id: int) -> str:
    # Cache key function used internally by the display_recipient code when
    # bulk-fetching users.
return 'bulk_fetch_display_recipients:' + user_profile_by_id_cache_key(user_id)
def user_profile_by_email_cache_key(email: str) -> str:
# See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
# are proactively encoding email addresses even though they will
# with high likelihood be ASCII-only for the foreseeable future.
return f'user_profile_by_email:{make_safe_digest(email.strip())}'
def user_profile_cache_key_id(email: str, realm_id: int) -> str:
return f"user_profile:{make_safe_digest(email.strip())}:{realm_id}"
def user_profile_cache_key(email: str, realm: 'Realm') -> str:
return user_profile_cache_key_id(email, realm.id)
def bot_profile_cache_key(email: str) -> str:
return f"bot_profile:{make_safe_digest(email.strip())}"
def user_profile_by_id_cache_key(user_profile_id: int) -> str:
return f"user_profile_by_id:{user_profile_id}"
def user_profile_by_api_key_cache_key(api_key: str) -> str:
return f"user_profile_by_api_key:{api_key}"
realm_user_dict_fields: List[str] = [
'id', 'full_name', 'email',
'avatar_source', 'avatar_version', 'is_active',
'role', 'is_bot', 'realm_id', 'timezone',
'date_joined', 'bot_owner_id', 'delivery_email',
'bot_type', 'long_term_idle'
]
def realm_user_dicts_cache_key(realm_id: int) -> str:
return f"realm_user_dicts:{realm_id}"
def get_realm_used_upload_space_cache_key(realm: 'Realm') -> str:
return f'realm_used_upload_space:{realm.id}'
def active_user_ids_cache_key(realm_id: int) -> str:
return f"active_user_ids:{realm_id}"
def active_non_guest_user_ids_cache_key(realm_id: int) -> str:
return f"active_non_guest_user_ids:{realm_id}"
bot_dict_fields: List[str] = [
'api_key',
'avatar_source',
'avatar_version',
'bot_owner__id',
'bot_type',
'default_all_public_streams',
'default_events_register_stream__name',
'default_sending_stream__name',
'email',
'full_name',
'id',
'is_active',
'realm_id',
]
def bot_dicts_in_realm_cache_key(realm: 'Realm') -> str:
return f"bot_dicts_in_realm:{realm.id}"
def get_stream_cache_key(stream_name: str, realm_id: int) -> str:
return f"stream_by_realm_and_name:{realm_id}:{make_safe_digest(stream_name.strip().lower())}"
def delete_user_profile_caches(user_profiles: Iterable['UserProfile']) -> None:
# Imported here to avoid cyclic dependency.
from zerver.lib.users import get_all_api_keys
from zerver.models import is_cross_realm_bot_email
keys = []
for user_profile in user_profiles:
keys.append(user_profile_by_email_cache_key(user_profile.delivery_email))
keys.append(user_profile_by_id_cache_key(user_profile.id))
for api_key in get_all_api_keys(user_profile):
keys.append(user_profile_by_api_key_cache_key(api_key))
keys.append(user_profile_cache_key(user_profile.email, user_profile.realm))
if user_profile.is_bot and is_cross_realm_bot_email(user_profile.email):
# Handle clearing system bots from their special cache.
keys.append(bot_profile_cache_key(user_profile.email))
cache_delete_many(keys)
def delete_display_recipient_cache(user_profile: 'UserProfile') -> None:
from zerver.models import Subscription # We need to import here to avoid cyclic dependency.
recipient_ids = Subscription.objects.filter(user_profile=user_profile)
recipient_ids = recipient_ids.values_list('recipient_id', flat=True)
keys = [display_recipient_cache_key(rid) for rid in recipient_ids]
keys.append(display_recipient_bulk_get_users_by_id_cache_key(user_profile.id))
cache_delete_many(keys)
def changed(kwargs: Any, fields: List[str]) -> bool:
if kwargs.get('update_fields') is None:
# adds/deletes should invalidate the cache
return True
update_fields = set(kwargs['update_fields'])
for f in fields:
if f in update_fields:
return True
return False
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender: Any, **kwargs: Any) -> None:
user_profile = kwargs['instance']
delete_user_profile_caches([user_profile])
# Invalidate our active_users_in_realm info dict if any user has changed
# the fields in the dict or become (in)active
if changed(kwargs, realm_user_dict_fields):
cache_delete(realm_user_dicts_cache_key(user_profile.realm_id))
if changed(kwargs, ['is_active']):
cache_delete(active_user_ids_cache_key(user_profile.realm_id))
cache_delete(active_non_guest_user_ids_cache_key(user_profile.realm_id))
if changed(kwargs, ['role']):
cache_delete(active_non_guest_user_ids_cache_key(user_profile.realm_id))
if changed(kwargs, ['email', 'full_name', 'id', 'is_mirror_dummy']):
delete_display_recipient_cache(user_profile)
# Invalidate our bots_in_realm info dict if any bot has
# changed the fields in the dict or become (in)active
if user_profile.is_bot and changed(kwargs, bot_dict_fields):
cache_delete(bot_dicts_in_realm_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance']
users = realm.get_active_users()
delete_user_profile_caches(users)
if realm.deactivated or (kwargs["update_fields"] is not None and
"string_id" in kwargs['update_fields']):
cache_delete(realm_user_dicts_cache_key(realm.id))
cache_delete(active_user_ids_cache_key(realm.id))
cache_delete(bot_dicts_in_realm_cache_key(realm))
cache_delete(realm_alert_words_cache_key(realm))
cache_delete(realm_alert_words_automaton_cache_key(realm))
cache_delete(active_non_guest_user_ids_cache_key(realm.id))
cache_delete(realm_rendered_description_cache_key(realm))
cache_delete(realm_text_description_cache_key(realm))
if changed(kwargs, ['description']):
cache_delete(realm_rendered_description_cache_key(realm))
cache_delete(realm_text_description_cache_key(realm))
def realm_alert_words_cache_key(realm: 'Realm') -> str:
return f"realm_alert_words:{realm.string_id}"
def realm_alert_words_automaton_cache_key(realm: 'Realm') -> str:
return f"realm_alert_words_automaton:{realm.string_id}"
def realm_rendered_description_cache_key(realm: 'Realm') -> str:
return f"realm_rendered_description:{realm.string_id}"
def realm_text_description_cache_key(realm: 'Realm') -> str:
return f"realm_text_description:{realm.string_id}"
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender: Any, **kwargs: Any) -> None:
from zerver.models import UserProfile
stream = kwargs['instance']
items_for_remote_cache = {}
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm_id)] = (stream,)
cache_set_many(items_for_remote_cache)
if kwargs.get('update_fields') is None or 'name' in kwargs['update_fields'] and \
UserProfile.objects.filter(
Q(default_sending_stream=stream) |
Q(default_events_register_stream=stream)).exists():
cache_delete(bot_dicts_in_realm_cache_key(stream.realm))
def flush_used_upload_space_cache(sender: Any, **kwargs: Any) -> None:
attachment = kwargs['instance']
if kwargs.get("created") is None or kwargs.get("created") is True:
cache_delete(get_realm_used_upload_space_cache_key(attachment.owner.realm))
def to_dict_cache_key_id(message_id: int) -> str:
return f'message_dict:{message_id}'
def to_dict_cache_key(message: 'Message', realm_id: Optional[int]=None) -> str:
return to_dict_cache_key_id(message.id)
def open_graph_description_cache_key(content: Any, request: HttpRequest) -> str:
return 'open_graph_description_path:{}'.format(make_safe_digest(request.META['PATH_INFO']))
def flush_message(sender: Any, **kwargs: Any) -> None:
message = kwargs['instance']
cache_delete(to_dict_cache_key_id(message.id))
def flush_submessage(sender: Any, **kwargs: Any) -> None:
submessage = kwargs['instance']
# submessages are not cached directly, they are part of their
# parent messages
message_id = submessage.message_id
cache_delete(to_dict_cache_key_id(message_id))
DECORATOR = Callable[[Callable[..., Any]], Callable[..., Any]]
def ignore_unhashable_lru_cache(maxsize: int=128, typed: bool=False) -> DECORATOR:
"""
    This is a wrapper over the lru_cache function. It adds the following
    features on top of lru_cache:
        * It will not cache the result of a call whose arguments are unhashable.
        * It will clear the cache whenever zerver.lib.cache.KEY_PREFIX changes.
"""
internal_decorator = lru_cache(maxsize=maxsize, typed=typed)
def decorator(user_function: Callable[..., Any]) -> Callable[..., Any]:
if settings.DEVELOPMENT and not settings.TEST_SUITE: # nocoverage
# In the development environment, we want every file
# change to refresh the source files from disk.
return user_function
# Casting to Any since we're about to monkey-patch this.
cache_enabled_user_function = cast(Any, internal_decorator(user_function))
def wrapper(*args: Any, **kwargs: Any) -> Any:
if not hasattr(cache_enabled_user_function, 'key_prefix'):
cache_enabled_user_function.key_prefix = KEY_PREFIX
if cache_enabled_user_function.key_prefix != KEY_PREFIX:
# Clear cache when cache.KEY_PREFIX changes. This is used in
# tests.
cache_enabled_user_function.cache_clear()
cache_enabled_user_function.key_prefix = KEY_PREFIX
try:
return cache_enabled_user_function(*args, **kwargs)
except TypeError:
# args or kwargs contains an element which is unhashable. In
# this case we don't cache the result.
pass
# Deliberately calling this function from outside of exception
# handler to get a more descriptive traceback. Otherwise traceback
            # can include the exception from cache_enabled_user_function as
# well.
return user_function(*args, **kwargs)
setattr(wrapper, 'cache_info', cache_enabled_user_function.cache_info)
setattr(wrapper, 'cache_clear', cache_enabled_user_function.cache_clear)
return wrapper
return decorator
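# Illustrative sketch (not part of the original module): the decorator is used
# like functools.lru_cache, except that calls with unhashable arguments (for
# example dicts) silently bypass the cache instead of raising TypeError. The
# function below is a hypothetical example.
@ignore_unhashable_lru_cache(maxsize=16)
def _example_normalize(value: Any) -> Any:
    return value  # cached for hashable values; uncached, but still correct, for dicts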
def dict_to_items_tuple(user_function: Callable[..., Any]) -> Callable[..., Any]:
"""Wrapper that converts any dict args to dict item tuples."""
def dict_to_tuple(arg: Any) -> Any:
if isinstance(arg, dict):
return tuple(sorted(arg.items()))
return arg
def wrapper(*args: Any, **kwargs: Any) -> Any:
new_args = (dict_to_tuple(arg) for arg in args)
return user_function(*new_args, **kwargs)
return wrapper
def items_tuple_to_dict(user_function: Callable[..., Any]) -> Callable[..., Any]:
"""Wrapper that converts any dict items tuple args to dicts."""
def dict_items_to_dict(arg: Any) -> Any:
if isinstance(arg, tuple):
try:
return dict(arg)
except TypeError:
pass
return arg
def wrapper(*args: Any, **kwargs: Any) -> Any:
new_args = (dict_items_to_dict(arg) for arg in args)
new_kwargs = {key: dict_items_to_dict(val) for key, val in kwargs.items()}
return user_function(*new_args, **new_kwargs)
return wrapper
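# Illustrative sketch (not part of the original module): the two wrappers above
# are meant to bracket an lru_cache-style decorator, so that dict arguments are
# made hashable on the way into the cache layer and restored to dicts before
# the real function runs. The function below is a hypothetical example.
@dict_to_items_tuple
@ignore_unhashable_lru_cache(maxsize=16)
@items_tuple_to_dict
def _example_describe_options(options: Dict[str, Any]) -> str:
    # `options` arrives as a plain dict even though the cache only sees tuples.
    return ", ".join(f"{key}={value}" for key, value in sorted(options.items()))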
|
{
"content_hash": "4e5266afae7c97d639e928979de25a5f",
"timestamp": "",
"source": "github",
"line_count": 731,
"max_line_length": 103,
"avg_line_length": 38.36662106703147,
"alnum_prop": 0.6580617556870855,
"repo_name": "brainwane/zulip",
"id": "c95473134d56a72b9cdf5811621791baf5d79fed",
"size": "28124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from resume.models import Resume, Topic, Experience
# Register your models here.
admin.site.register(Resume)
admin.site.register(Topic)
admin.site.register(Experience)
|
{
"content_hash": "aee26465cdcc2b380f3b1aadf51884ec",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 25.375,
"alnum_prop": 0.812807881773399,
"repo_name": "jxnl/jxnlco-django",
"id": "cbe6eba37c1a1605d5160f11369a7998c61ce057",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resume/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10891"
},
{
"name": "Python",
"bytes": "16351"
},
{
"name": "Ruby",
"bytes": "822"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
from django.conf import settings as django_settings
from django.utils.functional import cached_property
from django_vest.fields import get_field
class Simple(object):
""" Simple wrapper around settings file.
"""
source = django_settings
def __getattr__(self, name):
return getattr(self.source, name)
@property
def CURRENT_THEME(self):
""" Trying to getting `CURRENT_THEME` parameter
from settings or os env.
"""
return getattr(self.source, 'CURRENT_THEME', None)
@property
def DEFAULT_THEME(self):
return getattr(self.source, 'DEFAULT_THEME', None)
class Env(object):
""" Receive settings for OS env.
"""
@property
def CURRENT_THEME(self):
""" Trying to getting `CURRENT_THEME` parameter
from settings or os env.
"""
return os.environ.get('DJANGO_VEST_CURRENT_THEME', None)
@property
def DEFAULT_THEME(self):
return os.environ.get('DJANGO_VEST_DEFAULT_THEME', None)
class Database(object):
""" Receive `CURRENT_THEME` for db field (django_vest.fields.VestField).
"""
@property
def CURRENT_THEME(self):
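        # Look up the model that declares the VestField and read the theme
        # value from its first row (returns None when the table is empty).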
field = get_field()
settings = field.model.objects.first()
if settings:
return getattr(settings, field.name)
simple = Simple()
env = Env()
database = Database()
|
{
"content_hash": "3fe03840f3c249a9125f1029a263a18c",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 76,
"avg_line_length": 24.53448275862069,
"alnum_prop": 0.6416022487702038,
"repo_name": "zerc/django-vest",
"id": "ff1f8f26d031a5b9a90273fa491dc71a7c3e07b4",
"size": "1439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_vest/config/backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2876"
},
{
"name": "Makefile",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "32457"
}
],
"symlink_target": ""
}
|
import pickle
from datetime import datetime
from loguru import logger
from sqlalchemy import Column, DateTime, Index, Integer, String, Unicode, select
from flexget import db_schema, plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import json, serialization
from flexget.utils.database import entry_synonym
from flexget.utils.sqlalchemy_utils import table_add_column, table_schema
from flexget.utils.tools import parse_timedelta
logger = logger.bind(name='delay')
Base = db_schema.versioned_base('delay', 3)
class DelayedEntry(Base):
__tablename__ = 'delay'
id = Column(Integer, primary_key=True)
task = Column('feed', String)
title = Column(Unicode)
expire = Column(DateTime)
_json = Column('json', Unicode)
entry = entry_synonym('_json')
def __repr__(self):
return '<DelayedEntry(title=%s)>' % self.title
Index('delay_feed_title', DelayedEntry.task, DelayedEntry.title)
# TODO: index "expire, task"
@db_schema.upgrade('delay')
def upgrade(ver, session):
if ver is None:
# Upgrade to version 0 was a failed attempt at cleaning bad entries from our table, better attempt in ver 1
ver = 1
if ver == 1:
table = table_schema('delay', session)
table_add_column(table, 'json', Unicode, session)
# Make sure we get the new schema with the added column
table = table_schema('delay', session)
failures = 0
for row in session.execute(select([table.c.id, table.c.entry])):
try:
p = pickle.loads(row['entry'])
session.execute(
table.update()
.where(table.c.id == row['id'])
.values(json=json.dumps(p, encode_datetime=True))
)
except (KeyError, ImportError):
failures += 1
if failures > 0:
logger.error(
'Error upgrading {} pickle objects. Some delay information has been lost.',
failures,
)
ver = 2
if ver == 2:
table = table_schema('delay', session)
for row in session.execute(select([table.c.id, table.c.json])):
if not row['json']:
# Seems there could be invalid data somehow. See #2590
continue
data = json.loads(row['json'], decode_datetime=True)
# If title looked like a date, make sure it's a string
title = str(data.pop('title'))
e = Entry(title=title, **data)
session.execute(
table.update().where(table.c.id == row['id']).values(json=serialization.dumps(e))
)
ver = 3
return ver
class FilterDelay:
"""
Add delay to a task. This is useful for de-prioritizing expensive / bad-quality tasks.
Format: n [minutes|hours|days|weeks]
Example::
delay: 2 hours
"""
schema = {'type': 'string', 'format': 'interval'}
def get_delay(self, config):
logger.debug('delay: {}', config)
try:
return parse_timedelta(config)
except ValueError:
raise plugin.PluginError('Invalid time format', logger)
@plugin.priority(-1)
def on_task_input(self, task, config):
"""Captures the current input then replaces it with entries that have passed the delay."""
if task.entries:
logger.verbose('Delaying {} new entries for {}', len(task.entries), config)
# Let details plugin know that it is ok if this task doesn't produce any entries
task.no_entries_ok = True
# First learn the current entries in the task to the database
expire_time = datetime.now() + self.get_delay(config)
for entry in task.entries:
logger.debug('Delaying {}', entry['title'])
# check if already in queue
if (
not task.session.query(DelayedEntry)
.filter(DelayedEntry.title == entry['title'])
.filter(DelayedEntry.task == task.name)
.first()
):
delay_entry = DelayedEntry()
delay_entry.title = entry['title']
delay_entry.entry = entry
delay_entry.task = task.name
delay_entry.expire = expire_time
task.session.add(delay_entry)
# Clear the current entries from the task now that they are stored
task.all_entries[:] = []
# Generate the list of entries whose delay has passed
passed_delay = (
task.session.query(DelayedEntry)
.filter(datetime.now() > DelayedEntry.expire)
.filter(DelayedEntry.task == task.name)
)
delayed_entries = [item.entry for item in passed_delay.all()]
for entry in delayed_entries:
entry['passed_delay'] = True
logger.debug('Releasing {}', entry['title'])
# Delete the entries from the db we are about to inject
passed_delay.delete()
if delayed_entries:
logger.verbose('Restoring {} entries that have passed delay.', len(delayed_entries))
# Return our delayed entries
return delayed_entries
@event('plugin.register')
def register_plugin():
plugin.register(FilterDelay, 'delay', api_ver=2)
|
{
"content_hash": "2d3a6f8f375c7771fbc3930f93b8476e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 115,
"avg_line_length": 35.02614379084967,
"alnum_prop": 0.5947005038253406,
"repo_name": "crawln45/Flexget",
"id": "4e65b1ce58d9868a977a2a30b19710ead21e6810",
"size": "5359",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/plugins/filter/delay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1233"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3761134"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1586"
}
],
"symlink_target": ""
}
|
'''
Test resizing a VM data volume after the VM's image has been deleted and expunged.
@author: SyZhao
'''
import os
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
image1 = None
def test():
global image1
hosts = res_ops.query_resource(res_ops.HOST)
if len(hosts) <= 1:
test_util.test_skip("skip for host_num is not satisfy condition host_num>1")
bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
image_name1 = 'image1_a'
image_option = test_util.ImageOption()
image_option.set_format('qcow2')
image_option.set_name(image_name1)
#image_option.set_system_tags('qemuga')
image_option.set_mediaType('RootVolumeTemplate')
image_option.set_url(os.environ.get('imageUrl_s'))
image_option.set_backup_storage_uuid_list([bss[0].uuid])
image_option.set_timeout(3600*1000)
image1 = zstack_image_header.ZstackTestImage()
image1.set_creation_option(image_option)
image1.add_root_volume_template()
image1.check()
image_name = os.environ.get('imageName_net')
l3_name = os.environ.get('l3VlanNetworkName1')
vm1 = test_stub.create_vm(image_name1, image_name, l3_name)
test_obj_dict.add_vm(vm1)
image1.delete()
image1.expunge()
#target_host = test_lib.lib_find_random_host(vm1.vm)
#vm1.migrate(target_host.uuid)
test_stub.vm_ops_test(vm1, "VM_TEST_RESIZE_DVOL")
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Create VM Image in Image Store Success')
#Will be called only if exception happens in test().
def error_cleanup():
global image1
test_lib.lib_error_cleanup(test_obj_dict)
try:
image1.delete()
except:
pass
|
{
"content_hash": "dfbc4653f655da85f913e92a77c3a10f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 95,
"avg_line_length": 33.196969696969695,
"alnum_prop": 0.7133728890917389,
"repo_name": "zstackio/zstack-woodpecker",
"id": "ea749d9ce57ff2629938a664ea8c8a67552cc0ae",
"size": "2191",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/bs/test_del_expunge_img_vm_resize_data_vol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
'''
Given two words word1 and word2, find the minimum number of steps required to convert word1 to word2. (each operation is counted as 1 step.)
You have the following 3 operations permitted on a word:
a) Insert a character
b) Delete a character
c) Replace a character
'''
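# Added note (not part of the original problem statement): the solution below is
# the classic dynamic-programming formulation, where dp[i][j] is the edit
# distance between word1[:i] and word2[:j]:
#   dp[i][0] = i, dp[0][j] = j
#   dp[i][j] = dp[i-1][j-1]                     if word1[i-1] == word2[j-1]
#            = 1 + min(dp[i-1][j-1],  # replace
#                      dp[i-1][j],    # delete
#                      dp[i][j-1])    # insert   otherwise
# For example, minDistance("horse", "ros") == 3.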
class Solution:
# @return an integer
def minDistance(self, word1, word2):
size1 = len(word1)
size2 = len(word2)
if size1 == 0:
return size2
elif size2 == 0:
return size1
dp_array = [ [0] * (size2 + 1) for index in range(size1 + 1) ]
        # Base cases: converting a prefix to or from the empty string costs its length
        for index in range(size1 + 1):
            dp_array[index][0] = index
        for index in range(size2 + 1):
            dp_array[0][index] = index
for index1 in range(1, size1+1):
for index2 in range(1, size2+1):
if word1[index1 - 1] == word2[index2 - 1]:
dp_array[index1][index2] = dp_array[index1-1][index2-1]
else:
dp_array[index1][index2] = min([dp_array[index1-1][index2-1], dp_array[index1-1][index2], dp_array[index1][index2-1]]) + 1
return dp_array[-1][-1]
if __name__ == '__main__':
solution = Solution()
    print(solution.minDistance("pneumonoultramicroscopicsilicovolcanoconiosis", "ultramicroscopically"))
|
{
"content_hash": "9d7db3227210700243eb5fb0a9bd433c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 146,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.5743919885550787,
"repo_name": "shub0/algorithm-data-structure",
"id": "d1e1055856f8e8a7c763e521c0321a668b2eaf6e",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/edit_distance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "166293"
},
{
"name": "Python",
"bytes": "487573"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
sys.path.append(os.path.dirname(__file__))
from gerrit import GerritChange, argparse_gerrit_change_type # noqa: E402
def main():
parser = argparse.ArgumentParser(
description='Get a Gerrit change attribute')
parser.add_argument('change', type=argparse_gerrit_change_type,
help='the Gerrit change number and an optional patch '
'number (e.g. 1234 or 1234/1). If the patch '
'number is not supplied, the latest patch will '
'be used')
parser.add_argument('--attr',
required=True,
help='GerritChange object attribute name')
args = parser.parse_args()
change = GerritChange(args.change)
print(getattr(change, args.attr))
if __name__ == '__main__':
main()
|
{
"content_hash": "5d3398234357476c722de9ad56004d58",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 31.821428571428573,
"alnum_prop": 0.5757575757575758,
"repo_name": "SUSE-Cloud/automation",
"id": "c2181b9fe441e99d29801ee7f57b9275d9059afd",
"size": "914",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/jenkins/cloud/gerrit/gerrit_get.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3703"
},
{
"name": "Emacs Lisp",
"bytes": "135"
},
{
"name": "Groovy",
"bytes": "9471"
},
{
"name": "Jinja",
"bytes": "47616"
},
{
"name": "Makefile",
"bytes": "3438"
},
{
"name": "Perl",
"bytes": "35913"
},
{
"name": "Python",
"bytes": "435834"
},
{
"name": "Ruby",
"bytes": "29921"
},
{
"name": "Shell",
"bytes": "628238"
}
],
"symlink_target": ""
}
|
"""Tests for StructuredTensorSpec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.structured import structured_tensor
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor
from tensorflow.python.ops.structured.structured_tensor import StructuredTensorSpec
from tensorflow.python.platform import googletest
# TypeSpecs consts for fields types.
T_3 = tensor_spec.TensorSpec([3])
T_1_2 = tensor_spec.TensorSpec([1, 2])
T_1_2_8 = tensor_spec.TensorSpec([1, 2, 8])
T_1_2_3_4 = tensor_spec.TensorSpec([1, 2, 3, 4])
T_2_3 = tensor_spec.TensorSpec([2, 3])
R_1_N = ragged_tensor.RaggedTensorSpec([1, None])
R_1_N_N = ragged_tensor.RaggedTensorSpec([1, None, None])
R_2_1_N = ragged_tensor.RaggedTensorSpec([2, 1, None])
# pylint: disable=g-long-lambda
@test_util.run_all_in_graph_and_eager_modes
class StructuredTensorSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
# TODO(edloper): Add a subclass of TensorFlowTestCase that overrides
# assertAllEqual etc to work with StructuredTensors.
def assertAllEqual(self, a, b, msg=None):
if not (isinstance(a, structured_tensor.StructuredTensor) or
isinstance(b, structured_tensor.StructuredTensor)):
return super(StructuredTensorSpecTest, self).assertAllEqual(a, b, msg)
if not (isinstance(a, structured_tensor.StructuredTensor) and
isinstance(b, structured_tensor.StructuredTensor)):
# TODO(edloper) Add support for this once structured_factory_ops is added.
raise ValueError('Not supported yet')
self.assertEqual(repr(a.shape), repr(b.shape))
self.assertEqual(set(a.field_names()), set(b.field_names()))
for field in a.field_names():
self.assertAllEqual(a.field_value(field), b.field_value(field))
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1_fields = dict(a=T_1_2_3_4)
spec1 = StructuredTensorSpec([1, 2, 3], spec1_fields)
self.assertEqual(spec1._shape, (1, 2, 3))
self.assertEqual(spec1._field_specs, spec1_fields)
spec2_fields = dict(a=T_1_2, b=T_1_2_8, c=R_1_N, d=R_1_N_N, s=spec1)
spec2 = StructuredTensorSpec([1, 2], spec2_fields)
self.assertEqual(spec2._shape, (1, 2))
self.assertEqual(spec2._field_specs, spec2_fields)
@parameterized.parameters([
(None, {}, r"StructuredTensor's shape must have known rank\."),
([], None, r'field_specs must be a dictionary\.'),
([], {1: tensor_spec.TensorSpec(None)},
r'field_specs must be a dictionary with string keys\.'),
([], {'x': 0},
r'field_specs must be a dictionary with TypeSpec values\.'),
])
def testConstructionErrors(self, shape, field_specs, error):
with self.assertRaisesRegex(TypeError, error):
structured_tensor.StructuredTensorSpec(shape, field_specs)
def testValueType(self):
spec1 = StructuredTensorSpec([1, 2, 3], dict(a=T_1_2))
self.assertEqual(spec1.value_type, StructuredTensor)
@parameterized.parameters([
(StructuredTensorSpec([1, 2, 3], {}),
(tensor_shape.TensorShape([1, 2, 3]), {})),
(StructuredTensorSpec([], {'a': T_1_2}),
(tensor_shape.TensorShape([]), {'a': T_1_2})),
(StructuredTensorSpec([1, 2], {'a': T_1_2, 'b': R_1_N}),
(tensor_shape.TensorShape([1, 2]), {'a': T_1_2, 'b': R_1_N})),
(StructuredTensorSpec([], {'a': T_1_2}),
(tensor_shape.TensorShape([]), {'a': T_1_2})),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# Note that we can only use assertEqual because none of our cases include
# a None dimension. A TensorShape with a None dimension is never equal
# to another TensorShape.
self.assertEqual(serialization, expected)
@parameterized.parameters([
(StructuredTensorSpec([1, 2, 3], {}), {}),
(StructuredTensorSpec([], {'a': T_1_2}), {'a': T_1_2}),
(StructuredTensorSpec([1, 2], {'a': T_1_2, 'b': R_1_N}),
{'a': T_1_2, 'b': R_1_N}),
(StructuredTensorSpec([], {'a': T_1_2}), {'a': T_1_2}),
]) # pyformat: disable
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
'shape': [],
'fields': dict(x=[[1.0, 2.0]]),
'field_specs': dict(x=T_1_2),
},
# TODO(edloper): Enable this test once we update StructuredTensorSpec
# to contain the shared row partitions.
#{
# 'shape': [1, 2, 3],
# 'fields': {},
# 'field_specs': {},
#},
{
'shape': [2],
'fields': dict(
a=ragged_factory_ops.constant_value([[1.0], [2.0, 3.0]]),
b=[[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]),
'field_specs': dict(a=R_1_N, b=T_2_3),
},
]) # pyformat: disable
def testToFromComponents(self, shape, fields, field_specs):
components = fields
struct = StructuredTensor.from_fields(fields, shape)
spec = StructuredTensorSpec(shape, field_specs)
actual_components = spec._to_components(struct)
self.assertAllTensorsEqual(actual_components, components)
rt_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(struct, rt_reconstructed)
@parameterized.parameters([
{
'unbatched': StructuredTensorSpec([], {}),
'batch_size': 5,
'batched': StructuredTensorSpec([5], {}),
},
{
'unbatched': StructuredTensorSpec([1, 2], {}),
'batch_size': 5,
'batched': StructuredTensorSpec([5, 1, 2], {}),
},
{
'unbatched': StructuredTensorSpec([], dict(a=T_3, b=R_1_N)),
'batch_size': 2,
'batched': StructuredTensorSpec([2], dict(a=T_2_3, b=R_2_1_N)),
}
]) # pyformat: disable
def testBatchUnbatch(self, unbatched, batch_size, batched):
self.assertEqual(unbatched._batch(batch_size), batched)
self.assertEqual(batched._unbatch(), unbatched)
@parameterized.parameters([
{
'unbatched': lambda: [
StructuredTensor.from_fields({'a': 1, 'b': [5, 6]}),
StructuredTensor.from_fields({'a': 2, 'b': [7, 8]})],
'batch_size': 2,
'batched': lambda: StructuredTensor.from_fields(shape=[2], fields={
'a': [1, 2],
'b': [[5, 6], [7, 8]]}),
},
{
'unbatched': lambda: [
StructuredTensor.from_fields(shape=[3], fields={
'a': [1, 2, 3],
'b': [[5, 6], [6, 7], [7, 8]]}),
StructuredTensor.from_fields(shape=[3], fields={
'a': [2, 3, 4],
'b': [[2, 2], [3, 3], [4, 4]]})],
'batch_size': 2,
'batched': lambda: StructuredTensor.from_fields(shape=[2, 3], fields={
'a': [[1, 2, 3], [2, 3, 4]],
'b': [[[5, 6], [6, 7], [7, 8]],
[[2, 2], [3, 3], [4, 4]]]}),
},
{
'unbatched': lambda: [
StructuredTensor.from_fields(shape=[], fields={
'a': 1,
'b': StructuredTensor.from_fields({'x': [5]})}),
StructuredTensor.from_fields(shape=[], fields={
'a': 2,
'b': StructuredTensor.from_fields({'x': [6]})})],
'batch_size': 2,
'batched': lambda: StructuredTensor.from_fields(shape=[2], fields={
'a': [1, 2],
'b': StructuredTensor.from_fields(shape=[2], fields={
'x': [[5], [6]]})}),
},
]) # pyformat: disable
def testBatchUnbatchValues(self, unbatched, batch_size, batched):
batched = batched() # Deferred init because it creates tensors.
unbatched = unbatched() # Deferred init because it creates tensors.
# Test batching.
unbatched_spec = type_spec.type_spec_from_value(unbatched[0])
unbatched_tensor_lists = [unbatched_spec._to_tensor_list(st)
for st in unbatched]
batched_tensor_list = [array_ops.stack(tensors)
for tensors in zip(*unbatched_tensor_lists)]
actual_batched = unbatched_spec._batch(batch_size)._from_tensor_list(
batched_tensor_list)
self.assertAllEqual(actual_batched, batched)
# Test unbatching
batched_spec = type_spec.type_spec_from_value(batched)
batched_tensor_list = batched_spec._to_tensor_list(batched)
unbatched_tensor_lists = zip(
*[array_ops.unstack(tensor) for tensor in batched_tensor_list])
actual_unbatched = [
batched_spec._unbatch()._from_tensor_list(tensor_list)
for tensor_list in unbatched_tensor_lists]
self.assertLen(actual_unbatched, len(unbatched))
for (actual, expected) in zip(actual_unbatched, unbatched):
self.assertAllEqual(actual, expected)
if __name__ == '__main__':
googletest.main()
|
{
"content_hash": "18108a6173b6615b74a424c6ec9ea264",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 83,
"avg_line_length": 41.14718614718615,
"alnum_prop": 0.6094687006838506,
"repo_name": "aldian/tensorflow",
"id": "4637a1a51e5c0118fbabf88e15768297f07ef43b",
"size": "10194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/structured/structured_tensor_spec_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
}
|
from .__cmd_group import *
from ._create import *
from ._delete import *
from ._list import *
from ._search import *
from ._show import *
from ._update import *
|
{
"content_hash": "ec12b3775f8acbc479b3b39f0e5bdc71",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 26,
"avg_line_length": 23,
"alnum_prop": 0.6832298136645962,
"repo_name": "yugangw-msft/azure-cli",
"id": "10a024b2268bc60b8014c8969891fd81cd12fb6c",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/monitor/aaz/latest/monitor/log_analytics/query_pack/query/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
}
|
try:
from django_hasher import check_password, make_password
except ImportError:
import bcrypt
def make_password(password):
return bcrypt.hashpw(password, bcrypt.gensalt())
def check_password(password, hashed):
return bcrypt.hashpw(password, hashed) == hashed
|
{
"content_hash": "f7319803888696e09223d808d35d8226",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 59,
"avg_line_length": 29.4,
"alnum_prop": 0.7108843537414966,
"repo_name": "ymero/workin",
"id": "7b4233fa989ef7638f344326086cc9dc2af75f63",
"size": "341",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workin/exts/auth/hasher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3041"
},
{
"name": "HTML",
"bytes": "28209"
},
{
"name": "JavaScript",
"bytes": "272946"
},
{
"name": "Python",
"bytes": "102019"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from osmp.views import payment
urlpatterns = patterns('',
url(r'^payment/$', payment, name='osmp-payment'),
)
|
{
"content_hash": "9483ec37579d76870142d3463fbcfb95",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 22.285714285714285,
"alnum_prop": 0.7051282051282052,
"repo_name": "oxyum/django-payment-osmp",
"id": "7d5a00adfa3618865e92360069e29968874ad385",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osmp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6552"
}
],
"symlink_target": ""
}
|
import os
import threading
from multiprocessing import Queue
import multiprocessing
import time
import sys
from fuse import FUSE, FuseOSError, Operations
from optparse import OptionParser
from Queue import Empty
from fusecache import LoopBack
from fusecache import AccessInfo
from ..config import DiscoveryConst as DiscoveryConst
from ..log import logging
LOG = logging.getLogger(__name__)
_cache_monitor_instance = None
_fuse_instance = None
def get_instance():
global _cache_monitor_instance
global _fuse_instance
if _cache_monitor_instance is None:
LOG.info("[CACHE] FUSE mount at %s, which is loop back of %s" % \
(DiscoveryConst.CLOUDLET_FS_ROOT, DiscoveryConst.DFS_ROOT))
access_queue = Queue()
_fuse_instance = FuseLauncher(DiscoveryConst.CLOUDLET_FS_ROOT,\
DiscoveryConst.DFS_ROOT, access_queue)
_fuse_instance.start()
LOG.info("[CACHE] start Cache monitoring")
_cache_monitor_instance = _CacheMonitor(access_queue,\
DiscoveryConst.DFS_ROOT, print_out=False)
_cache_monitor_instance.start()
return _cache_monitor_instance
def terminate():
global _cache_monitor_instance
global _fuse_instance
if _cache_monitor_instance is not None:
_cache_monitor_instance.terminate()
_cache_monitor_instance = None
if _fuse_instance is not None:
_fuse_instance.terminate()
_fuse_instance = None
class CacheMonitorError(Exception):
pass
class _CacheMonitor(threading.Thread):
def __init__(self, access_queue, dfs_root, print_out=False):
self.access_queue = access_queue
self.dfs_root = dfs_root
self.print_out = print_out
self.stop = threading.Event()
self.cache_info_dict = dict() # inode:cache_status
threading.Thread.__init__(self, target=self.process)
def process(self):
while (self.stop.wait(0.01) is False):
try:
access = self.access_queue.get_nowait()
if access.cmd == AccessInfo.CMD_READ or \
access.cmd == AccessInfo.CMD_WRITE:
self.cache_info_dict[access.inode] = access
if self.print_out is True:
print access
except Empty:
continue
def cached_files(self):
file_list = list()
for (inode, access) in self.cache_info_dict.iteritems():
relpath = os.path.relpath(access.full_path, self.dfs_root)
file_list.append(relpath)
file_list.sort()
return file_list
def check_file(self, filename, is_abspath=False):
if is_abspath is True:
abspath = filename
else:
abspath = os.path.join(self.dfs_root, filename)
if os.path.exists(abspath) is False:
return False
else:
inode = os.stat(abspath).st_ino
access_info = self.cache_info_dict.get(inode, None)
if access_info is not None:
return True
else:
return False
def terminate(self):
LOG.info("get signal")
self.stop.set()
class CmdInterface(threading.Thread):
def __init__(self, cache_monitor):
self.cache_monitor = cache_monitor
threading.Thread.__init__(self, target=self.run)
def run(self):
while True:
user_input = raw_input("> ").lower().strip()
if user_input == "list":
print "\n".join(self.cache_monitor.cached_files())
elif len(user_input) == 0:
continue
elif user_input == 'q':
break
else:
print "Invalid command"
def terminate(self):
pass
class FuseLauncher(multiprocessing.Process):
def __init__(self, mountpoint, root, access_queue):
self.stop = threading.Event()
self.mountpoint = mountpoint
self.root = root
self.access_queue = access_queue
if os.path.isdir(self.root) is False or\
os.access(self.root, os.R_OK | os.W_OK) is False:
msg = "Failed to setup cache monitoring at %s\n" % self.root
msg += "Please create a directory for the distributed file system at %s\n" %\
self.root
msg += "Or you can change path to the directory at\n"
msg += "elijah-discovery/elijah/discovery/Const.py, DFS_ROOT variable"
raise CacheMonitorError(msg)
if os.path.isdir(self.mountpoint) is False or\
os.access(self.mountpoint, os.R_OK | os.W_OK) is False:
msg = "Failed to setup cache monitoring at %s\n" % self.mountpoint
msg += "Please create a directory for the loopback at %s\n" %\
self.mountpoint
msg += "Or you can change path to the directory at\n"
msg += "elijah-discovery/elijah/discovery/Const.py, "
msg += "CLOUDLET_FS_ROOT variable"
raise CacheMonitorError(msg)
multiprocessing.Process.__init__(self)
def run(self):
FUSE(LoopBack(self.root, self.access_queue), self.mountpoint, foreground=True)
def terminate(self):
self.stop.set()
def process_command_line(argv):
VERSION = '0.1'
DESCRIPTION = 'Cache monitor'
parser = OptionParser(usage='%prog [mount_point] [root] [options]',
version=VERSION, description=DESCRIPTION)
parser.add_option(
'-v', '--verbose', action='store_true', dest='print_console', default=False,
help="print out access info in realtime")
settings, args = parser.parse_args(argv)
if len(args) is not 2:
parser.error("Need mount point and root path")
mount_point = args[0]
lookback_path = args[1]
return mount_point, lookback_path, settings
def main():
mountpoint, root, settings = process_command_line(sys.argv[1:])
access_queue = Queue()
fuse = FuseLauncher(mountpoint, root, access_queue)
    cache_monitor = _CacheMonitor(access_queue, root, print_out=settings.print_console)
cmdline_interface = None
if not settings.print_console:
cmdline_interface = CmdInterface(cache_monitor)
cmdline_interface.start()
fuse.start()
cache_monitor.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt as e:
print "User interrupt"
ret_code = 1
finally:
if cache_monitor is not None:
cache_monitor.terminate()
if fuse is not None:
fuse.terminate()
if cmdline_interface is not None:
cmdline_interface.terminate()
if __name__ == '__main__':
main()
|
{
"content_hash": "8699596cf10c2fa0025d5bb4580b678d",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 89,
"avg_line_length": 31.891509433962263,
"alnum_prop": 0.6024256766750481,
"repo_name": "OpenEdgeComputing/elijah-discovery-basic",
"id": "a9aa6270ed29b7f7d0b15ce1ed348df91a0dd9d2",
"size": "7495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "elijah/discovery/monitor/file_cache.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "18223"
},
{
"name": "Makefile",
"bytes": "296"
},
{
"name": "Python",
"bytes": "129226"
},
{
"name": "Shell",
"bytes": "332"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import pytest
from pytest import mark, param
import ibis.expr.datatypes as dt
from ibis import _
from ibis import literal as L
from ibis.udf.vectorized import reduction
@reduction(input_type=[dt.double], output_type=dt.double)
def mean_udf(s):
return s.mean()
aggregate_test_params = [
param(
lambda t: t.double_col.mean(),
lambda t: t.double_col.mean(),
id='mean',
),
param(
lambda t: mean_udf(t.double_col),
lambda t: t.double_col.mean(),
id='mean_udf',
marks=[
pytest.mark.notimpl(
[
"datafusion",
"postgres",
"clickhouse",
"impala",
"duckdb",
"polars",
"snowflake",
"mssql",
]
),
pytest.mark.never(["sqlite", "mysql"], reason="no udf support"),
],
),
param(
lambda t: t.double_col.min(),
lambda t: t.double_col.min(),
id='min',
),
param(
lambda t: t.double_col.max(),
lambda t: t.double_col.max(),
id='max',
),
param(
# int_col % 3 so there are no ties for most common value
lambda t: (t.int_col % 3).mode(),
lambda t: (t.int_col % 3).mode().iloc[0],
id='mode',
marks=pytest.mark.notyet(
[
"clickhouse",
"datafusion",
"impala",
"mysql",
"pyspark",
"sqlite",
"mssql",
]
),
),
param(
lambda t: (t.double_col + 5).sum(),
lambda t: (t.double_col + 5).sum(),
id='complex_sum',
),
param(
lambda t: t.timestamp_col.max(),
lambda t: t.timestamp_col.max(),
id='timestamp_max',
),
]
argidx_not_grouped_marks = [
"datafusion",
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
"snowflake",
"polars",
"mssql",
]
argidx_grouped_marks = ["dask"] + argidx_not_grouped_marks
def make_argidx_params(marks):
marks = pytest.mark.notyet(marks)
return [
param(
lambda t: t.timestamp_col.argmin(t.int_col),
lambda s: s.timestamp_col.iloc[s.int_col.argmin()],
id='argmin',
marks=marks,
),
param(
lambda t: t.double_col.argmax(t.int_col),
lambda s: s.double_col.iloc[s.int_col.argmax()],
id='argmax',
marks=marks,
),
]
@pytest.mark.parametrize(
('result_fn', 'expected_fn'),
aggregate_test_params + make_argidx_params(argidx_not_grouped_marks),
)
def test_aggregate(backend, alltypes, df, result_fn, expected_fn):
expr = alltypes.aggregate(tmp=result_fn)
result = expr.execute()
# Create a single-row single-column dataframe with the Pandas `agg` result
# (to match the output format of Ibis `aggregate`)
expected = pd.DataFrame({'tmp': [expected_fn(df)]})
backend.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
('result_fn', 'expected_fn'),
aggregate_test_params + make_argidx_params(argidx_grouped_marks),
)
def test_aggregate_grouped(backend, alltypes, df, result_fn, expected_fn):
grouping_key_col = 'bigint_col'
# Two (equivalent) variations:
# 1) `group_by` then `aggregate`
# 2) `aggregate` with `by`
expr1 = alltypes.group_by(grouping_key_col).aggregate(tmp=result_fn)
expr2 = alltypes.aggregate(tmp=result_fn, by=grouping_key_col)
result1 = expr1.execute()
result2 = expr2.execute()
# Note: Using `reset_index` to get the grouping key as a column
expected = (
df.groupby(grouping_key_col).apply(expected_fn).rename('tmp').reset_index()
)
# Row ordering may differ depending on backend, so sort on the
# grouping key
result1 = result1.sort_values(by=grouping_key_col).reset_index(drop=True)
result2 = result2.sort_values(by=grouping_key_col).reset_index(drop=True)
expected = expected.sort_values(by=grouping_key_col).reset_index(drop=True)
backend.assert_frame_equal(result1, expected)
backend.assert_frame_equal(result2, expected)
@mark.notimpl(
[
"clickhouse",
"datafusion",
"duckdb",
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
"snowflake",
"polars",
"mssql",
]
)
def test_aggregate_multikey_group_reduction_udf(backend, alltypes, df):
"""Tests .aggregate() on a multi-key group_by with a reduction
operation."""
@reduction(
input_type=[dt.double],
output_type=dt.Struct(['mean', 'std'], [dt.double, dt.double]),
)
def mean_and_std(v):
return v.mean(), v.std()
grouping_key_cols = ['bigint_col', 'int_col']
expr1 = alltypes.group_by(grouping_key_cols).aggregate(
mean_and_std(alltypes['double_col']).destructure()
)
result1 = expr1.execute()
# Note: Using `reset_index` to get the grouping key as a column
expected = (
df.groupby(grouping_key_cols)['double_col'].agg(['mean', 'std']).reset_index()
)
# Row ordering may differ depending on backend, so sort on the
# grouping key
result1 = result1.sort_values(by=grouping_key_cols).reset_index(drop=True)
expected = expected.sort_values(by=grouping_key_cols).reset_index(drop=True)
backend.assert_frame_equal(result1, expected)
@pytest.mark.parametrize(
('result_fn', 'expected_fn'),
[
param(
lambda t, where: t.bool_col.count(where=where),
lambda t, where: len(t.bool_col[where].dropna()),
id='count',
),
param(
lambda t, _: t.bool_col.any(),
lambda t, _: t.bool_col.any(),
id='any',
marks=pytest.mark.notimpl(["polars", "datafusion"]),
),
param(
lambda t, _: t.bool_col.notany(),
lambda t, _: ~t.bool_col.any(),
id='notany',
marks=pytest.mark.notimpl(["polars", "datafusion", "mssql"]),
),
param(
lambda t, _: -t.bool_col.any(),
lambda t, _: ~t.bool_col.any(),
id='any_negate',
marks=pytest.mark.notimpl(["polars", "datafusion", "mssql"]),
),
param(
lambda t, _: t.bool_col.all(),
lambda t, _: t.bool_col.all(),
id='all',
marks=pytest.mark.notimpl(["polars", "datafusion"]),
),
param(
lambda t, _: t.bool_col.notall(),
lambda t, _: ~t.bool_col.all(),
id='notall',
marks=pytest.mark.notimpl(["polars", "datafusion", "mssql"]),
),
param(
lambda t, _: -t.bool_col.all(),
lambda t, _: ~t.bool_col.all(),
id='all_negate',
marks=pytest.mark.notimpl(["polars", "datafusion", "mssql"]),
),
param(
lambda t, where: t.double_col.sum(where=where),
lambda t, where: t.double_col[where].sum(),
id='sum',
),
param(
lambda t, where: t.double_col.mean(where=where),
lambda t, where: t.double_col[where].mean(),
id='mean',
),
param(
lambda t, where: t.double_col.min(where=where),
lambda t, where: t.double_col[where].min(),
id='min',
),
param(
lambda t, where: t.double_col.max(where=where),
lambda t, where: t.double_col[where].max(),
id='max',
),
param(
# int_col % 3 so there are no ties for most common value
lambda t, where: (t.int_col % 3).mode(where=where),
lambda t, where: (t.int_col % 3)[where].mode().iloc[0],
id='mode',
marks=pytest.mark.notyet(
[
"clickhouse",
"datafusion",
"impala",
"mysql",
"pyspark",
"sqlite",
"mssql",
]
),
),
param(
lambda t, where: t.double_col.argmin(t.int_col, where=where),
lambda t, where: t.double_col[where].iloc[t.int_col[where].argmin()],
id='argmin',
marks=pytest.mark.notyet(
[
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
"snowflake",
"polars",
"datafusion",
"mssql",
]
),
),
param(
lambda t, where: t.double_col.argmax(t.int_col, where=where),
lambda t, where: t.double_col[where].iloc[t.int_col[where].argmax()],
id='argmax',
marks=pytest.mark.notyet(
[
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
"snowflake",
"polars",
"datafusion",
"mssql",
]
),
),
param(
lambda t, where: t.double_col.std(how='sample', where=where),
lambda t, where: t.double_col[where].std(ddof=1),
id='std',
marks=[mark.notimpl(["datafusion", "mssql"])],
),
param(
lambda t, where: t.double_col.var(how='sample', where=where),
lambda t, where: t.double_col[where].var(ddof=1),
id='var',
marks=[mark.notimpl(["datafusion", "mssql"])],
),
param(
lambda t, where: t.double_col.std(how='pop', where=where),
lambda t, where: t.double_col[where].std(ddof=0),
id='std_pop',
marks=[mark.notimpl(["datafusion", "mssql"])],
),
param(
lambda t, where: t.double_col.var(how='pop', where=where),
lambda t, where: t.double_col[where].var(ddof=0),
id='var_pop',
marks=[mark.notimpl(["datafusion", "mssql"])],
),
param(
lambda t, where: t.string_col.approx_nunique(where=where),
lambda t, where: t.string_col[where].nunique(),
id='approx_nunique',
marks=pytest.mark.notimpl(['polars', "datafusion"]),
),
param(
lambda t, where: t.double_col.arbitrary(how='first', where=where),
lambda t, where: t.double_col[where].iloc[0],
id='arbitrary_first',
marks=pytest.mark.notimpl(
[
'impala',
'postgres',
'mysql',
'sqlite',
'snowflake',
'polars',
'datafusion',
"mssql",
]
),
),
param(
lambda t, where: t.double_col.arbitrary(how='last', where=where),
lambda t, where: t.double_col[where].iloc[-1],
id='arbitrary_last',
marks=pytest.mark.notimpl(
[
'impala',
'postgres',
'mysql',
'sqlite',
'snowflake',
'polars',
'datafusion',
"mssql",
]
),
),
param(
lambda t, where: t.double_col.arbitrary(how='heavy', where=where),
lambda t, where: t.double_col[where].iloc[8],
id='arbitrary_heavy',
# only clickhouse implements this option
marks=pytest.mark.notimpl(
[
"dask",
"datafusion",
"duckdb",
"impala",
"mysql",
"pandas",
"postgres",
"pyspark",
"sqlite",
"snowflake",
"polars",
"mssql",
],
),
),
param(
lambda t, where: t.bigint_col.bit_and(where=where),
lambda t, where: np.bitwise_and.reduce(t.bigint_col[where].values),
id='bit_and',
marks=[
pytest.mark.notimpl(
["dask", "snowflake", "polars", "datafusion", "mssql"]
),
pytest.mark.notyet(["impala", "pyspark"]),
],
),
param(
lambda t, where: t.bigint_col.bit_or(where=where),
lambda t, where: np.bitwise_or.reduce(t.bigint_col[where].values),
id='bit_or',
marks=[
pytest.mark.notimpl(
["dask", "snowflake", "polars", "datafusion", "mssql"]
),
pytest.mark.notyet(["impala", "pyspark"]),
],
),
param(
lambda t, where: t.bigint_col.bit_xor(where=where),
lambda t, where: np.bitwise_xor.reduce(t.bigint_col[where].values),
id='bit_xor',
marks=[
pytest.mark.notimpl(
["dask", "snowflake", "polars", "datafusion", "mssql"]
),
pytest.mark.notyet(["impala", "pyspark"]),
],
),
param(
lambda t, where: t.count(where=where),
lambda t, where: len(t[where]),
id='count_star',
),
param(
lambda t, where: t.string_col.collect(where=where),
lambda t, where: t.string_col[where].tolist(),
id="collect",
marks=[
mark.notimpl(
[
"dask",
"impala",
"mysql",
"snowflake",
"sqlite",
"datafusion",
"mssql",
]
)
],
),
],
)
@pytest.mark.parametrize(
('ibis_cond', 'pandas_cond'),
[
param(lambda _: None, lambda _: slice(None), id='no_cond'),
param(
lambda t: t.string_col.isin(['1', '7']),
lambda t: t.string_col.isin(['1', '7']),
id='is_in',
marks=[mark.notimpl(["datafusion"])],
),
],
)
def test_reduction_ops(
backend,
alltypes,
df,
result_fn,
expected_fn,
ibis_cond,
pandas_cond,
):
expr = alltypes.agg(tmp=result_fn(alltypes, ibis_cond(alltypes))).tmp
result = expr.execute().squeeze()
expected = expected_fn(df, pandas_cond(df))
try:
np.testing.assert_allclose(result, expected, rtol=backend.reduction_tolerance)
except TypeError: # assert_allclose only handles numerics
# if we're not testing numerics, then the arrays should be exactly equal
np.testing.assert_array_equal(result, expected)
@pytest.mark.parametrize(
('result_fn', 'expected_fn'),
[
param(
lambda t, where: t.G.cov(t.RBI, where=where, how="pop"),
lambda t, where: t.G[where].cov(t.RBI[where], ddof=0),
id='covar_pop',
marks=[
pytest.mark.notimpl(["dask", "datafusion", "pandas", "polars"]),
pytest.mark.notyet(["mysql", "impala", "sqlite"]),
],
),
param(
lambda t, where: t.G.cov(t.RBI, where=where, how="sample"),
lambda t, where: t.G[where].cov(t.RBI[where], ddof=1),
id='covar_samp',
marks=[
pytest.mark.notimpl(["dask", "datafusion", "pandas", "polars"]),
pytest.mark.notyet(["mysql", "impala", "sqlite"]),
],
),
param(
lambda t, where: t.G.corr(t.RBI, where=where, how="pop"),
lambda t, where: t.G[where].corr(t.RBI[where]),
id='corr_pop',
marks=[
pytest.mark.notimpl(["dask", "datafusion", "pandas", "polars"]),
pytest.mark.notyet(
["clickhouse", "impala", "mysql", "pyspark", "sqlite"]
),
],
),
param(
lambda t, where: t.G.corr(t.RBI, where=where, how="sample"),
lambda t, where: t.G[where].corr(t.RBI[where]),
id='corr_samp',
marks=[
pytest.mark.notimpl(["dask", "datafusion", "pandas", "polars"]),
pytest.mark.notyet(
[
"duckdb",
"impala",
"mysql",
"postgres",
"sqlite",
"snowflake",
"polars",
]
),
],
),
param(
lambda t, where: (t.G > 34.0).cov(
t.G <= 34.0,
where=where,
how="pop",
),
lambda t, where: (t.G[where] > 34.0).cov(t.G[where] <= 34.0, ddof=0),
id='covar_pop_bool',
marks=[
pytest.mark.notimpl(["dask", "datafusion", "pandas", "polars"]),
pytest.mark.notyet(["mysql", "impala", "sqlite"]),
],
),
param(
lambda t, where: (t.G > 34.0).corr(
t.G <= 34.0,
where=where,
how="pop",
),
lambda t, where: (t.G[where] > 34.0).corr(t.G[where] <= 34.0),
id='corr_pop_bool',
marks=[
pytest.mark.notimpl(["dask", "datafusion", "pandas", "polars"]),
pytest.mark.notyet(
["clickhouse", "impala", "mysql", "pyspark", "sqlite"]
),
],
),
],
)
@pytest.mark.parametrize(
('ibis_cond', 'pandas_cond'),
[
param(lambda _: None, lambda _: slice(None), id='no_cond'),
param(
lambda t: t.yearID.isin([2009, 2015]),
lambda t: t.yearID.isin([2009, 2015]),
id='cond',
marks=[
pytest.mark.broken(
["snowflake"],
reason=("snowflake doesn't allow quoted columns in group_by"),
),
],
),
],
)
@pytest.mark.notimpl(["mssql"])
def test_corr_cov(
batting,
batting_df,
result_fn,
expected_fn,
ibis_cond,
pandas_cond,
):
expr = result_fn(batting, ibis_cond(batting))
result = expr.execute()
expected = expected_fn(batting_df, pandas_cond(batting_df))
# Backends use different algorithms for computing covariance each with
# different amounts of numerical stability.
#
# This makes a generic, precise and accurate comparison function incredibly
# fragile and tedious to write.
assert pytest.approx(result) == expected
@pytest.mark.notimpl(
[
"dask",
"datafusion",
"mysql",
"pandas",
"postgres",
"sqlite",
"snowflake",
"mssql",
]
)
def test_approx_median(alltypes):
expr = alltypes.double_col.approx_median()
result = expr.execute()
assert isinstance(result, float)
@mark.parametrize(
('result_fn', 'expected_fn'),
[
param(
lambda t, where, sep: (
t.group_by('bigint_col')
.aggregate(tmp=lambda t: t.string_col.group_concat(sep, where=where))
.order_by('bigint_col')
),
lambda t, where, sep: (
(
t
if isinstance(where, slice)
else t.assign(string_col=t.string_col.where(where))
)
.groupby('bigint_col')
.string_col.agg(
lambda s: (np.nan if pd.isna(s).all() else sep.join(s.values))
)
.rename('tmp')
.sort_index()
.reset_index()
),
id='group_concat',
)
],
)
@mark.parametrize(
("ibis_sep", "pandas_sep"),
[
param(":", ":", id="const"),
param(
L(":") + ":",
"::",
id="expr",
marks=mark.notyet(["duckdb", "mysql", "pyspark"]),
),
],
)
@mark.parametrize(
('ibis_cond', 'pandas_cond'),
[
param(lambda _: None, lambda _: slice(None), id='no_cond'),
param(
lambda t: t.string_col.isin(['1', '7']),
lambda t: t.string_col.isin(['1', '7']),
marks=mark.notimpl(["dask"]),
id='is_in',
),
param(
lambda t: t.string_col.notin(['1', '7']),
lambda t: ~t.string_col.isin(['1', '7']),
marks=mark.notimpl(["dask"]),
id='not_in',
),
],
)
@mark.notimpl(["datafusion", "snowflake", "polars", "mssql"])
def test_group_concat(
backend,
alltypes,
df,
result_fn,
expected_fn,
ibis_cond,
pandas_cond,
ibis_sep,
pandas_sep,
):
expr = result_fn(alltypes, ibis_cond(alltypes), ibis_sep)
result = expr.execute()
expected = expected_fn(df, pandas_cond(df), pandas_sep)
backend.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
('result_fn', 'expected_fn'),
[
param(
lambda t: t.string_col.topk(3),
lambda t: t.groupby('string_col')['string_col'].count().head(3),
id='string_col_top3',
)
],
)
@mark.notimpl(["pandas", "dask"])
def test_topk_op(alltypes, df, result_fn, expected_fn):
    # The TopK expression orders rows by "count", but each backend may break
    # ties differently.
    # Note: it might be better if TopK ordered by both "count" and the column
    # used by TopK.
t = alltypes.order_by(alltypes.string_col)
df = df.sort_values('string_col')
result = result_fn(t).execute()
expected = expected_fn(df)
assert all(result['count'].values == expected.values)
@pytest.mark.parametrize(
('result_fn', 'expected_fn'),
[
param(
lambda t: t.semi_join(t.string_col.topk(3), "string_col"),
lambda t: t[
t.string_col.isin(
t.groupby('string_col')['string_col'].count().head(3).index
)
],
id='string_col_filter_top3',
)
],
)
@mark.notimpl(["datafusion", "pandas", "dask"])
def test_topk_filter_op(alltypes, df, result_fn, expected_fn):
    # The TopK expression orders rows by "count", but each backend may break
    # ties differently.
    # Note: it might be better if TopK ordered by both "count" and the column
    # used by TopK.
t = alltypes.order_by(alltypes.string_col)
df = df.sort_values('string_col')
expr = result_fn(t)
result = expr.execute()
expected = expected_fn(df)
assert result.shape[0] == expected.shape[0]
@pytest.mark.parametrize(
'agg_fn',
[
param(lambda s: list(s), id='agg_to_list'),
param(lambda s: np.array(s), id='agg_to_ndarray'),
],
)
@mark.notimpl(
[
"clickhouse",
"datafusion",
"duckdb",
"impala",
"mysql",
"postgres",
"sqlite",
"snowflake",
"polars",
"mssql",
]
)
def test_aggregate_list_like(backend, alltypes, df, agg_fn):
"""Tests .aggregate() where the result of an aggregation is a list-like.
We expect the list / np.array to be treated as a scalar (in other
words, the resulting table expression should have one element, which
is the list / np.array).
"""
udf = reduction(input_type=[dt.double], output_type=dt.Array(dt.double))(agg_fn)
expr = alltypes.aggregate(result_col=udf(alltypes.double_col))
result = expr.execute()
# Expecting a 1-row DataFrame
expected = pd.DataFrame({'result_col': [agg_fn(df.double_col)]})
backend.assert_frame_equal(result, expected)
@mark.notimpl(
[
"clickhouse",
"datafusion",
"duckdb",
"impala",
"mysql",
"postgres",
"sqlite",
"snowflake",
"polars",
"mssql",
]
)
def test_aggregate_mixed_udf(backend, alltypes, df):
"""Tests .aggregate() with multiple aggregations with mixed result types.
(In particular, one aggregation that results in an array, and other
aggregation(s) that result in a non-array)
"""
@reduction(input_type=[dt.double], output_type=dt.double)
def sum_udf(v):
return np.sum(v)
@reduction(input_type=[dt.double], output_type=dt.Array(dt.double))
def collect_udf(v):
return np.array(v)
expr = alltypes.aggregate(
sum_col=sum_udf(alltypes.double_col),
collect_udf=collect_udf(alltypes.double_col),
)
result = expr.execute()
expected = pd.DataFrame(
{
'sum_col': [sum_udf.func(df.double_col)],
'collect_udf': [collect_udf.func(df.double_col)],
}
)
backend.assert_frame_equal(result, expected, check_like=True)
@pytest.mark.notimpl(["datafusion", "pyspark"])
def test_binds_are_cast(alltypes):
expr = alltypes.aggregate(
high_line_count=(
alltypes.string_col.case().when('1-URGENT', 1).else_(0).end().sum()
)
)
expr.execute()
def test_agg_sort(alltypes):
query = alltypes.aggregate(count=alltypes.count())
query = query.order_by(alltypes.year)
query.execute()
def test_filter(backend, alltypes, df):
expr = (
alltypes[_.string_col == "1"]
.mutate(x=L(1, "int64"))
.group_by(_.x)
.aggregate(_.double_col.sum())
)
# TODO: The pyspark backend doesn't apply schemas to outputs
result = expr.execute().astype({"x": "int64"})
expected = (
df.loc[df.string_col == "1", :]
.assign(x=1)
.groupby("x")
.double_col.sum()
.rename("sum")
.reset_index()
)
backend.assert_frame_equal(result, expected, check_like=True)
|
{
"content_hash": "40e74dbc3053d35066081807ea8f808a",
"timestamp": "",
"source": "github",
"line_count": 883,
"max_line_length": 86,
"avg_line_length": 30.032842582106454,
"alnum_prop": 0.4855386703872695,
"repo_name": "ibis-project/ibis",
"id": "a5561b3bc7258e2e5d04cf459e4f6d3b0302cc31",
"size": "26519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ibis/backends/tests/test_aggregation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44931"
},
{
"name": "CMake",
"bytes": "1862"
},
{
"name": "Dockerfile",
"bytes": "70"
},
{
"name": "JavaScript",
"bytes": "2713"
},
{
"name": "Nix",
"bytes": "11917"
},
{
"name": "Python",
"bytes": "2958224"
},
{
"name": "Shell",
"bytes": "3167"
}
],
"symlink_target": ""
}
|
"""Control Spotify with i3."""
import sys
from subprocess import Popen
import gi
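# The `if True:` block below appears to exist only so gi.require_version() can
# run before the gi.repository import without tripping import-order linting
# (an inference; the original carries no explanation).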
if True:
gi.require_version('Playerctl', '2.0')
from gi.repository import Playerctl
try:
player = Playerctl.Player.new('spotify')
except: # noqa # pylint:disable=bare-except
try:
player = Playerctl.Player.new('spotifyd')
except: # noqa # pylint:disable=bare-except
sys.exit(1)
title = player.get_title()
artist = player.get_artist()
album = player.get_album()
status = player.get_property('status')
playing = ''
if status.startswith('Playing'):
playing = '▶'
elif status.startswith('Paused'):
playing = '⏸'
elif status.startswith('Stopped'):
playing = '⏹'
Popen(['dunstify', '-a', 'Spotify', f'{playing} {title}', f'{artist}\n{album}'])
sys.exit(0)
|
{
"content_hash": "3b1cd730d431dbf68430763937e9fad8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 23.818181818181817,
"alnum_prop": 0.6590330788804071,
"repo_name": "petobens/dotfiles",
"id": "8ad7063e314246effeedd8365bfeabf4cc9b8f47",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arch/config/i3/spotify_track.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AutoHotkey",
"bytes": "9975"
},
{
"name": "JavaScript",
"bytes": "15602"
},
{
"name": "Lua",
"bytes": "17374"
},
{
"name": "Python",
"bytes": "95033"
},
{
"name": "Shell",
"bytes": "100314"
},
{
"name": "Vim Script",
"bytes": "389184"
},
{
"name": "Vim Snippet",
"bytes": "135665"
}
],
"symlink_target": ""
}
|
import logging
### The simplest memory backend is just a dict recorded in this module,
### or alternatively a Redis database.
memory = {}
class memorized_property(property):
def __init__(self,*args,**kwargs):
super(memorized_property,self).__init__(*args,**kwargs)
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
if obj.gen_memoryid is not None:
self.name = obj.gen_memoryid() + '::' + self.fget.__name__
else:
self.name = '::' + self.fget.__name__
if self.name in memory:
logging.debug('from memory------------------------------')
return memory[self.name]
else:
logging.debug('from computing##########################')
value = memory[self.name] = self.fget(obj)
return value
def __set__(self, obj, value):
if self.fset is None:
raise AttributeError("can't set attribute")
if obj.gen_memoryid is not None:
self.name = obj.gen_memoryid() + '::' + self.fset.__name__
else:
self.name = '::' + self.fget.__name__
memory[self.name] = value
def __delete__(self, obj):
if self.fdel is None:
raise AttributeError("can't delete attribute")
if obj.gen_memoryid is not None:
self.name = obj.gen_memoryid() + '::' + self.fdel.__name__
else:
self.name = '::' + self.fget.__name__
del memory[self.name]
class Api(object):
def gen_memoryid(self):
        '''Create an id string used as the cache key prefix.'''
raise NotImplementedError
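# Usage sketch (illustrative addition; the class and helper names below are
# made up): a subclass implements gen_memoryid() so each property value is
# cached in the module-level `memory` dict under a per-object key.
#
#     class Weather(Api):
#         def __init__(self, city):
#             self.city = city
#         def gen_memoryid(self):
#             return 'weather::' + self.city
#         @memorized_property
#         def forecast(self):
#             return fetch_forecast(self.city)  # computed once, then read from memory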
|
{
"content_hash": "55089e6d014eeeb52ac17fee95ea9d75",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 70,
"avg_line_length": 29.413793103448278,
"alnum_prop": 0.5070339976553341,
"repo_name": "a358003542/expython",
"id": "b69f82316249f88d8e914623345549b8626c7864",
"size": "1794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expython/web/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "401"
},
{
"name": "Python",
"bytes": "230333"
}
],
"symlink_target": ""
}
|
from libsaas.services import base
from .resource import ReadonlyResource
from .calendar import CalendarLists, CalendarList
class SettingResource(ReadonlyResource):
path = 'settings'
class User(ReadonlyResource):
path = 'users/me'
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.resource(SettingResource)
def settings(self):
"""
Return the resource corresponding to all the settings
"""
return SettingResource(self)
@base.resource(SettingResource)
def setting(self, setting):
"""
Return the resource corresponding to a single setting
"""
return SettingResource(self, setting)
@base.resource(CalendarLists)
def calendar_lists(self):
"""
Return the resource corresponding to all the calendar lists
"""
return CalendarLists(self)
@base.resource(CalendarList)
def calendar_list(self, calendar_id):
"""
Return the resource corresponding to a single calendar list
"""
return CalendarList(self, calendar_id)
|
{
"content_hash": "68fef8b8912fee81af34a6f353489beb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 67,
"avg_line_length": 25.86046511627907,
"alnum_prop": 0.6573741007194245,
"repo_name": "livingbio/libsaas",
"id": "444a28168f8db2bdd86aa4cc08e47a953253b839",
"size": "1112",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libsaas/services/googlecalendar/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "954103"
}
],
"symlink_target": ""
}
|
from . import TweetAdapter
from ttp import ttp
import re
def breakMessage(messageToBreak, tweetId, userId, userDataStrategy):
if len(messageToBreak) <= 140:
return [ messageToBreak ]
username = TweetAdapter.getUsernameForTweet(tweetId, userId, userDataStrategy)
splitMessageList = []
urls = getUrls(messageToBreak)
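    # Swap every URL for a fixed-width 'http://short.co###...' placeholder whose
    # length matches Twitter's wrapped (t.co) link length, so the 140-character
    # split points account for link shortening; the placeholders are restored
    # after each chunk is cut.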
messageToBreak = transformMessageLinksToShortUrls(messageToBreak, urls)
while messageToBreak!="":
if len(messageToBreak) > 140:
if messageToBreak[140] != " ":
indexToSplitMessageAt = messageToBreak[0:140 + 1].rfind(" ")
else:
indexToSplitMessageAt = 140
messageToAppend = messageToBreak[0:indexToSplitMessageAt].rstrip()
messageToBreak = "@%s %s"%(username, messageToBreak[indexToSplitMessageAt:].lstrip())
messageToAppend, urls = transformShortUrlsBackToOriginalLinks(messageToAppend, urls[:])
splitMessageList.append(messageToAppend)
else:
if messageToBreak.rstrip() != "@%s"%(username):
splitMessageList.append(transformShortUrlsBackToOriginalLinks(messageToBreak.rstrip(), urls[:])[0])
break
return splitMessageList
def transformShortUrlsBackToOriginalLinks(messageToTransform, urls):
findUrlRegex = re.compile('http://short\.co[#]+')
for foundUrl in findUrlRegex.findall(messageToTransform):
messageToTransform = messageToTransform.replace(foundUrl, urls[0])
urls.pop(0)
return messageToTransform, urls
def getUrls(messageToParse):
p = ttp.Parser()
parsed = p.parse(messageToParse)
urls = parsed.urls
return urls
def transformMessageLinksToShortUrls(messageToBreak, urls):
urlLength, urlLengthHttps = TweetAdapter.getUrlLengths()
for url in urls:
if url.startswith('https://'):
lengthOfUrl = urlLengthHttps
else:
lengthOfUrl = urlLength
messageToBreak = messageToBreak.replace(url, 'http://short.co'+'#'*(lengthOfUrl - 15))
return messageToBreak
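# Editor's note (hedged illustration, not part of the original module):
# breakMessage splits a reply longer than 140 characters into several tweets,
# prefixing each continuation with "@<username>". Links are first swapped for a
# fixed-width placeholder ("http://short.co" padded with '#') sized from
# TweetAdapter.getUrlLengths(), so split points reflect the shortened link
# length, then swapped back chunk by chunk. Rough usage, with hypothetical ids
# and strategy object:
#
#   chunks = breakMessage(long_reply_text, tweetId=12345, userId=1,
#                         userDataStrategy=some_strategy)
#   # chunks -> ["first piece ...", "@someuser second piece ...", ...]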
|
{
"content_hash": "b160a0315fdb7ebf1af8f3cea037ed77",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 115,
"avg_line_length": 41.42,
"alnum_prop": 0.6837276677933366,
"repo_name": "kiriappeee/reply-later",
"id": "336ac49e3808895a9f2dfe24a051946caf8835ce",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/messager/MessageBreaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "55055"
},
{
"name": "JavaScript",
"bytes": "219"
},
{
"name": "Python",
"bytes": "80169"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
import unittest
import openrtb
import openrtb.base
BRQ = {
'id': u'testbrqid',
'tmax': 100,
'at': 2,
'app': {
'id': u'appid',
'name': u'appname',
'cat': [u'IAB1', u'IAB2-2'],
'publisher': {
'id': u'pubid',
'cat': [u'IAB3']
},
'content': {
'id': u'contentid',
'episode': 1,
'producer': {
'id': u'pubid',
'cat': [u'IAB3']
}
},
'keywords': u'key,word'
},
'device': {
'ip': u'123.1.2.3',
'make': u'Apple',
'devicetype': 1,
'geo': {
'lat': 54.3123,
'lon': 32.12312,
'country': u'US'
}
},
'user': {
'id': u'userid',
'yob': 2012,
'data': [
{
'id': u'dataid',
'segment': [{
'id': u'segmentid',
'name': u'yob',
'value': u'2012',
}]
}
]
},
'imp': [
{
'id': u'testimpid',
'bidfloorcur': u'USD',
'banner': {
'w': 320,
'h': 50,
'pos': 1,
'mimes': [u'mime/type']
}
}
],
'ext': {}
}
class TestFields(unittest.TestCase):
def test_passthrough(self):
self.assertEqual(openrtb.base.Field(int).deserialize(1), 1)
def test_convert(self):
self.assertEqual(openrtb.base.Field(int).deserialize('1'), 1)
def test_convert_fail(self):
with self.assertRaises(openrtb.base.ValidationError):
openrtb.base.Field(int).deserialize('asd')
def test_convert_enum_fail(self):
with self.assertRaises(openrtb.base.ValidationError):
openrtb.base.Field(openrtb.base.Enum).deserialize('asd')
def test_convert_enum(self):
self.assertEqual(openrtb.base.Field(openrtb.base.Enum).deserialize('1'), 1)
def test_deserialize(self):
class O(object):
v = None
@staticmethod
def deserialize(v):
O.v = v
return 'test'
self.assertEqual(openrtb.base.Field(O).deserialize('1'), 'test')
self.assertEqual(O.v, '1')
def test_unicode(self):
self.assertEqual(openrtb.base.String(u'uni'), u'uni')
def test_ascii(self):
self.assertEqual(openrtb.base.String(b'uni'), u'uni')
def test_utf8(self):
self.assertEqual(openrtb.base.String(u'утф'.encode('utf-8')), u'утф')
def test_bad_utf8(self):
self.assertEqual(openrtb.base.String(b'x\xff'), u'x')
def test_convert_to_unicode(self):
self.assertEqual(openrtb.base.String(1), u'1')
def test_default_array(self):
self.assertEqual(openrtb.base.Array(int)(None), [])
def test_enum_int(self):
self.assertEqual(openrtb.base.Enum(1), 1)
def test_enum_convert_to_int(self):
self.assertEqual(openrtb.base.Enum('1'), 1)
def test_enum_convert_to_int_fail(self):
with self.assertRaises(ValueError):
openrtb.base.Enum('x')
class TestObjects(unittest.TestCase):
def test_required(self):
with self.assertRaises(openrtb.base.ValidationError):
openrtb.request.BidRequest()
def test_extra(self):
s = openrtb.request.Site(extra='extra')
self.assertEqual(s.extra, 'extra')
def test_ds_extra(self):
s = openrtb.request.Site.deserialize({'extra': 'extra'})
self.assertEqual(s.extra, 'extra')
def test_missing(self):
s = openrtb.request.Site()
self.assertEqual(s.extra, None)
def test_ds_none(self):
s = openrtb.request.Site.deserialize({'id': None})
self.assertEqual(s.id, None)
def test_bid_request_serialize_cycle(self):
self.maxDiff = None
brq = openrtb.request.BidRequest.deserialize(BRQ)
self.assertDictEqual(BRQ, brq.serialize())
class TestGetters(unittest.TestCase):
def test_brq_user(self):
brq = openrtb.request.BidRequest.minimal('i', 'i')
self.assertEqual(brq.get_user().__class__,
openrtb.request.User)
brq.user = openrtb.request.User(id='t')
self.assertEqual(brq.get_user().id, 't')
def test_brq_app(self):
brq = openrtb.request.BidRequest.minimal('i', 'i')
self.assertEqual(brq.get_app().__class__,
openrtb.request.App)
brq.app = openrtb.request.App(id='t')
self.assertEqual(brq.get_app().id, 't')
def test_brq_site(self):
brq = openrtb.request.BidRequest.minimal('i', 'i')
self.assertEqual(brq.get_site().__class__,
openrtb.request.Site)
brq.site = openrtb.request.Site(id='t')
self.assertEqual(brq.get_site().id, 't')
def test_brq_device(self):
brq = openrtb.request.BidRequest.minimal('i', 'i')
self.assertEqual(brq.get_device().__class__,
openrtb.request.Device)
brq.device = openrtb.request.Device(id='t')
self.assertEqual(brq.get_device().id, 't')
def test_banner_btypes(self):
self.assertEqual(openrtb.request.Banner().blocked_types(), set())
self.assertEqual(openrtb.request.Banner(btype=[openrtb.constants.BannerType.BANNER]).blocked_types(),
{openrtb.constants.BannerType.BANNER})
def test_banner_size(self):
self.assertEqual(openrtb.request.Banner().size(), None)
self.assertEqual(openrtb.request.Banner(w=1, h=2).size(), (1, 2))
def test_device_geo(self):
self.assertEqual(openrtb.request.Device().get_geo().__class__,
openrtb.request.Geo)
geo = openrtb.request.Geo()
self.assertEqual(openrtb.request.Device(geo=geo).get_geo(), geo)
def test_device_oncellular(self):
self.assertFalse(openrtb.request.Device().is_on_cellular())
self.assertTrue(openrtb.request.Device(connectiontype=openrtb.constants.ConnectionType.CELLULAR_2G).is_on_cellular())
self.assertFalse(openrtb.request.Device(connectiontype=openrtb.constants.ConnectionType.WIFI).is_on_cellular())
def test_geo_loc(self):
self.assertEqual(openrtb.request.Geo().loc(), None)
self.assertEqual(openrtb.request.Geo(lat=1, lon=2).loc(), (1, 2))
class TestMobileAdapter(unittest.TestCase):
def test_adapter(self):
mbrq = openrtb.mobile.BidRequest(
id='mbrqid',
imp=[
openrtb.mobile.Impression(
impid='impid',
w=320,
h=50,
btype=[openrtb.constants.BannerType.BANNER],
battr=[openrtb.constants.CreativeAttribute.AUDIO_AUTOPLAY],
pos=openrtb.constants.AdPosition.OFFSCREEN
)
],
device=openrtb.mobile.Device(
loc='1.23,4.56',
country='US',
make='Apple'
),
site=openrtb.mobile.Site(
sid='siteid',
pub='sitepub',
pid='sitepubid'
),
app=openrtb.mobile.App(
aid='appid',
pub='apppub',
pid='apppubid',
),
user=openrtb.mobile.User(
country='RU',
zip='123456',
uid='userid',
),
restrictions=openrtb.mobile.Restrictions(
bcat=['cat'],
badv=['adv'],
)
)
a = openrtb.mobile.OpenRTB20Adapter(mbrq)
self.assertEqual(a.id, 'mbrqid')
self.assertEqual(a.imp[0].banner.w, 320)
self.assertEqual(a.imp[0].banner.h, 50)
self.assertEqual(a.imp[0].banner.btype, [openrtb.constants.BannerType.BANNER])
self.assertEqual(a.imp[0].banner.pos, openrtb.constants.AdPosition.OFFSCREEN)
self.assertEqual(a.device.geo.country, 'US')
self.assertEqual(a.device.geo.lat, 1.23)
self.assertEqual(a.device.geo.lon, 4.56)
self.assertEqual(a.site.publisher.id, 'sitepubid')
self.assertEqual(a.site.publisher.name, 'sitepub')
self.assertEqual(a.site.id, 'siteid')
self.assertEqual(a.app.id, 'appid')
self.assertEqual(a.user.geo.country, 'RU')
self.assertEqual(a.user.geo.zip, '123456')
self.assertEqual(a.user.id, 'userid')
self.assertEqual(a.bcat, ['cat'])
self.assertEqual(a.badv, ['adv'])
self.assertEqual(a.brq.serialize(),
openrtb.mobile.OpenRTB20Adapter.deserialize(mbrq.serialize()).brq.serialize())
class TestConstants(unittest.TestCase):
def test_init(self):
self.assertEqual(openrtb.constants.AdPosition(2).name, 'MAYBE_VISIBLE')
def test_clone(self):
self.assertEqual(openrtb.constants.AdPosition(openrtb.constants.AdPosition.OFFSCREEN).name, 'OFFSCREEN')
def test_int(self):
self.assertEqual(int(openrtb.constants.ConnectionType(2)), 2)
def test_str(self):
self.assertEqual(str(openrtb.constants.AdPosition(2)), 'MAYBE_VISIBLE')
def test_hash(self):
self.assertEqual({openrtb.constants.AdPosition.OFFSCREEN: 'test'}[3], 'test')
def test_unknown_str(self):
self.assertIn('Unknown', str(openrtb.constants.BannerType(123)))
def test_none_equal(self):
self.assertFalse(None == openrtb.constants.BannerType.JS)
def test_int_equal(self):
self.assertEqual(openrtb.constants.BannerType.JS, 3)
def test_constant_equal(self):
self.assertEqual(openrtb.constants.BannerType.JS, openrtb.constants.BannerType(3))
def test_wrong_type(self):
with self.assertRaises(TypeError):
openrtb.constants.BannerType.JS == openrtb.constants.CreativeAttribute.EXPAND_AUTO
class TestIAB(unittest.TestCase):
def test_tier1(self):
self.assertEqual(openrtb.iab.from_string('IAB1'), 'Arts & Entertainment')
self.assertEqual(openrtb.iab.from_string('IAB18'), 'Style & Fashion')
def test_tier2(self):
self.assertEqual(openrtb.iab.from_string('IAB17-33'), 'Sports: Scuba Diving')
def test_noprefix(self):
self.assertEqual(openrtb.iab.from_string('7-32'), 'Health & Fitness: Nutrition')
def test_bad(self):
self.assertEqual(openrtb.iab.from_string('IAB99-99'), 'IAB99-99')
class TestMacros(unittest.TestCase):
TPL = ('${AUCTION_ID}/${AUCTION_BID_ID}/${AUCTION_IMP_ID}/'
'${AUCTION_SEAT_ID}/${AUCTION_AD_ID}/${AUCTION_PRICE}/${AUCTION_CURRENCY}')
def test_sub(self):
brq = openrtb.request.BidRequest.minimal('reqid', 'impid')
brp = openrtb.response.BidResponse(
id='wharrgarbl',
seatbid=[openrtb.response.SeatBid(
seat='seatid',
bid=[openrtb.response.Bid(
id='bidid',
impid='impid',
adid='adid',
price=0
)]
)],
bidid='bidid'
)
self.assertEqual(openrtb.macros.substitution(brq, brp, 0.1, self.TPL),
'reqid/bidid/impid/seatid/adid/0.1/USD')
def test_nonmacro(self):
self.assertEqual(openrtb.macros.substitution(
openrtb.request.BidRequest.minimal('r', 'i'),
openrtb.response.BidResponse.minimal('id', 'bidid', 'impid', 0.1),
0.2,
'${AUCTION_TEST}'
), '${AUCTION_TEST}')
def test_empty(self):
self.assertEqual(openrtb.macros.substitution(
openrtb.request.BidRequest.minimal('rid', 'rimpid'),
openrtb.response.BidResponse.minimal('respid', 'bidid', 'impid', 0.1),
0.2,
self.TPL
), 'rid//impid///0.2/USD')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "5e38fcdcc2f30181d013f24353a5417e",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 125,
"avg_line_length": 33.69553072625698,
"alnum_prop": 0.5604741772361768,
"repo_name": "anossov/openrtb",
"id": "29fbe29d6edc2dc99ea1ece5fdabf66bc9cc96a0",
"size": "12094",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "80826"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.8-alpha',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
{
"content_hash": "c9e3a917280b32f3fd259ea5fd929496",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 26.25581395348837,
"alnum_prop": 0.62533215234721,
"repo_name": "kirberich/djangae",
"id": "41bc131a1b179c26c5d510575ff5f50191f8c903",
"size": "1129",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "277"
},
{
"name": "Python",
"bytes": "667411"
},
{
"name": "Shell",
"bytes": "368"
}
],
"symlink_target": ""
}
|
from tornado.options import _LogFormatter as TornadoLogFormatter
import logging, logging.handlers
import os.path
import types
import dictconfig
# Pulled from commonware.log so we don't have to import that, which drags with
# it Django dependencies.
class RemoteAddressFormatter(logging.Formatter):
"""Formatter that makes sure REMOTE_ADDR is available."""
def format(self, record):
if ('%(REMOTE_ADDR)' in self._fmt
and 'REMOTE_ADDR' not in record.__dict__):
record.__dict__['REMOTE_ADDR'] = None
return logging.Formatter.format(self, record)
class UTF8SafeFormatter(RemoteAddressFormatter):
def __init__(self, fmt=None, datefmt=None, encoding='utf-8'):
logging.Formatter.__init__(self, fmt, datefmt)
self.encoding = encoding
def formatException(self, e):
r = logging.Formatter.formatException(self, e)
if type(r) in [types.StringType]:
r = r.decode(self.encoding, 'replace') # Convert to unicode
return r
def format(self, record):
t = RemoteAddressFormatter.format(self, record)
if type(t) in [types.UnicodeType]:
t = t.encode(self.encoding, 'replace')
return t
class NullHandler(logging.Handler):
def emit(self, record):
pass
def initialize_logging(syslog_tag, syslog_facility, loggers,
log_level=logging.INFO, use_syslog=False):
    if os.path.exists('/dev/log'):
        syslog_device = '/dev/log'
    elif os.path.exists('/var/run/syslog'):
        syslog_device = '/var/run/syslog'
    else:
        # Fall back to the Linux default so the name is always defined;
        # it is only used when use_syslog is enabled.
        syslog_device = '/dev/log'
base_fmt = ('%(name)s:%(levelname)s %(message)s:%(pathname)s:%(lineno)s')
cfg = {
'version': 1,
'filters': {},
'formatters': {
'debug': {
'()': UTF8SafeFormatter,
                'datefmt': '%H:%M:%S',
'format': '%(asctime)s ' + base_fmt,
},
'prod': {
'()': UTF8SafeFormatter,
                'datefmt': '%H:%M:%S',
'format': '%s: [%%(REMOTE_ADDR)s] %s' % (syslog_tag, base_fmt),
},
'tornado': {
'()': TornadoLogFormatter,
'color': True
},
},
'handlers': {
'console': {
'()': logging.StreamHandler,
'formatter': 'tornado'
},
'null': {
'()': NullHandler,
},
'syslog': {
'()': logging.handlers.SysLogHandler,
'facility': syslog_facility,
'address': syslog_device,
'formatter': 'prod',
},
},
'loggers': {
}
}
for key, value in loggers.items():
cfg[key].update(value)
# Set the level and handlers for all loggers.
for logger in cfg['loggers'].values():
if 'handlers' not in logger:
logger['handlers'] = ['syslog' if use_syslog else 'console']
if 'level' not in logger:
logger['level'] = log_level
if 'propagate' not in logger:
logger['propagate'] = False
dictconfig.dictConfig(cfg)
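# Editor's note (hedged illustration, not part of the original module): the
# `loggers` argument maps dictConfig section names to dicts that are merged
# into `cfg` above, so a caller can declare loggers and extend handlers or
# formatters in one place, e.g.:
#
#   initialize_logging(
#       syslog_tag='myapp',
#       syslog_facility=logging.handlers.SysLogHandler.LOG_LOCAL0,
#       loggers={'loggers': {'myapp': {'level': logging.DEBUG}}},
#       use_syslog=False,
#   )
#
# Loggers missing 'handlers', 'level' or 'propagate' get the defaults filled in
# by the loop above before dictConfig is applied.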
|
{
"content_hash": "03e9bb20203008f92e35772154591c21",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 32,
"alnum_prop": 0.5337752525252525,
"repo_name": "peplin/trinity",
"id": "4907964a63c7ac01ddcda9578de8e66f7f93a39a",
"size": "3168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logconfig/logconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54550"
}
],
"symlink_target": ""
}
|
"""
Classes for generating random values for thrift types
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import random
import sys
import six
import six.moves as sm
from thrift import Thrift
from thrift.util.type_inspect import get_spec, ThriftPyTypeSpec
INFINITY = float("inf")
if sys.version_info[0] >= 3:
unicode = None
def deep_dict_update(base, update) -> None:
"""Similar to dict.update(base, update), but if any values in base are
dictionaries, they are updated too instead of replaced.
Destructive on base, but non-destructive on base's values.
"""
for key, val in six.iteritems(update):
if key in base and isinstance(base[key], dict) and isinstance(val, dict):
# Copy base[key] (non-destructive on base's values)
updated = dict(base[key])
deep_dict_update(updated, val)
val = updated
base[key] = val
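# Editor's note (illustration, not part of the original module):
#
#   base = {"a": {"x": 1}, "b": 2}
#   deep_dict_update(base, {"a": {"y": 3}, "b": 4})
#   # base == {"a": {"x": 1, "y": 3}, "b": 4}
#
# Nested dicts are merged rather than replaced; this is how constraint
# dictionaries are layered (class defaults, type rules, explicit overrides) in
# flatten_constraints below.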
class BaseRandomizer(object):
"""
The BaseRandomizer class is an abstract class whose subclasses implement
a randomizer for a specific Thrift Type. Instances of a class may have
different spec_args and constraints.
Class Attributes:
ttype (int (enum)): The attribute of Thrift.TTypes corresponding to the type
default_constraints (dict): Default values for randomizers' constraint
dictionary. Constraints affect the behavior of the randomize() method.
Instance Attributes:
type_spec (ThriftTypeSpec): The thrift spec wrapper. Provides additional
information about the type beyond thrift type.
state (RandomizerState): State attributes to be preserved across randomizer
components in recursive and nested randomizer structures. Includes
initialization cache and recursion depth trace.
constraints (dict): Map of constraint names to constraint values. This
is equivalent to cls.default_constraints if an empty constraint dictionary
is passed to __init__. Otherwise, it is equal to cls.default_constraints
recursively updated with the key/value pairs in the constraint dict passed
to __init__.
"""
ttype = None
default_constraints = {
"seeds": [],
"p_random": 0.08, # If seeded, chance of ignoring seed
"p_fuzz": 1, # If seed not ignored, chance of fuzzing seed
}
def __init__(self, type_spec, state, constraints):
"""
spec_args: thrift arguments for this field
state: RandomizerState instance
constraints: dict of constraints specific to this randomizer
"""
self.type_spec = type_spec
self.state = state
self.type_name = type_spec.get_type_name()
self.constraints = self.flatten_constraints(constraints)
self.preprocessing_done = False
def _preprocess_constraints(self):
pass
def _init_subrandomizers(self):
pass
def preprocess(self):
if self.preprocessing_done:
return
# Push type rules that may affect subrandomizers' constraints
pushed = self.state.push_type_constraints(self.constraints)
self._preprocess_constraints()
self._init_subrandomizers()
self.state.pop_type_constraints(pushed)
self.preprocessing_done = True
def flatten_constraints(self, constraints):
"""Return a single constraint dictionary by combining default
constraints with overriding constraints."""
cls = self.__class__
flattened = {}
deep_dict_update(flattened, cls.default_constraints)
# Put the default constraints of the whole stack
deep_dict_update(flattened, self.state.default_constraints)
type_name = self.type_name
for type_constraints in self.state.type_constraint_stacks[type_name]:
deep_dict_update(flattened, type_constraints)
deep_dict_update(flattened, constraints)
return flattened
def __eq__(self, other):
"""Check if this randomizer is equal to `other` randomizer. If two
randomizers are equal, they have the same type and constraints and
are expected to behave identically (up to random number generation.)"""
return (self.type_spec == other.type_spec) and (
self.constraints == other.constraints
)
@property
def universe_size(self):
"""
Return (or estimate) the range of the random variable. If this
randomizer is used for sets or map keys, the size of the container
will be limited to this value.
"""
raise NotImplementedError(
"_universe_size not implemented for %s" % (self.__class__.__name__)
)
def generate(self, seed=None):
"""Generate a value, possibly based on a seed.
If seed is not None, use it as the seed. Otherwise, if the seeds
constraint is non-empty, pick a random element as the seed.
If there are no seeds, return the result of randomize()
If there are seeds, use the p_random constraint to determine the
chance of returning the result of randomize() and use the p_fuzz
constraint to determine the chance of returning the result of fuzz(seed)
Otherwise, return the seed.
"""
if seed is None:
seeds = self.constraints["seeds"]
else:
seeds = [seed]
if not seeds or (random.random() < self.constraints["p_random"]):
return self._randomize()
seed = random.choice(seeds)
if random.random() < self.constraints["p_fuzz"]:
return self._fuzz(seed)
else:
return self.eval_seed(seed)
def _randomize(self):
"""Generate a random value of the type, given the spec args"""
raise NotImplementedError(
"randomize not implemented for %s" % (self.__class__.__name__)
)
def _fuzz(self, seed):
"""Randomly modify the given seed value.
By default, this method calls _randomize() and returns a completely
randomized value.
However, subclasses for types whose values can be "close" to each
other should override this method to randomly generate a value
that is "close" to the seed. For example, an int randomizer might
fuzz the seed 1000 by returning 1001. A string randomizer might fuzz
the seed "foo" to "fOo".
"""
return self._randomize()
def eval_seed(self, seed):
"""Evaluate a seed without fuzzing it.
Seeds must be specified as JSON, so they may not always match
the type that this randomizer is expected to generate. This method
converts the result of json.loads(seed) to a value with the expected
thrift type.
For example,
an int seed may be "3", which evaluates to 3. A Point struct seed may
be {"x": 4, "y": 2}, which evaluates to Point(x=4, y=2).
"""
return seed
class ScalarTypeRandomizer(BaseRandomizer):
"""Randomizer for types that do not constain other types, including
enum, byte, i16, i32, i64, float, double and string. Bool is excluded
because it does not need to inherit any properties from this class"""
default_constraints = dict(BaseRandomizer.default_constraints)
default_constraints.update({"choices": []})
def _randomize(self):
"""Basic types support the choices constraint, which restricts
the range of the randomizer to an explicit list"""
choices = self.constraints["choices"]
if choices:
return random.choice(choices)
else:
return None
class BoolRandomizer(BaseRandomizer):
ttype = Thrift.TType.BOOL
default_constraints = dict(BaseRandomizer.default_constraints)
default_constraints.update({"p_true": 0.5})
@property
def universe_size(self):
return 2
def _randomize(self):
return random.random() < self.constraints["p_true"]
def eval_seed(self, seed):
if isinstance(seed, bool):
return seed
elif isinstance(seed, six.integer_types):
return bool(seed)
elif seed == "true":
return True
elif seed == "false":
return False
else:
raise ValueError("Invalid bool seed: %s" % seed)
def _random_int_factory(k):
"""Return a function that generates a random k-bit signed int"""
min_ = -(1 << (k - 1)) # -2**(k-1)
    max_ = (1 << (k - 1)) - 1  # +2**(k-1)-1
def random_int_k_bits():
return random.randint(min_, max_)
return random_int_k_bits
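# Editor's note (illustration, not part of the original module): for k = 8 the
# factory above produces the signed byte range, min_ == -128 and max_ == 127,
# so random_int_k_bits() behaves like random.randint(-128, 127).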
class EnumRandomizer(ScalarTypeRandomizer):
ttype = Thrift.TType.I32
random_int_32 = staticmethod(_random_int_factory(32))
default_constraints = dict(ScalarTypeRandomizer.default_constraints)
default_constraints.update(
{
# Probability of generating an i32 with no corresponding Enum name
"p_invalid": 0.01,
}
)
def _preprocess_constraints(self):
self._whiteset = set()
for val in six.itervalues(self.type_spec.names_to_values()):
self._whiteset.add(val)
self._whitelist = list(self._whiteset)
def _randomize(self):
cls = self.__class__
val = super(EnumRandomizer, self)._randomize()
if val is not None:
if isinstance(val, six.string_types):
return self.type_spec.names_to_values()[val]
else:
return self.type_spec.construct_instance(val)
if random.random() < self.constraints["p_invalid"]:
# Generate an i32 value that does not correspond to an enum member
n = None
while (n in self._whiteset) or (n is None):
n = cls.random_int_32()
return self.type_spec.construct_instance(n)
else:
return self.type_spec.construct_instance(random.choice(self._whitelist))
def eval_seed(self, seed):
if isinstance(seed, six.string_types):
seed = self.type_spec.names_to_values()[seed]
elif not isinstance(seed, int):
# Assume the seed is given in its native type
return seed
return self.type_spec.construct_instance(seed)
@property
def universe_size(self):
return len(self._whitelist)
def _integer_randomizer_factory(name, ttype, n_bits):
_universe_size = 2**n_bits
_min = -(2 ** (n_bits - 1))
_max = (2 ** (n_bits - 1)) - 1
_name = name
_ttype = ttype
_n_bits = n_bits
_random_i32 = _random_int_factory(_n_bits)
class NBitIntegerRandomizer(ScalarTypeRandomizer):
ttype = _ttype
default_constraints = dict(ScalarTypeRandomizer.default_constraints)
default_constraints.update({"range": [], "fuzz_max_delta": 4})
def _randomize(self):
val = super(NBitIntegerRandomizer, self)._randomize()
if val is not None:
return val
range_ = self.constraints["range"]
if range_:
min_, max_ = range_
return random.randint(min_, max_)
return _random_i32()
def _flip_bit(self, seed):
"""Fuzz seed by flipping one bit, excluding the sign bit"""
flipper = 1 << random.randint(0, _n_bits - 2)
return seed ^ flipper
def _add_delta(self, seed):
"""Fuzz seed by adding a small number"""
max_delta = self.constraints["fuzz_max_delta"]
delta = random.randint(-max_delta, max_delta)
fuzzed = seed + delta
# Make sure fuzzed is in [_min, _max] to avoid overflow
return max(min(_max, fuzzed), _min)
def _fuzz(self, seed):
"""Apply a random fuzzer function"""
seed = self.eval_seed(seed)
fuzz_fn = random.choice([self._flip_bit, self._add_delta])
return fuzz_fn(seed)
@property
def universe_size(self):
return _universe_size
def eval_seed(self, seed):
if isinstance(seed, six.string_types):
return int(seed)
elif isinstance(seed, six.integer_types):
return seed
else:
raise TypeError("Invalid %s seed: %s" % (_name, seed))
if sys.version_info[0] == 2 and isinstance(_name, unicode):
NBitIntegerRandomizer.__name__ = "{}Randomizer".format(_name).encode("utf8")
else:
NBitIntegerRandomizer.__name__ = "{}Randomizer".format(_name)
return NBitIntegerRandomizer
ByteRandomizer = _integer_randomizer_factory("i8", Thrift.TType.BYTE, 8)
I16Randomizer = _integer_randomizer_factory("i16", Thrift.TType.I16, 16)
I32Randomizer = _integer_randomizer_factory("i32", Thrift.TType.I32, 32)
I64Randomizer = _integer_randomizer_factory("i64", Thrift.TType.I64, 64)
del _integer_randomizer_factory
class FloatingPointRandomizer(ScalarTypeRandomizer):
"""Abstract class for floating point types"""
unreals = [float("nan"), float("inf"), float("-inf")]
default_constraints = dict(ScalarTypeRandomizer.default_constraints)
default_constraints.update(
{
"p_zero": 0.01,
"p_unreal": 0.01,
"mean": 0.0,
"std_deviation": 1e8,
}
)
@property
def universe_size(self):
return self.__class__._universe_size
def _randomize(self):
cls = self.__class__
val = super(FloatingPointRandomizer, self)._randomize()
if val is not None:
return val
if random.random() < self.constraints["p_unreal"]:
return random.choice(cls.unreals)
if random.random() < self.constraints["p_zero"]:
return 0.0
return random.normalvariate(
self.constraints["mean"], self.constraints["std_deviation"]
)
def eval_seed(self, seed):
if isinstance(seed, six.string_types):
return float(seed)
elif isinstance(seed, (float,) + six.integer_types):
return seed
else:
raise TypeError("Invalid %s seed: %s" % (self.__class__.name, seed))
class SinglePrecisionFloatRandomizer(FloatingPointRandomizer):
ttype = Thrift.TType.FLOAT
_universe_size = 2**32
class DoublePrecisionFloatRandomizer(FloatingPointRandomizer):
ttype = Thrift.TType.DOUBLE
_universe_size = 2**64
class CollectionTypeRandomizer(BaseRandomizer):
"""Superclass for ttypes with lengths"""
default_constraints = dict(BaseRandomizer.default_constraints)
default_constraints.update({"mean_length": 12})
@property
def universe_size(self):
return INFINITY
def _get_length(self):
mean = self.constraints["mean_length"]
if mean == 0:
return 0
else:
val = int(random.expovariate(1 / mean))
max_len = self.constraints.get("max_length", None)
if max_len is not None and val > max_len:
val = max_len
return val
class StringRandomizer(CollectionTypeRandomizer, ScalarTypeRandomizer):
ttype = Thrift.TType.STRING
ascii_range = (0, 127)
default_constraints = dict(CollectionTypeRandomizer.default_constraints)
default_constraints.update(ScalarTypeRandomizer.default_constraints)
def _randomize(self):
cls = self.__class__
val = ScalarTypeRandomizer._randomize(self)
if val is not None:
return val
length = self._get_length()
chars = []
for _ in sm.xrange(length):
chars.append(chr(random.randint(*cls.ascii_range)))
return "".join(chars)
def eval_seed(self, seed):
if isinstance(seed, six.string_types):
return seed
elif isinstance(seed, six.binary_type):
return seed
else:
raise TypeError("Invalid string seed: %s" % seed)
class BinaryRandomizer(CollectionTypeRandomizer, ScalarTypeRandomizer):
ttype = Thrift.TType.UTF8
byte_range = (0, 255)
default_constraints = dict(CollectionTypeRandomizer.default_constraints)
default_constraints.update(ScalarTypeRandomizer.default_constraints)
def _randomize(self):
val = ScalarTypeRandomizer._randomize(self)
if val is not None:
return self.type_spec.construct_instance(val)
length = self._get_length()
bs = []
for _ in sm.xrange(length):
bs.append(six.int2byte(random.randint(*self.byte_range)))
return self.type_spec.construct_instance(six.ensure_binary("").join(bs))
def eval_seed(self, seed):
if isinstance(seed, six.string_types):
return self.type_spec.construct_instance(six.ensure_binary(seed))
elif isinstance(seed, six.binary_type):
return self.type_spec.construct_instance(seed)
else:
raise TypeError("Invalid binary seed: %s" % seed)
class NonAssociativeContainerRandomizer(CollectionTypeRandomizer):
"""Randomizer class for lists and sets"""
default_constraints = dict(CollectionTypeRandomizer.default_constraints)
default_constraints.update({"element": {}})
def _init_subrandomizers(self):
elem_spec = self.type_spec.get_subtypes()[ThriftPyTypeSpec.SUBTYPE_ELEMENT]
elem_constraints = self.constraints["element"]
self._element_randomizer = self.state.get_randomizer_for_spec(
elem_spec, elem_constraints
)
class ListRandomizer(NonAssociativeContainerRandomizer):
ttype = Thrift.TType.LIST
def _randomize(self):
length = self._get_length()
elements = []
for _ in sm.xrange(length):
element = self._element_randomizer.generate()
if element is not None:
elements.append(element)
return self.type_spec.construct_instance(elements)
def _fuzz_insert(self, seed):
"""Fuzz a list seed by inserting a random element at a random index"""
randomizer = self._element_randomizer
new_elem = randomizer.generate()
insertion_index = random.randint(0, len(seed))
seed.insert(insertion_index, new_elem)
return seed
def _fuzz_delete(self, seed):
"""Fuzz a list seed by deleting a random element
Requires len(seed) >= 1"""
delete_index = random.randint(0, len(seed) - 1)
seed.pop(delete_index)
return seed
def _fuzz_one_element(self, seed):
"""Fuzz a list seed by fuzzing one element
Requires len(seed) >= 1"""
fuzz_index = random.randint(0, len(seed) - 1)
randomizer = self._element_randomizer
fuzzed_elem = randomizer.generate(seed=seed[fuzz_index])
seed[fuzz_index] = fuzzed_elem
return seed
def _fuzz(self, seed):
seed = self.eval_seed(seed)
# Convert to list if needed. The thrift list type may be immutable
if not isinstance(seed, list):
seed = list(seed)
if len(seed) == 0:
# Seed is an empty list. The only valid fuzzer function
# is the insert function
fuzzed = self._fuzz_insert(seed)
else:
# All fuzzer functions are valid
fuzz_fn = random.choice(
[self._fuzz_insert, self._fuzz_delete, self._fuzz_one_element]
)
fuzzed = fuzz_fn(seed)
return self.type_spec.construct_instance(fuzzed)
def eval_seed(self, seed):
return self.type_spec.construct_instance(
[self._element_randomizer.eval_seed(e) for e in seed]
)
class SetRandomizer(NonAssociativeContainerRandomizer):
ttype = Thrift.TType.SET
def _randomize(self):
element_randomizer = self._element_randomizer
length = self._get_length()
length = min(length, element_randomizer.universe_size)
elements = set()
# If it is possible to get `length` unique elements,
# in N = k*length iterations we will reach `length`
# with high probability.
i = 0
k = 10
N = k * length
while len(elements) < length and i < N:
element = element_randomizer.generate()
if element is not None:
elements.add(element)
i += 1
return self.type_spec.construct_instance(elements)
def eval_seed(self, seed):
return self.type_spec.construct_instance(
{self._element_randomizer.eval_seed(e) for e in seed}
)
class MapRandomizer(CollectionTypeRandomizer):
ttype = Thrift.TType.MAP
default_constraints = dict(CollectionTypeRandomizer.default_constraints)
default_constraints.update({"key": {}, "value": {}})
def _init_subrandomizers(self):
subtypes = self.type_spec.get_subtypes()
key_spec = subtypes[ThriftPyTypeSpec.SUBTYPE_KEY]
val_spec = subtypes[ThriftPyTypeSpec.SUBTYPE_VALUE]
key_constraints = self.constraints["key"]
val_constraints = self.constraints["value"]
self._key_randomizer = self.state.get_randomizer_for_spec(
key_spec, key_constraints
)
self._val_randomizer = self.state.get_randomizer_for_spec(
val_spec, val_constraints
)
def _randomize(self):
key_randomizer = self._key_randomizer
val_randomizer = self._val_randomizer
length = self._get_length()
length = min(length, key_randomizer.universe_size)
elements = {}
i = 0
k = 10
N = k * length
while len(elements) < length and i < N:
key = key_randomizer.generate()
val = val_randomizer.generate()
try:
if key is not None and val is not None:
elements[key] = val
except TypeError:
# If we have a type error here it means that the key
# can't be hashed. There can be structs that have
# keys python doesn't like.
#
# For now just bail out.
return self.type_spec.construct_instance(elements)
i += 1
return self.type_spec.construct_instance(elements)
def eval_seed(self, seed):
res = {}
for key, val in six.iteritems(seed):
key = self._key_randomizer.eval_seed(key)
val = self._val_randomizer.eval_seed(val)
res[key] = val
return self.type_spec.construct_instance(res)
class StructRandomizer(BaseRandomizer):
ttype = Thrift.TType.STRUCT
default_constraints = dict(BaseRandomizer.default_constraints)
default_constraints.update(
{"p_include": 0.99, "max_recursion_depth": 3, "per_field": {}}
)
@property
def universe_size(self):
return INFINITY
def _init_subrandomizers(self):
subtypes = self.type_spec.get_subtypes()
requiredness = self.type_spec.get_subtype_requiredness()
field_rules = {}
for name, field_spec in six.iteritems(subtypes):
field_required = requiredness[name]
field_constraints = self.constraints.get(name, {})
field_randomizer = self.state.get_randomizer_for_spec(
field_spec, field_constraints
)
field_rules[name] = {
"required": field_required,
"randomizer": field_randomizer,
}
field_rules[name].update(self.constraints["per_field"].get(name, {}))
self._field_rules = field_rules
self._is_union = self.type_spec.is_union
self._field_names = list(self._field_rules)
def _increase_recursion_depth(self):
"""Increase the depth in the recursion trace for this struct type.
Returns:
(is_top_level, max_depth_reached)
If is_top_level is True, when decrease_recursion_depth is called
the entry in the trace dictionary will be removed to indicate
that this struct type is no longer being recursively generated.
If max_depth_reached is True, the call to increase_recursion_depth
has "failed" indicating that this randomizer is trying to generate
a value that is too deep in the recursive tree and should return None.
In this case, the recursion trace dictionary is not modified.
"""
trace = self.state.recursion_trace
name = self.type_name
if name in trace:
# There is another struct of this type higher up in
# the generation tree
is_top_level = False
else:
is_top_level = True
trace[name] = self.constraints["max_recursion_depth"]
depth = trace[name]
if depth == 0:
# Reached maximum depth
if is_top_level:
del trace[name]
max_depth_reached = True
else:
depth -= 1
trace[name] = depth
max_depth_reached = False
return (is_top_level, max_depth_reached)
def _decrease_recursion_depth(self, is_top_level):
"""Decrease the depth in the recursion trace for this struct type.
If is_top_level is True, the entry in the recursion trace is deleted.
Otherwise, the entry is incremented.
"""
trace = self.state.recursion_trace
name = self.type_name
if is_top_level:
del trace[name]
else:
trace[name] += 1
def _randomize(self):
"""Return randomized fields as a dict of {field_name: value}
If fields cannot be generated due to an unsatisfiable
constraint, return None.
"""
(is_top_level, max_depth_reached) = self._increase_recursion_depth()
if max_depth_reached:
return None
fields = {}
fields_to_randomize = self._field_names
p_include = self.constraints["p_include"]
if self._is_union:
if fields_to_randomize and random.random() < p_include:
fields_to_randomize = [random.choice(fields_to_randomize)]
p_include = 1.0
else:
fields_to_randomize = []
for field_name in fields_to_randomize:
rule = self._field_rules[field_name]
required = rule["required"]
field_p_include = rule.get("p_include", p_include)
if not required and not (random.random() < field_p_include):
continue
value = rule["randomizer"].generate()
if value is None:
# Randomizer was unable to generate a value
if required:
# Cannot generate the struct
fields = None
break
else:
# Ignore the field
continue
else:
fields[field_name] = value
self._decrease_recursion_depth(is_top_level)
if (fields is None) or (self._is_union and not fields):
return None
else:
if self._is_union:
for f in self._field_names:
fields.setdefault(f, None)
return self.type_spec.construct_instance(**fields)
def _fuzz(self, seed):
"""Fuzz a single field of the struct at random"""
fields = {}
field_rules = self._field_rules
seed = self.type_spec.value_to_dict(seed)
if self._is_union:
if not seed:
# The seed could be malformed.
# If that's the case just return none
return None
# The seed should be a single key/value pair
field_name, seed_val = six.next(six.iteritems(seed))
field_randomizer = field_rules[field_name]["randomizer"]
fuzzed_val = field_randomizer.generate(seed=seed_val)
fields[field_name] = fuzzed_val
for f in self._field_names:
fields.setdefault(f, None)
elif field_rules:
# Fuzz only one field and leave the rest with the seed value
fuzz_field_name = random.choice(list(field_rules))
fuzz_field_rule = field_rules[fuzz_field_name]
fuzz_field_randomizer = fuzz_field_rule["randomizer"]
fuzz_seed_val = seed.get(fuzz_field_name, None)
fuzzed_value = fuzz_field_randomizer.generate(seed=fuzz_seed_val)
if fuzzed_value is None:
if fuzz_field_rule["required"]:
# Cannot generate the struct
return None
else:
fields[fuzz_field_name] = fuzzed_value
for field_name, seed_val in six.iteritems(seed):
if field_name == fuzz_field_name or field_name not in field_rules:
continue
field_randomizer = field_rules[field_name]["randomizer"]
fields[field_name] = field_randomizer.eval_seed(seed_val)
return self.type_spec.construct_instance(**fields)
def eval_seed(self, seed):
fields = {}
seed = self.type_spec.value_to_dict(seed)
for key, val in six.iteritems(seed):
if key not in self._field_rules:
continue
field_randomizer = self._field_rules[key]["randomizer"]
val = field_randomizer.eval_seed(val)
fields[key] = val
if self._is_union:
for f in self._field_names:
fields.setdefault(f, None)
return self.type_spec.construct_instance(**fields)
_ttype_to_randomizer = {}
def _init_types() -> None:
# Find classes that subclass BaseRandomizer
global_names = globals().keys()
for name in global_names:
value = globals()[name]
if not isinstance(value, type):
continue
cls = value
if issubclass(cls, BaseRandomizer):
if cls.ttype is not None:
_ttype_to_randomizer[cls.ttype] = cls
_init_types()
def _get_randomizer_class(type_spec):
# Special case: i32 and enum have separate classes but the same ttype
if type_spec.is_enum():
return EnumRandomizer
return _ttype_to_randomizer[type_spec.ttype]
class RandomizerState(object):
"""A wrapper around randomizer_map and recursion_trace
All randomizers are initialized with a state. If a state is not explicitly
specified, a clean one will be created. When randomizers create sub-
randomizers, they should pass on their state object in order to share
memoization and recursion trace information.
--
`randomizers` maps ttype to a list of already-constructed randomizer
instances. This allows for memoization: calls to get_randomizer with
identical arguments and state will always return the same randomizer
instance.
--
`recursion_trace` maps a struct name to an int indicating the current
remaining depth of recursion for the struct with that name.
Struct randomizers use this information to bound the recursion depth
of generated structs.
If a struct name has no entry in the recursion trace, that struct
is not currently being generated at any depth in the generation tree.
When the top level randomizer for a struct type is entered, that
randomizer's constraints are used to determine the maximum recursion
depth and the maximum depth is inserted into the trace dictionary.
At each level of recursion, the entry in the trace dictionary is
decremented. When it reaches zero, the maximum depth has been reached
and no more structs of that type are generated.
--
type_constraint_stacks maps type_name strings to
constraint dictionaries that should be applied to all randomizers
with type type_name. The items at the top of the stack
(higher indices) were pushed most recently and thus override the
constraints lower in the stack.
Randomizer instances are responsible for pushing to and popping from
their respective constraint stacks according to the type rules in
their constraint dictionaries.
"""
def __init__(self, default_constraints=None):
self.randomizers = collections.defaultdict(list)
self.recursion_trace = {}
self.type_constraint_stacks = collections.defaultdict(list)
self.default_constraints = default_constraints or {}
def get_randomizer(self, ttype, spec_args, constraints):
type_spec = get_spec(ttype, spec_args)
return self.get_randomizer_for_spec(type_spec, constraints)
def get_randomizer_for_spec(self, type_spec, constraints):
"""Get a randomizer object.
Return an already-preprocessed randomizer if available and create a new
one and preprocess it otherwise"""
randomizer_class = _get_randomizer_class(type_spec)
randomizer = randomizer_class(type_spec, self, constraints)
# Check if this randomizer is already in self.randomizers
randomizers = self.randomizers[randomizer.__class__.ttype]
for other in randomizers:
if other == randomizer:
return other
# No match. Register and preprocess this randomizer
randomizers.append(randomizer)
randomizer.preprocess()
return randomizer
def push_type_constraints(self, constraints):
"""Push type constraints onto the type constraint stack
Return a list of stacks that need to be popped from
Return `pushed`, a variable that should be passed back to
pop_type_constraints when leaving the type constraints' scope.
"""
pushed = []
for key, val in six.iteritems(constraints):
if key.startswith("|"):
# This is a type constraint
type_name = key[1:]
stack = self.type_constraint_stacks[type_name]
stack.append(val)
pushed.append(stack)
return pushed
def pop_type_constraints(self, pushed):
for stack in pushed:
stack.pop()
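if __name__ == "__main__":  # Editor's demo, not part of the original module
    # Constraint keys that start with "|" are type rules: they are pushed onto
    # the per-type stacks and folded into the constraints of every nested
    # randomizer of that thrift type (see flatten_constraints above).
    state = RandomizerState(default_constraints={"p_fuzz": 0.5})
    pushed = state.push_type_constraints({"|i32": {"range": [0, 100]}})
    assert state.type_constraint_stacks["i32"] == [{"range": [0, 100]}]
    state.pop_type_constraints(pushed)
    assert state.type_constraint_stacks["i32"] == []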
|
{
"content_hash": "80ea4f631ad0324e5f3ad1dce4c85e7d",
"timestamp": "",
"source": "github",
"line_count": 1020,
"max_line_length": 84,
"avg_line_length": 33.5735294117647,
"alnum_prop": 0.6156227186450577,
"repo_name": "facebook/fbthrift",
"id": "71cfddcf4fe4ae2cb06691fea291237e25e49ec7",
"size": "34860",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/lib/py/util/randomizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
}
|
"""
Herald HTTP transport implementation
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.3
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald HTTP
from . import ACCESS_ID, SERVICE_HTTP_RECEIVER, SERVICE_HTTP_TRANSPORT, \
CONTENT_TYPE_JSON
# HTTP requests
import requests.exceptions
# Herald Core
from herald.exceptions import InvalidPeerAccess
import herald
import herald.beans as beans
import herald.utils as utils
import herald.transports.http
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Property, BindField, Validate, Invalidate, Instantiate, RequiresBest
from pelix.utilities import to_str
import pelix.utilities
import pelix.threadpool
import pelix.misc.jabsorb as jabsorb
# Standard library
import json
import logging
import time
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory('herald-http-transport-factory')
@RequiresBest('_probe', herald.SERVICE_PROBE)
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Requires('_local_recv', SERVICE_HTTP_RECEIVER)
@Provides((herald.SERVICE_TRANSPORT, SERVICE_HTTP_TRANSPORT))
@Property('_access_id', herald.PROP_ACCESS_ID, ACCESS_ID)
@Instantiate('herald-http-transport')
class HttpTransport(object):
"""
HTTP sender for Herald.
"""
def __init__(self):
"""
Sets up the transport
"""
# Herald Core directory
self._directory = None
# Debug probe
self._probe = None
# Properties
self._access_id = ACCESS_ID
# Local UID
self.__peer_uid = None
# Request send pool
self.__pool = pelix.threadpool.ThreadPool(5, logname="herald-http")
# Requests session
self.__session = requests.Session()
# Local access information
self.__access_port = None
self.__access_path = None
@BindField('_local_recv')
def _bind_local_receiver(self, _, service, svc_ref):
"""
The local receiver has been bound
"""
access = service.get_access_info()
self.__access_port = access[1]
self.__access_path = access[2]
@Validate
def _validate(self, _):
"""
Component validated
"""
self.__peer_uid = self._directory.local_uid
self.__session = requests.Session()
self.__session.stream = False
self.__pool.start()
@Invalidate
def _invalidate(self, _):
"""
Component invalidated
"""
self.__peer_uid = None
self.__session.close()
self.__pool.stop()
def __get_access(self, peer, extra=None):
"""
Computes the URL to access the Herald servlet on the given peer
:param peer: A Peer bean
:param extra: Extra information, given for replies
:return: A URL, or None
"""
host = None
port = 0
path = None
if extra is not None:
# Try to use extra information
host = extra.get('host')
port = extra.get('port')
path = extra.get('path')
if not host:
try:
# Use the directory
host, port, path = peer.get_access(ACCESS_ID).access
except (KeyError, AttributeError):
# Invalid access: stop here
return None
# Normalize arguments
if ':' in host:
# IPv6 address
host = '[{0}]'.format(host)
if port == 0:
port = 80
if path[0] == '/':
path = path[1:]
return 'http://{0}:{1}/{2}'.format(host, port, path)
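    # Editor's note (illustration, not part of the original module): given an
    # access tuple of ('::1', 0, '/herald'), the normalisation above yields
    # 'http://[::1]:80/herald': IPv6 hosts are bracketed, port 0 falls back
    # to 80 and a leading '/' is stripped from the path before formatting.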
def __prepare_message(self, message, parent_uid=None, target_peer=None, target_group=None):
"""
        Prepares an HTTP request.
        :param message: The Message bean to send
        :param parent_uid: UID of the message this one replies to (optional)
        :param target_peer: Target Peer bean, if any (optional)
        :param target_group: Name of the target group, if any (optional)
        :return: A (headers, content) tuple
"""
# Prepare headers
"""
headers = {'content-type': CONTENT_TYPE_JSON,
'herald-subject': message.subject,
'herald-uid': message.uid,
'herald-sender-uid': self.__peer_uid,
'herald-timestamp': int(time.time() * 1000),
'herald-port': self.__access_port,
'herald-path': self.__access_path}
"""
headers = {'content-type': CONTENT_TYPE_JSON}
message.add_header(herald.MESSAGE_HEADER_SENDER_UID, self.__peer_uid)
message.add_header(herald.transports.http.MESSAGE_HEADER_PORT, self.__access_port)
message.add_header(herald.transports.http.MESSAGE_HEADER_PATH, self.__access_path)
if parent_uid:
#headers['herald-reply-to'] = parent_uid
message.add_header(herald.MESSAGE_HEADER_REPLIES_TO, parent_uid)
# update target peer header
if target_peer is not None:
message.add_header(herald.MESSAGE_HEADER_TARGET_PEER, target_peer.uid)
# update target peer header
if target_group is not None:
message.add_header(herald.MESSAGE_HEADER_TARGET_GROUP, target_group)
if message.subject in herald.SUBJECTS_RAW:
content = utils.to_str(message.content)
else:
# Convert content to JSON
# print("debug: message: {}".format(message))
# print(type(message))
# print(message.content)
content = utils.to_json(message)
return headers, content
def __post_message(self, url, content, headers):
"""
Method called directly or in a thread to send a POST HTTP request
:param url: Target URL
:param content: Request body
:param headers: Request headers
:return: A response bean
"""
try:
return self.__session.post(url, content, headers=headers)
except requests.exceptions.ConnectionError as ex:
# Connection aborted during request
_logger.error("Connection error while posting a message: %s", ex)
return None
def fire(self, peer, message, extra=None):
"""
Fires a message to a peer
:param peer: A Peer bean
:param message: Message bean to send
:param extra: Extra information used in case of a reply
:raise InvalidPeerAccess: No information found to access the peer
:raise Exception: Error sending the request or on the server side
"""
# Get the request message UID, if any
parent_uid = None
if extra is not None:
parent_uid = extra.get('parent_uid')
# print('='*20)
# print(message)
# print('-'*20)
# print(message.content)
# Try to read extra information
url = self.__get_access(peer, extra)
if not url:
# No HTTP access description
raise InvalidPeerAccess(beans.Target(peer=peer),
"No '{0}' access found"
.format(self._access_id))
# Send the HTTP request (blocking) and raise an error if necessary
headers, content = self.__prepare_message(message, parent_uid, target_peer=peer)
# Log before sending
self._probe.store(
herald.PROBE_CHANNEL_MSG_SEND,
{"uid": message.uid, "timestamp": time.time(),
"transport": ACCESS_ID, "subject": message.subject,
"target": peer.uid if peer else "<unknown>",
"transportTarget": url, "repliesTo": parent_uid or ""})
self._probe.store(
herald.PROBE_CHANNEL_MSG_CONTENT,
{"uid": message.uid, "content": content}
)
response = self.__post_message(url, content, headers)
if response is None:
# The error has been logged in post_message
raise IOError("Error sending message {0}".format(message.uid))
else:
# Raise an error if the status isn't 2XX
response.raise_for_status()
def fire_group(self, group, peers, message):
"""
Fires a message to a group of peers
:param group: Name of a group
:param peers: Peers to communicate with
:param message: Message to send
:return: The list of reached peers
"""
# Prepare the message
headers, content = self.__prepare_message(message, target_group=group)
# The list of peers having been reached
accessed_peers = set()
countdown = pelix.utilities.CountdownEvent(len(peers))
def peer_result(_, exception, target_peer):
"""
Called back once the request has been posted
"""
if exception is None:
# No exception => success
accessed_peers.add(target_peer)
# In any case: update the count down
countdown.step()
# Store the message once
self._probe.store(
herald.PROBE_CHANNEL_MSG_CONTENT,
{"uid": message.uid, "content": content}
)
        # Send a request to each peer
for peer in peers:
# Try to read extra information
url = self.__get_access(peer)
if url:
# Log before sending
self._probe.store(
herald.PROBE_CHANNEL_MSG_SEND,
{"uid": message.uid, "timestamp": time.time(),
"transport": ACCESS_ID, "subject": message.subject,
"target": peer.uid, "transportTarget": url,
"repliesTo": ""})
# Send the HTTP requests (from the thread pool)
future = self.__pool.enqueue(self.__post_message,
url, content, headers)
future.set_callback(peer_result, peer)
else:
# No HTTP access description
_logger.debug("No '%s' access found for %s", self._access_id,
peer)
        # Wait for the requests to be sent (no more than 10s)
if not countdown.wait(10):
_logger.warning("Not all peers have been reached after 10s...")
return set(accessed_peers)
|
{
"content_hash": "f2bc32133729e18d256f4e2e90d7ff81",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 95,
"avg_line_length": 33.19825072886297,
"alnum_prop": 0.5676648810046544,
"repo_name": "librallu/cohorte-herald",
"id": "c2c7c6b323b25552a0fdde6fa673c8665ce4766e",
"size": "11437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/herald/transports/http/transport.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7273"
},
{
"name": "Java",
"bytes": "363366"
},
{
"name": "Makefile",
"bytes": "7458"
},
{
"name": "Python",
"bytes": "552917"
},
{
"name": "Shell",
"bytes": "800"
},
{
"name": "TeX",
"bytes": "8"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models import signals
from composition import CompositionField
D = dict
class CommentCountField(CompositionField):
def __init__(self, verbose_name=None, editable=False):
def _do(host, comment, signal):
from turbion.bits.blogs.models import Comment
return host.comments.filter(Comment.published.get_lookup()).count()
super(CommentCountField, self).__init__(
native=models.PositiveIntegerField(
default=0, editable=editable, verbose_name=verbose_name
),
trigger=[
D(
on=(signals.post_save, signals.post_delete),
do=_do
)
],
commons=D(
sender_model='turbion.Comment',
field_holder_getter=lambda comment: comment.post,
),
commit=True,
update_method=D(
do=0,
initial=0,
queryset=lambda host: host.comments.count(),
name="sync_comment_count"
)
)
class PostCountField(CompositionField):
def __init__(self, verbose_name=None, editable=False):
def _do(host, comment, signal):
from turbion.bits.blogs.models import Post
return host.posts.filter(Post.published.get_lookup()).count()
super(PostCountField, self).__init__(
native=models.PositiveIntegerField(
default=0, editable=editable, verbose_name=verbose_name
),
trigger=[
D(
on=(signals.post_save, signals.post_delete),
do=_do
)
],
commons=D(
sender_model='turbion.Post',
field_holder_getter=lambda post: post.tags.all(),
),
commit=True,
update_method=D(
do=0,
initial=0,
queryset=lambda host: host.posts.count(),
name="sync_post_count"
)
)
|
{
"content_hash": "3808c7af76779bfb0de45706629fb117",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 32.18181818181818,
"alnum_prop": 0.512241054613936,
"repo_name": "strogo/turbion",
"id": "9f6fe81a504d8599d2b3f0ecd0b505ebf24ced3c",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbion/bits/blogs/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os #For creating folder
import urllib #For accessing the web
import re #For regex
from urllib.request import urlopen, URLopener
import threading #For downloading multiple files at once
import queue #To create a queue for the threads to pull data from
import time #For timing operations
#Returns list of lists that contain picture URL endings (eg "ufufuf.jpg") and the extension of the picture (eg "jpg")
#Takes in the url of the album or gallery, and the name that the user wants the directory to be named. Defaults to album key if not specified
def create_pic_list(album_url, dir_name=False):
# Check the URL is actually imgur:
'''
match = re.match("(https?)\:\/\/(www\.)?(?:m\.)?imgur\.com/a/([a-zA-Z0-9]+)(#[0-9]+)?", album_url)
if not match:
#raise ImgurAlbumException("URL must be a valid Imgur Album")
print("URL was not valid imgur URL")
protocol = match.group(1)
album_key = match.group(3)
'''
#Get unique album key
if "/gallery/" in album_url:
album_key = album_url.replace("http://imgur.com/gallery/", "")
elif "/a/" in album_url:
album_key = album_url.replace("http://imgur.com/a/", "")
else:
print("Not valid imgur album URL")
return False
    #Change working directory to a new folder named after the album key (or the user-supplied name)
current_dir = os.getcwd()
#If no directory name specified, use album_key
if not dir_name:
dl_path = current_dir + "\\" + album_key
else:
dl_path = current_dir + "\\" + dir_name
    ##This should allow the download location to be changed so that the program can be run off a locked flash drive
#Test to see if directory exists for program already, if not, create one
if not os.path.exists(dl_path):
os.makedirs(dl_path)
#Change working directory to new one
os.chdir(dl_path)
# Read the no-script version of the page for all the images:
no_script_url = "http://imgur.com/a/" + album_key + "/noscript"
#Get html code from album
try:
response = urlopen(url=no_script_url)
response_code = response.getcode()
except Exception as e:
response = False
#response_code = e.code
print("Error " + str(e))
if not response: # or response.getcode() != 200:
#raise ImgurAlbumException("Error reading Imgur: Error Code %d" % response_code)
print("Error reading Imgur")
return False
# Read in the images using findall regex
html = response.read().decode('utf-8')
#Images now holds a list of all urls of each image
regexImages = re.findall('<img src="(\/\/i\.imgur\.com\/([a-zA-Z0-9]+\.(jpg|jpeg|png|gif)))(\?[0-9]+)?"', html)
    #Remove extra (1st and 4th) columns from the findall result (orig. format is like: ('//i.imgur.com/ufufufh.jpg', 'ufufufh.jpg', 'jpg', ''))
#Creates a list of lists that has only the unique picture key from URL, and then the extension of the picture
    #Also strip the 'h' suffix from the picture key (which only gets the low-res thumbnail) so the full-resolution image is fetched
images = []
for row in regexImages:
images.append([row[1].replace("h.", "."), row[2]])
return images
#Downloads picture into current directory, naming it the "name" passed in
#Takes the ending key of the picture to be downloaded (eg s1K6Ny.jpg) as well as the name that the file should be called
def download_pic(pic_key, name):
#Format of url: http://i.imgur.com/KTqYYKVh.jpg
#Create full URL for download of picture
url = "http://i.imgur.com/" + pic_key
#Check if there is a picture with this name already
#Either rename or skip
if os.path.isfile(name):
return True
#return False
'''
#Add "_1" to the end of the picture name if name is taken
name = "1_" + name
'''
#Create object to open url
picture = URLopener()
#Try to download picture and save as name
try:
picture.retrieve(url, name)
except: #Error in downloading picture
return False
#Return True if process completes, meaning that picture downloaded
return True
# The worker thread pulls an item from the queue and downloads it
def worker():
while True:
item = pics_queue.get()
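        #item is [picture key, position in album], eg ["s1K6Ny.jpg", 3]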
#Create name for picture using position in album and picture name
pic_name = str(item[1]) + "_" + item[0]
#If the download fails, add to queue of failed downloads
if not download_pic(item[0], pic_name):
failed_dl.put(item)
#Use lock to serialize output
with lock:
print("FAILED:", pic_name, "in", threading.current_thread().name)
#If download completes successfully, print that it has
else:
#Use lock to serialize output
with lock:
print("Downloaded", pic_name, "in", threading.current_thread().name)
pics_queue.task_done()
if __name__ == '__main__':
#Test http://imgur.com/a/poltD
url_in = input("Input url: ")
if input("Name the folder different than the album key? (y or n) ") == "y":
dir_name_in = input("What name should the folder have? ")
pics = create_pic_list(url_in, dir_name_in)
else:
pics = create_pic_list(url_in)
#If the list could be created:
if pics:
#Print stats about info
print(len(pics), "images in gallery")
#Get number of each extension type
extension_count = {}
for i in pics:
#If key is already in dict
if i[1] in extension_count:
#Increment count of extension
extension_count[i[1]] = extension_count[i[1]] + 1
#If first occurrence of extension, create entry
else:
extension_count[i[1]] = 1
#Print dict of extensions by iterating through
for key in extension_count:
print(extension_count[key], key, "images")
#Create queue of data so that threads can all access data
#Each queue entry will contain the unique image key and the number in the album that the picture is
pics_queue = queue.Queue()
for i,image in enumerate(pics):
pics_queue.put([image[0], i+1])
#Create empty queue to store failed download keys
failed_dl = queue.Queue()
#Lock to serialize console output
lock = threading.Lock()
#Number of threads to start to process data
num_of_threads = 8
#num_of_threads = int(input("Input number of threads: "))
print("Beginning download of album with", num_of_threads, "threads")
        #Start timer (using perf_counter for precision)
start = time.perf_counter()
#Create number of threads specified
for i in range(num_of_threads):
t = threading.Thread(target=worker)
t.daemon = True # thread dies when main thread (only non-daemon thread) exits.
t.start()
#Wait until all tasks have finished, lock until done
pics_queue.join()
print("Time:", round(time.perf_counter() - start, 3), "seconds")
print("For", len(pics), "pictures using", num_of_threads, "threads")
print(failed_dl.qsize(), "failed downloads")
#Retry if downloads failed
if failed_dl.qsize() > 0:
#Run until break
while True:
retry = input("Try downloading " + str(failed_dl.qsize()) + " failed downloads again?(y for yes) ")
#If user wishes to stop, break
if retry != "y":
break
#If there are not any failed items left, force break
if failed_dl.qsize() == 0:
break
#Reload the download queue from the failed_dl queue
while failed_dl.qsize() > 0:
pics_queue.put(failed_dl.get())
#Start downloading process again
                #Start timer (using perf_counter for precision)
start = time.perf_counter()
#Create number of threads specified
for i in range(num_of_threads):
t = threading.Thread(target=worker)
t.daemon = True # thread dies when main thread (only non-daemon thread) exits.
t.start()
#Wait until all tasks have finished, lock until done
pics_queue.join()
print("Time:", round(time.perf_counter() - start, 3), "seconds")
print(failed_dl.qsize(), "failed downloads")
else:
print("Failed to create list of images in gallery from html")
|
{
"content_hash": "5b63dda03c9a2d788f6393f2e1f64bf9",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 141,
"avg_line_length": 36.43333333333333,
"alnum_prop": 0.6906286759900666,
"repo_name": "mitchtz/Imgur_Album_Downloader",
"id": "705fd88d4a09bfa0ea6d5f17ff607bf92ba9621b",
"size": "7651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Imgur_Album_Downloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7651"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import datetime
import mock
from django.core.cache import cache
from ..extensions.rq import livewatch_update_rq_task
from ..tasks import livewatch_update_celery_task
class TestRqTask:
key = 'livewatch_rq'
def teardown(self):
cache.delete(self.key)
def test_livewatch_update_rq_task(self):
assert cache.get(self.key) is None
livewatch_update_rq_task(self.key)
assert cache.get(self.key) is not None
@mock.patch('livewatch.extensions.cache.cache.set')
def test_livewatch_update_rq_task_cache_not_set(self, cache_mock):
assert cache.get(self.key) is None
livewatch_update_rq_task(self.key)
assert cache.get(self.key) is None
class TestCeleryTask:
key = 'livewatch_task'
def teardown(self):
cache.delete(self.key)
def test_livewatch_update_celery_task(self):
assert cache.get(self.key) is None
livewatch_update_celery_task(self.key)
assert isinstance(cache.get(self.key), datetime) is True
|
{
"content_hash": "6ce11961a33c8cae3666efb80ea9d791",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 27.46153846153846,
"alnum_prop": 0.6918767507002801,
"repo_name": "moccu/django-livewatch",
"id": "99ab47f33a3977443f090a146cb0a34a2d69bc55",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "livewatch/tests/test_tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1056"
},
{
"name": "Python",
"bytes": "22278"
}
],
"symlink_target": ""
}
|
from setuptools import setup
import os
rootdir = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r", encoding="utf8") as file:
long_description = file.read()
# Write a version.py file for the class attribute
VERSION = "0.0.6"
def write_version_py(filename=None):
doc = ("\"\"\"\n" +
"This is a VERSION file and should NOT be manually altered" +
"\n\"\"\"")
doc += "\nversion = \"%s\"" % VERSION
if not filename:
filename = os.path.join(
os.path.dirname(__file__), "rvlib", "version.py")
f = open(filename, "w")
try:
f.write(doc)
finally:
f.close()
write_version_py()
# Setup
setup(name="rvlib",
packages=["rvlib"],
setup_requires=["cffi>=1.0.0","PyYAML"],
scripts=["./build_interface.py"],
cffi_modules=["build_lib.py:ffi"],
install_requires=["cffi>=1.0.0", "numba>=0.49", "numpy", "PyYAML"],
include_package_data=True,
version=VERSION,
      description="Probability distributions mimicking Distributions.jl",
author="Daniel Csaba, Spencer Lyon",
author_email="daniel.csaba@nyu.edu, spencer.lyon@stern.nyu.edu",
url="https://github.com/QuantEcon/rvlib", # URL to the github repo
keywords=["statistics", "distributions"],
long_description=long_description,
long_description_content_type='text/markdown')
|
{
"content_hash": "38399772eed1b7811dfea4046d0b7c49",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 30.26086956521739,
"alnum_prop": 0.6149425287356322,
"repo_name": "QuantEcon/rvlib",
"id": "46064dcfc78b202d6b5f291edc0b07ac54bd468d",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "884"
},
{
"name": "C",
"bytes": "559952"
},
{
"name": "C++",
"bytes": "11011"
},
{
"name": "Jupyter Notebook",
"bytes": "25761"
},
{
"name": "Makefile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "134774"
}
],
"symlink_target": ""
}
|
import types
from botocore import session
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class BotocoreClientBase(object):
def __init__(self, region, access, secret):
self.region = region
self.connection_data = {
'config_file': (None, 'AWS_CONFIG_FILE', None),
'region': ('region', 'BOTO_DEFAULT_REGION', self.region),
}
if not access or not secret:
            raise Exception('Auth params were not provided')
self.session = session.get_session(self.connection_data)
self.session.set_credentials(access, secret)
def __getattr__(self, name):
"""Automatically creates methods for the allowed methods set."""
op = self.service.get_operation(name)
if not op:
raise AttributeError(name)
def func(self, *args, **kwargs):
return op.call(self.endpoint, *args, **kwargs)
func.__name__ = name
setattr(self, name, types.MethodType(func, self, self.__class__))
return getattr(self, name)
class APIClientEC2(BotocoreClientBase):
url = None
def __init__(self, url, region, access, secret, *args, **kwargs):
super(APIClientEC2, self).__init__(region, access, secret,
*args, **kwargs)
self.url = url
self.service = self.session.get_service('ec2')
self.endpoint = self.service.get_endpoint(
region_name=self.region,
endpoint_url=url)
def get_url(self):
return self.url
|
{
"content_hash": "f5fa59e5aac2be42a0511054c4210f39",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 73,
"avg_line_length": 30.173076923076923,
"alnum_prop": 0.5933715742511153,
"repo_name": "MayankGo/ec2-api",
"id": "49baa2963e2bdd2c0b8c7fc00d65fb89940bd8f7",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/kilo",
"path": "ec2api/tests/functional/botocoreclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1460572"
},
{
"name": "Shell",
"bytes": "28903"
}
],
"symlink_target": ""
}
|
from .models import xray_backends
from ..core.models import base_decorator
from .mock_client import MockXrayClient, XRaySegment # noqa
xray_backend = xray_backends["us-east-1"]
mock_xray = base_decorator(xray_backends)
mock_xray_client = MockXrayClient()
|
{
"content_hash": "3b215288abb1b0268e922ccd56ad491d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 36.714285714285715,
"alnum_prop": 0.7821011673151751,
"repo_name": "spulec/moto",
"id": "e47d7642b617d56e11797a1ed5df66898a06a155",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/xray/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
}
|
import os
import shutil, errno
def read_file(file_path):
f = open(file_path, 'r')
contents = f.read()
f.close()
return contents
#unused
def write_file(file_path, contents):
f = open(file_path, 'w+')
f.write(contents)
f.close()
def copydir(src, dest):
try:
shutil.copytree(src, dest)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else:
raise
def cleanup_dir(dir_path):
for the_file in os.listdir(dir_path):
path = os.path.join(dir_path, the_file)
try:
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path)
except Exception as e:
raise e
def is_dir_empty(dir_path):
return len(os.listdir(dir_path)) == 0
#unused
def rmdir(dir_path):
shutil.rmtree(dir_path)
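#Builds the list of ancestor directories of a '/'-separated absolute path,
#eg get_dir_hierarchy('/a/b/c') -> ['/', '/a', '/a/b', '/a/b/c']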
def get_dir_hierarchy(dir_path):
dir_path_parts = dir_path.split(os.path.sep)
dir_tree = []
for i in range(1, len(dir_path_parts)):
dir_tree.append(os.path.sep.join(dir_path_parts[0:i+1]))
dir_tree = [ os.path.sep ] + dir_tree
return dir_tree
def find_files(dir_path, extension):
dir_path_len = len(dir_path + '/')
paths = []
for root, dirs, files in os.walk(dir_path):
for fname in files:
if fname.endswith('.' + extension):
paths.append(os.path.join(root, fname))
paths = [ path[dir_path_len:] for path in paths ]
return paths
|
{
"content_hash": "5dcb0b1c12583d48fb914875b892d42d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 64,
"avg_line_length": 25.716666666666665,
"alnum_prop": 0.5793907971484121,
"repo_name": "mcmlxxxiii/chooh",
"id": "7c3cb7330c5fcd9cc15b1fad927e1a047377ef58",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chooh/util/fs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20806"
}
],
"symlink_target": ""
}
|
"""Exceptions modules for all plugins."""
class NagiosCritical(Exception):
"""
Raise to fire a CRITICAL event to Nagios and stop plugin execution.
:param msg: Output message in Nagios
:type msg: string
"""
def __init__(self, msg):
print "CRITICAL - %s" % msg
raise SystemExit(2)
class NagiosWarning(Exception):
"""
Raise to fire a WARNING event to Nagios and stop plugin execution.
:param msg: Output message in Nagios
:type msg: string
"""
def __init__(self, msg):
print "WARNING - %s" % msg
raise SystemExit(1)
class NagiosUnknown(Exception):
"""
Raise to fire a UNKNOWN event to Nagios and stop plugin execution.
:param msg: Output message in Nagios
:type msg: string
"""
def __init__(self, msg):
print "UNKNOWN - %s" % msg
raise SystemExit(3)
class NagiosOk(Exception):
"""
Raise to fire a OK event to Nagios and stop plugin execution.
:param msg: Output message in Nagios
:type msg: string
"""
def __init__(self, msg):
print "OK - %s" % msg
raise SystemExit(0)
class PluginError(StandardError):
"""
Exception when a plugin error occur.
:param output: Message to show in Nagios status information output.
:type output: str
:param longoutput: Message to show in long output (extra infos).
:type longoutput: str
"""
def __init__(self, output, longoutput, *args, **kwargs):
super(PluginError, self).__init__(*args, **kwargs)
self.message = '%s\n%s' % (output, longoutput)
def __str__(self):
return self.message
|
{
"content_hash": "374c5f5c4ae9e1d9261a702ef5e33e79",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 71,
"avg_line_length": 23.281690140845072,
"alnum_prop": 0.6146400483968542,
"repo_name": "bigbrozer/monitoring.nagios",
"id": "b682712d1cf764c76405b26928ed44d78d4b497a",
"size": "2797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/nagios/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "133"
},
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "119298"
}
],
"symlink_target": ""
}
|
import mechanize
import random
import time
import httplib
import urllib
import json
# nested dictionaries in URL are not implemented in urllib
def recursive_urlencode(data):
def r_urlencode(data, parent=None, pairs=None):
if pairs is None:
pairs = {}
if parent is None:
parents = []
else:
parents = parent
for key, value in data.items():
if hasattr(value, 'values'):
parents.append(key)
r_urlencode(value, parents, pairs)
parents.pop()
else:
pairs[renderKey(parents + [key])] = renderVal(value)
return pairs
return urllib.urlencode(r_urlencode(data))
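# eg (illustrative): recursive_urlencode({'a': {'b': 1}}) -> 'a%5Bb%5D=1'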
def renderKey(parents):
depth, outStr = 0, ''
for x in parents:
str = "[%s]" if depth > 0 else "%s"
outStr += str % renderVal(x)
depth += 1
return outStr
def renderVal(val):
return urllib.quote(unicode(val))
class Transaction(object):
def __init__(self):
pass
def loginGroup(self, password):
options = {
'password': password,
'action': 'checkPassword',
'getTeams': None
}
post_body=recursive_urlencode(options)
start_timer = time.time()
req = mechanize.Request(url='http://dyna-castor.eroux.fr/data.php', data=post_body)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
resp = mechanize.urlopen(req)
resjson = resp.read()
try:
self.custom_timers['loginGroup'] = time.time() - start_timer
except AttributeError:
pass
res = json.loads(resjson)
assert (res[u'success'] == True), 'success = false in loginGroup!'
return int(res[u'groupID'])
def createTeam(self, groupID, contestants):
options = {
'action': 'createTeam',
'groupID': groupID,
'contestants': contestants
}
post_body=recursive_urlencode(options)
start_timer = time.time()
req = mechanize.Request(url='http://dyna-castor.eroux.fr/data.php', data=post_body)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
resp = mechanize.urlopen(req)
resjson = resp.read()
try:
self.custom_timers['createTeam'] = time.time() - start_timer
except AttributeError:
pass
res = json.loads(resjson)
assert (res['success'] == True), 'success = false in createTeam!'
def loadContestData(self):
post_body=recursive_urlencode({'action': 'loadContestData'})
start_timer = time.time()
req = mechanize.Request(url='http://dyna-castor.eroux.fr/data.php', data=post_body)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
resp = mechanize.urlopen(req)
resjson = resp.read()
res = json.loads(resjson)
assert (res['success'] == True), 'success = false in loadContestData!'
try:
self.custom_timers['loadContestData'] = time.time() - start_timer
except AttributeError:
pass
def run(self):
groupID = self.loginGroup('bnx9bzvn')
contestants = {0:{'lastName': 'testLastName', 'firstName': 'testFirstName', 'genre': 'testGenre'}}
        self.createTeam(groupID, contestants)
self.loadContestData()
if __name__ == '__main__':
trans = Transaction()
trans.run()
#print trans.custom_timers
|
{
"content_hash": "56725feac7416c18d6bc0b13f8cd5ef5",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 106,
"avg_line_length": 31.90990990990991,
"alnum_prop": 0.5810276679841897,
"repo_name": "be-oi/beoi-contest-platform",
"id": "5859b6f595706478d15524adc5cbd238dce36ff9",
"size": "3817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/access_contest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12479"
},
{
"name": "Dockerfile",
"bytes": "1187"
},
{
"name": "HTML",
"bytes": "27132"
},
{
"name": "Hack",
"bytes": "6361"
},
{
"name": "JavaScript",
"bytes": "207821"
},
{
"name": "PHP",
"bytes": "573292"
},
{
"name": "Python",
"bytes": "18764"
},
{
"name": "Ruby",
"bytes": "7117"
},
{
"name": "Shell",
"bytes": "4495"
}
],
"symlink_target": ""
}
|
"""This example gets all alerts for all clients of an MCC account. This example
assumes the email and password belong to an MCC.
Tags: AlertService.get
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
PAGE_SIZE = 500
def main(client):
# Initialize appropriate service.
alert_service = client.GetAlertService(
'https://adwords-sandbox.google.com', 'v201206')
# Construct selector and get all alerts.
offset = 0
selector = {
'query': {
'clientSpec': 'ALL',
'filterSpec': 'ALL',
'types': ['ACCOUNT_BUDGET_BURN_RATE', 'ACCOUNT_BUDGET_ENDING',
'ACCOUNT_ON_TARGET', 'CAMPAIGN_ENDED', 'CAMPAIGN_ENDING',
'CREDIT_CARD_EXPIRING', 'DECLINED_PAYMENT',
'KEYWORD_BELOW_MIN_CPC', 'MANAGER_LINK_PENDING',
'MISSING_BANK_REFERENCE_NUMBER', 'PAYMENT_NOT_ENTERED',
'TV_ACCOUNT_BUDGET_ENDING', 'TV_ACCOUNT_ON_TARGET',
'TV_ZERO_DAILY_SPENDING_LIMIT', 'USER_INVITE_ACCEPTED',
'USER_INVITE_PENDING', 'ZERO_DAILY_SPENDING_LIMIT'],
'severities': ['GREEN', 'YELLOW', 'RED'],
'triggerTimeSpec': 'ALL_TIME'
},
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
while more_pages:
page = alert_service.Get(selector)[0]
# Display results.
if 'entries' in page:
for alert in page['entries']:
print ('Alert of type \'%s\' and severity \'%s\' for account \'%s\' was'
' found.' % (alert['alertType'], alert['alertSeverity'],
alert['clientCustomerId']))
else:
print 'No alerts were found.'
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
client.use_mcc = True
main(client)
|
{
"content_hash": "4e8b3cb558953f436f550b21c16db026",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 33.57746478873239,
"alnum_prop": 0.5750838926174496,
"repo_name": "nearlyfreeapps/python-googleadwords",
"id": "279cfc231490a9c3132d91efcf30f63fe70aaa11",
"size": "3002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/adwords/v201206/account_management/get_account_alerts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "1394721"
}
],
"symlink_target": ""
}
|
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
# Must mention ALLOWED_HOSTS in production!
ALLOWED_HOSTS = ['172.16.0.66','cellexpress.cgm.ntu.edu.tw']
# Cache the templates in memory for speed-up
loaders = [
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0]['OPTIONS'].update({"debug": False})
TEMPLATES[0]['APP_DIRS'] = False
# Email settings
EMAIL_BACKEND = env.str('EMAIL_BACKEND')
EMAIL_HOST = env.str('EMAIL_HOST')
EMAIL_HOST_USER = env.str('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env.str('EMAIL_HOST_PASSWORD')
EMAIL_PORT = env.int('EMAIL_PORT')
EMAIL_USE_SSL = env.bool('EMAIL_USE_SSL')
EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS')
DEFAULT_FROM_EMAIL = SERVER_EMAIL = '{name} <{addr}>'.format(
name='BioCloud Dev',
addr='biocloud@liang2.io',
)
# Security related settings
# SECURE_HSTS_SECONDS = 2592000
# SECURE_BROWSER_XSS_FILTER = True
# SECURE_CONTENT_TYPE_NOSNIFF=True
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# X_FRAME_OPTIONS = 'DENY'
# Log everything to the logs directory at the project root
LOGFILE_ROOT = join(BASE_DIR, 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': (
'[%(asctime)s] %(levelname)s '
'[%(pathname)s:%(lineno)s] %(message)s'
),
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'django.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['django_log_file', ],
'propagate': True,
'level': 'DEBUG',
},
}
}
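# Give each local app its own file handler (logs/<app>.log) plus console output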
for app in LOCAL_APPS:
app_handler = '%s_log_file' % app
app_log_filepath = '%s.log' % app
LOGGING['loggers'][app] = {
'handlers': [app_handler, 'console', ],
'level': 'DEBUG',
}
LOGGING['handlers'][app_handler] = {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, app_log_filepath),
'formatter': 'verbose',
}
logging.config.dictConfig(LOGGING)
|
{
"content_hash": "691a8c5a8a7c3ead9a730341b90e1baf",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 63,
"avg_line_length": 26.10810810810811,
"alnum_prop": 0.5700483091787439,
"repo_name": "LeeYiFang/Carkinos",
"id": "29876bbb24ceecbd5f72ed884ecfeb44b4a032bc",
"size": "3010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Carkinos/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4881"
},
{
"name": "HTML",
"bytes": "185937"
},
{
"name": "JavaScript",
"bytes": "40494"
},
{
"name": "Jupyter Notebook",
"bytes": "492936"
},
{
"name": "Python",
"bytes": "164478"
},
{
"name": "R",
"bytes": "1386"
}
],
"symlink_target": ""
}
|
from scientifica import Scientifica
|
{
"content_hash": "9fff6a741716378060f0c88998ebc55c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8888888888888888,
"repo_name": "mgraupe/acq4",
"id": "589e21367d1e3bf55f23d1e78d8eade6a0087601",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "acq4/drivers/Scientifica/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "757367"
},
{
"name": "C++",
"bytes": "1222891"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "6110588"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
import os
from django.core.handlers.wsgi import WSGIHandler
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = WSGIHandler()
|
{
"content_hash": "e41a3e9ef309ea140be6eef487365cd7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 59,
"avg_line_length": 29.6,
"alnum_prop": 0.8108108108108109,
"repo_name": "antofik/Wartech",
"id": "409c16a9b7838ff801d4312e25df3cff671ebc22",
"size": "148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "WartechWeb/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2920"
},
{
"name": "CoffeeScript",
"bytes": "2141"
},
{
"name": "JavaScript",
"bytes": "217988"
},
{
"name": "PHP",
"bytes": "144"
},
{
"name": "Python",
"bytes": "58357"
},
{
"name": "Shell",
"bytes": "178"
}
],
"symlink_target": ""
}
|
"""Collect macro definitions from header files.
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import re
from typing import Dict, IO, Iterable, Iterator, List, Optional, Pattern, Set, Tuple, Union
class ReadFileLineException(Exception):
def __init__(self, filename: str, line_number: Union[int, str]) -> None:
message = 'in {} at {}'.format(filename, line_number)
super(ReadFileLineException, self).__init__(message)
self.filename = filename
self.line_number = line_number
class read_file_lines:
# Dear Pylint, conventionally, a context manager class name is lowercase.
# pylint: disable=invalid-name,too-few-public-methods
"""Context manager to read a text file line by line.
```
with read_file_lines(filename) as lines:
for line in lines:
process(line)
```
is equivalent to
```
with open(filename, 'r') as input_file:
for line in input_file:
process(line)
```
except that if process(line) raises an exception, then the read_file_lines
snippet annotates the exception with the file name and line number.
"""
def __init__(self, filename: str, binary: bool = False) -> None:
self.filename = filename
self.file = None #type: Optional[IO[str]]
self.line_number = 'entry' #type: Union[int, str]
self.generator = None #type: Optional[Iterable[Tuple[int, str]]]
self.binary = binary
def __enter__(self) -> 'read_file_lines':
self.file = open(self.filename, 'rb' if self.binary else 'r')
self.generator = enumerate(self.file)
return self
def __iter__(self) -> Iterator[str]:
assert self.generator is not None
for line_number, content in self.generator:
self.line_number = line_number
yield content
self.line_number = 'exit'
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
if self.file is not None:
self.file.close()
if exc_type is not None:
raise ReadFileLineException(self.filename, self.line_number) \
from exc_value
class PSAMacroEnumerator:
"""Information about constructors of various PSA Crypto types.
This includes macro names as well as information about their arguments
when applicable.
This class only provides ways to enumerate expressions that evaluate to
values of the covered types. Derived classes are expected to populate
the set of known constructors of each kind, as well as populate
`self.arguments_for` for arguments that are not of a kind that is
enumerated here.
"""
#pylint: disable=too-many-instance-attributes
def __init__(self) -> None:
"""Set up an empty set of known constructor macros.
"""
self.statuses = set() #type: Set[str]
self.lifetimes = set() #type: Set[str]
self.locations = set() #type: Set[str]
self.persistence_levels = set() #type: Set[str]
self.algorithms = set() #type: Set[str]
self.ecc_curves = set() #type: Set[str]
self.dh_groups = set() #type: Set[str]
self.key_types = set() #type: Set[str]
self.key_usage_flags = set() #type: Set[str]
self.hash_algorithms = set() #type: Set[str]
self.mac_algorithms = set() #type: Set[str]
self.ka_algorithms = set() #type: Set[str]
self.kdf_algorithms = set() #type: Set[str]
self.pake_algorithms = set() #type: Set[str]
self.aead_algorithms = set() #type: Set[str]
self.sign_algorithms = set() #type: Set[str]
# macro name -> list of argument names
self.argspecs = {} #type: Dict[str, List[str]]
# argument name -> list of values
self.arguments_for = {
'mac_length': [],
'min_mac_length': [],
'tag_length': [],
'min_tag_length': [],
} #type: Dict[str, List[str]]
# Whether to include intermediate macros in enumerations. Intermediate
# macros serve as category headers and are not valid values of their
# type. See `is_internal_name`.
# Always false in this class, may be set to true in derived classes.
self.include_intermediate = False
def is_internal_name(self, name: str) -> bool:
"""Whether this is an internal macro. Internal macros will be skipped."""
if not self.include_intermediate:
if name.endswith('_BASE') or name.endswith('_NONE'):
return True
if '_CATEGORY_' in name:
return True
return name.endswith('_FLAG') or name.endswith('_MASK')
def gather_arguments(self) -> None:
"""Populate the list of values for macro arguments.
Call this after parsing all the inputs.
"""
self.arguments_for['hash_alg'] = sorted(self.hash_algorithms)
self.arguments_for['mac_alg'] = sorted(self.mac_algorithms)
self.arguments_for['ka_alg'] = sorted(self.ka_algorithms)
self.arguments_for['kdf_alg'] = sorted(self.kdf_algorithms)
self.arguments_for['aead_alg'] = sorted(self.aead_algorithms)
self.arguments_for['sign_alg'] = sorted(self.sign_algorithms)
self.arguments_for['curve'] = sorted(self.ecc_curves)
self.arguments_for['group'] = sorted(self.dh_groups)
self.arguments_for['persistence'] = sorted(self.persistence_levels)
self.arguments_for['location'] = sorted(self.locations)
self.arguments_for['lifetime'] = sorted(self.lifetimes)
@staticmethod
def _format_arguments(name: str, arguments: Iterable[str]) -> str:
"""Format a macro call with arguments.
The resulting format is consistent with
`InputsForTest.normalize_argument`.
"""
return name + '(' + ', '.join(arguments) + ')'
_argument_split_re = re.compile(r' *, *')
@classmethod
def _argument_split(cls, arguments: str) -> List[str]:
return re.split(cls._argument_split_re, arguments)
def distribute_arguments(self, name: str) -> Iterator[str]:
"""Generate macro calls with each tested argument set.
If name is a macro without arguments, just yield "name".
If name is a macro with arguments, yield a series of
"name(arg1,...,argN)" where each argument takes each possible
value at least once.
"""
try:
if name not in self.argspecs:
yield name
return
argspec = self.argspecs[name]
if argspec == []:
yield name + '()'
return
argument_lists = [self.arguments_for[arg] for arg in argspec]
arguments = [values[0] for values in argument_lists]
yield self._format_arguments(name, arguments)
# Dear Pylint, enumerate won't work here since we're modifying
# the array.
# pylint: disable=consider-using-enumerate
for i in range(len(arguments)):
for value in argument_lists[i][1:]:
arguments[i] = value
yield self._format_arguments(name, arguments)
arguments[i] = argument_lists[i][0]
except BaseException as e:
raise Exception('distribute_arguments({})'.format(name)) from e
def distribute_arguments_without_duplicates(
self, seen: Set[str], name: str
) -> Iterator[str]:
"""Same as `distribute_arguments`, but don't repeat seen results."""
for result in self.distribute_arguments(name):
if result not in seen:
seen.add(result)
yield result
def generate_expressions(self, names: Iterable[str]) -> Iterator[str]:
"""Generate expressions covering values constructed from the given names.
`names` can be any iterable collection of macro names.
For example:
* ``generate_expressions(['PSA_ALG_CMAC', 'PSA_ALG_HMAC'])``
generates ``'PSA_ALG_CMAC'`` as well as ``'PSA_ALG_HMAC(h)'`` for
every known hash algorithm ``h``.
* ``macros.generate_expressions(macros.key_types)`` generates all
key types.
"""
seen = set() #type: Set[str]
return itertools.chain(*(
self.distribute_arguments_without_duplicates(seen, name)
for name in names
))
class PSAMacroCollector(PSAMacroEnumerator):
"""Collect PSA crypto macro definitions from C header files.
"""
def __init__(self, include_intermediate: bool = False) -> None:
"""Set up an object to collect PSA macro definitions.
Call the read_file method of the constructed object on each header file.
* include_intermediate: if true, include intermediate macros such as
PSA_XXX_BASE that do not designate semantic values.
"""
super().__init__()
self.include_intermediate = include_intermediate
self.key_types_from_curve = {} #type: Dict[str, str]
self.key_types_from_group = {} #type: Dict[str, str]
self.algorithms_from_hash = {} #type: Dict[str, str]
@staticmethod
def algorithm_tester(name: str) -> str:
"""The predicate for whether an algorithm is built from the given constructor.
The given name must be the name of an algorithm constructor of the
form ``PSA_ALG_xxx`` which is used as ``PSA_ALG_xxx(yyy)`` to build
an algorithm value. Return the corresponding predicate macro which
is used as ``predicate(alg)`` to test whether ``alg`` can be built
as ``PSA_ALG_xxx(yyy)``. The predicate is usually called
``PSA_ALG_IS_xxx``.
"""
prefix = 'PSA_ALG_'
assert name.startswith(prefix)
midfix = 'IS_'
suffix = name[len(prefix):]
if suffix in ['DSA', 'ECDSA']:
midfix += 'RANDOMIZED_'
elif suffix == 'RSA_PSS':
suffix += '_STANDARD_SALT'
return prefix + midfix + suffix
def record_algorithm_subtype(self, name: str, expansion: str) -> None:
"""Record the subtype of an algorithm constructor.
Given a ``PSA_ALG_xxx`` macro name and its expansion, if the algorithm
is of a subtype that is tracked in its own set, add it to the relevant
set.
"""
# This code is very ad hoc and fragile. It should be replaced by
# something more robust.
if re.match(r'MAC(?:_|\Z)', name):
self.mac_algorithms.add(name)
elif re.match(r'KDF(?:_|\Z)', name):
self.kdf_algorithms.add(name)
elif re.search(r'0x020000[0-9A-Fa-f]{2}', expansion):
self.hash_algorithms.add(name)
elif re.search(r'0x03[0-9A-Fa-f]{6}', expansion):
self.mac_algorithms.add(name)
elif re.search(r'0x05[0-9A-Fa-f]{6}', expansion):
self.aead_algorithms.add(name)
elif re.search(r'0x09[0-9A-Fa-f]{2}0000', expansion):
self.ka_algorithms.add(name)
elif re.search(r'0x08[0-9A-Fa-f]{6}', expansion):
self.kdf_algorithms.add(name)
# "#define" followed by a macro name with either no parameters
# or a single parameter and a non-empty expansion.
# Grab the macro name in group 1, the parameter name if any in group 2
# and the expansion in group 3.
_define_directive_re = re.compile(r'\s*#\s*define\s+(\w+)' +
r'(?:\s+|\((\w+)\)\s*)' +
r'(.+)')
_deprecated_definition_re = re.compile(r'\s*MBEDTLS_DEPRECATED')
def read_line(self, line):
"""Parse a C header line and record the PSA identifier it defines if any.
This function analyzes lines that start with "#define PSA_"
(up to non-significant whitespace) and skips all non-matching lines.
"""
# pylint: disable=too-many-branches
m = re.match(self._define_directive_re, line)
if not m:
return
name, parameter, expansion = m.groups()
expansion = re.sub(r'/\*.*?\*/|//.*', r' ', expansion)
if parameter:
self.argspecs[name] = [parameter]
if re.match(self._deprecated_definition_re, expansion):
# Skip deprecated values, which are assumed to be
# backward compatibility aliases that share
# numerical values with non-deprecated values.
return
if self.is_internal_name(name):
# Macro only to build actual values
return
elif (name.startswith('PSA_ERROR_') or name == 'PSA_SUCCESS') \
and not parameter:
self.statuses.add(name)
elif name.startswith('PSA_KEY_TYPE_') and not parameter:
self.key_types.add(name)
elif name.startswith('PSA_KEY_TYPE_') and parameter == 'curve':
self.key_types_from_curve[name] = name[:13] + 'IS_' + name[13:]
elif name.startswith('PSA_KEY_TYPE_') and parameter == 'group':
self.key_types_from_group[name] = name[:13] + 'IS_' + name[13:]
elif name.startswith('PSA_ECC_FAMILY_') and not parameter:
self.ecc_curves.add(name)
elif name.startswith('PSA_DH_FAMILY_') and not parameter:
self.dh_groups.add(name)
elif name.startswith('PSA_ALG_') and not parameter:
if name in ['PSA_ALG_ECDSA_BASE',
'PSA_ALG_RSA_PKCS1V15_SIGN_BASE']:
# Ad hoc skipping of duplicate names for some numerical values
return
self.algorithms.add(name)
self.record_algorithm_subtype(name, expansion)
elif name.startswith('PSA_ALG_') and parameter == 'hash_alg':
self.algorithms_from_hash[name] = self.algorithm_tester(name)
elif name.startswith('PSA_KEY_USAGE_') and not parameter:
self.key_usage_flags.add(name)
else:
# Other macro without parameter
return
_nonascii_re = re.compile(rb'[^\x00-\x7f]+')
_continued_line_re = re.compile(rb'\\\r?\n\Z')
def read_file(self, header_file):
for line in header_file:
m = re.search(self._continued_line_re, line)
while m:
cont = next(header_file)
line = line[:m.start(0)] + cont
m = re.search(self._continued_line_re, line)
line = re.sub(self._nonascii_re, rb'', line).decode('ascii')
self.read_line(line)
class InputsForTest(PSAMacroEnumerator):
# pylint: disable=too-many-instance-attributes
"""Accumulate information about macros to test.
This includes macro names as well as information about their arguments
when applicable.
"""
def __init__(self) -> None:
super().__init__()
self.all_declared = set() #type: Set[str]
# Identifier prefixes
self.table_by_prefix = {
'ERROR': self.statuses,
'ALG': self.algorithms,
'ECC_CURVE': self.ecc_curves,
'DH_GROUP': self.dh_groups,
'KEY_LIFETIME': self.lifetimes,
'KEY_LOCATION': self.locations,
'KEY_PERSISTENCE': self.persistence_levels,
'KEY_TYPE': self.key_types,
'KEY_USAGE': self.key_usage_flags,
} #type: Dict[str, Set[str]]
# Test functions
self.table_by_test_function = {
# Any function ending in _algorithm also gets added to
# self.algorithms.
'key_type': [self.key_types],
'block_cipher_key_type': [self.key_types],
'stream_cipher_key_type': [self.key_types],
'ecc_key_family': [self.ecc_curves],
'ecc_key_types': [self.ecc_curves],
'dh_key_family': [self.dh_groups],
'dh_key_types': [self.dh_groups],
'hash_algorithm': [self.hash_algorithms],
'mac_algorithm': [self.mac_algorithms],
'cipher_algorithm': [],
'hmac_algorithm': [self.mac_algorithms, self.sign_algorithms],
'aead_algorithm': [self.aead_algorithms],
'key_derivation_algorithm': [self.kdf_algorithms],
'key_agreement_algorithm': [self.ka_algorithms],
'asymmetric_signature_algorithm': [self.sign_algorithms],
'asymmetric_signature_wildcard': [self.algorithms],
'asymmetric_encryption_algorithm': [],
'pake_algorithm': [self.pake_algorithms],
'other_algorithm': [],
'lifetime': [self.lifetimes],
} #type: Dict[str, List[Set[str]]]
mac_lengths = [str(n) for n in [
1, # minimum expressible
4, # minimum allowed by policy
13, # an odd size in a plausible range
14, # an even non-power-of-two size in a plausible range
16, # same as full size for at least one algorithm
63, # maximum expressible
]]
self.arguments_for['mac_length'] += mac_lengths
self.arguments_for['min_mac_length'] += mac_lengths
aead_lengths = [str(n) for n in [
1, # minimum expressible
4, # minimum allowed by policy
13, # an odd size in a plausible range
14, # an even non-power-of-two size in a plausible range
16, # same as full size for at least one algorithm
63, # maximum expressible
]]
self.arguments_for['tag_length'] += aead_lengths
self.arguments_for['min_tag_length'] += aead_lengths
def add_numerical_values(self) -> None:
"""Add numerical values that are not supported to the known identifiers."""
# Sets of names per type
self.algorithms.add('0xffffffff')
self.ecc_curves.add('0xff')
self.dh_groups.add('0xff')
self.key_types.add('0xffff')
self.key_usage_flags.add('0x80000000')
# Hard-coded values for unknown algorithms
#
# These have to have values that are correct for their respective
# PSA_ALG_IS_xxx macros, but are also not currently assigned and are
# not likely to be assigned in the near future.
self.hash_algorithms.add('0x020000fe') # 0x020000ff is PSA_ALG_ANY_HASH
self.mac_algorithms.add('0x03007fff')
self.ka_algorithms.add('0x09fc0000')
self.kdf_algorithms.add('0x080000ff')
self.pake_algorithms.add('0x0a0000ff')
# For AEAD algorithms, the only variability is over the tag length,
# and this only applies to known algorithms, so don't test an
# unknown algorithm.
def get_names(self, type_word: str) -> Set[str]:
"""Return the set of known names of values of the given type."""
return {
'status': self.statuses,
'algorithm': self.algorithms,
'ecc_curve': self.ecc_curves,
'dh_group': self.dh_groups,
'key_type': self.key_types,
'key_usage': self.key_usage_flags,
}[type_word]
# Regex for interesting header lines.
# Groups: 1=macro name, 2=type, 3=argument list (optional).
_header_line_re = \
re.compile(r'#define +' +
r'(PSA_((?:(?:DH|ECC|KEY)_)?[A-Z]+)_\w+)' +
r'(?:\(([^\n()]*)\))?')
# Regex of macro names to exclude.
_excluded_name_re = re.compile(r'_(?:GET|IS|OF)_|_(?:BASE|FLAG|MASK)\Z')
# Additional excluded macros.
_excluded_names = set([
# Macros that provide an alternative way to build the same
# algorithm as another macro.
'PSA_ALG_AEAD_WITH_DEFAULT_LENGTH_TAG',
'PSA_ALG_FULL_LENGTH_MAC',
# Auxiliary macro whose name doesn't fit the usual patterns for
# auxiliary macros.
'PSA_ALG_AEAD_WITH_DEFAULT_LENGTH_TAG_CASE',
])
def parse_header_line(self, line: str) -> None:
"""Parse a C header line, looking for "#define PSA_xxx"."""
m = re.match(self._header_line_re, line)
if not m:
return
name = m.group(1)
self.all_declared.add(name)
if re.search(self._excluded_name_re, name) or \
name in self._excluded_names or \
self.is_internal_name(name):
return
dest = self.table_by_prefix.get(m.group(2))
if dest is None:
return
dest.add(name)
if m.group(3):
self.argspecs[name] = self._argument_split(m.group(3))
_nonascii_re = re.compile(rb'[^\x00-\x7f]+') #type: Pattern
def parse_header(self, filename: str) -> None:
"""Parse a C header file, looking for "#define PSA_xxx"."""
with read_file_lines(filename, binary=True) as lines:
for line in lines:
line = re.sub(self._nonascii_re, rb'', line).decode('ascii')
self.parse_header_line(line)
_macro_identifier_re = re.compile(r'[A-Z]\w+')
def generate_undeclared_names(self, expr: str) -> Iterable[str]:
for name in re.findall(self._macro_identifier_re, expr):
if name not in self.all_declared:
yield name
def accept_test_case_line(self, function: str, argument: str) -> bool:
#pylint: disable=unused-argument
undeclared = list(self.generate_undeclared_names(argument))
if undeclared:
raise Exception('Undeclared names in test case', undeclared)
return True
@staticmethod
def normalize_argument(argument: str) -> str:
"""Normalize whitespace in the given C expression.
The result uses the same whitespace as
` PSAMacroEnumerator.distribute_arguments`.
"""
return re.sub(r',', r', ', re.sub(r' +', r'', argument))
def add_test_case_line(self, function: str, argument: str) -> None:
"""Parse a test case data line, looking for algorithm metadata tests."""
sets = []
if function.endswith('_algorithm'):
sets.append(self.algorithms)
if function == 'key_agreement_algorithm' and \
argument.startswith('PSA_ALG_KEY_AGREEMENT('):
# We only want *raw* key agreement algorithms as such, so
# exclude ones that are already chained with a KDF.
# Keep the expression as one to test as an algorithm.
function = 'other_algorithm'
sets += self.table_by_test_function[function]
if self.accept_test_case_line(function, argument):
for s in sets:
s.add(self.normalize_argument(argument))
# Regex matching a *.data line containing a test function call and
# its arguments. The actual definition is partly positional, but this
# regex is good enough in practice.
_test_case_line_re = re.compile(r'(?!depends_on:)(\w+):([^\n :][^:\n]*)')
def parse_test_cases(self, filename: str) -> None:
"""Parse a test case file (*.data), looking for algorithm metadata tests."""
with read_file_lines(filename) as lines:
for line in lines:
m = re.match(self._test_case_line_re, line)
if m:
self.add_test_case_line(m.group(1), m.group(2))
|
{
"content_hash": "901acbe64a6e7cde76e20c3940260278",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 91,
"avg_line_length": 43.341818181818184,
"alnum_prop": 0.595477808540985,
"repo_name": "Mbed-TLS/mbedtls",
"id": "3cad2a3f6238590ce2748508697c4f8a1422731d",
"size": "23838",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "scripts/mbedtls_dev/macro_collector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1715"
},
{
"name": "C",
"bytes": "9488130"
},
{
"name": "CMake",
"bytes": "54038"
},
{
"name": "Dockerfile",
"bytes": "5761"
},
{
"name": "GDB",
"bytes": "2772"
},
{
"name": "Jinja",
"bytes": "92635"
},
{
"name": "Makefile",
"bytes": "117684"
},
{
"name": "Perl",
"bytes": "57780"
},
{
"name": "Python",
"bytes": "587343"
},
{
"name": "Shell",
"bytes": "1844663"
},
{
"name": "SmPL",
"bytes": "354"
},
{
"name": "Tcl",
"bytes": "110"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from .._serialization import Deserializer, Serializer
from ._configuration import PrivateDnsManagementClientConfiguration
from .operations import PrivateZonesOperations, RecordSetsOperations, VirtualNetworkLinksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PrivateDnsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""The Private DNS Management Client.
:ivar private_zones: PrivateZonesOperations operations
:vartype private_zones: azure.mgmt.privatedns.aio.operations.PrivateZonesOperations
:ivar virtual_network_links: VirtualNetworkLinksOperations operations
:vartype virtual_network_links:
azure.mgmt.privatedns.aio.operations.VirtualNetworkLinksOperations
:ivar record_sets: RecordSetsOperations operations
:vartype record_sets: azure.mgmt.privatedns.aio.operations.RecordSetsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2020-06-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = PrivateDnsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.private_zones = PrivateZonesOperations(self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_links = VirtualNetworkLinksOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.record_sets = RecordSetsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PrivateDnsManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
{
"content_hash": "d33bfa7f50543508b8586cea3471c85c",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 115,
"avg_line_length": 48.215053763440864,
"alnum_prop": 0.715878679750223,
"repo_name": "Azure/azure-sdk-for-python",
"id": "812013baff831d2931588f2e8fdb01b0a926eb9b",
"size": "4952",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/network/azure-mgmt-privatedns/azure/mgmt/privatedns/aio/_private_dns_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import json
import copy
import collections
class QueryError(Exception):
'''Query error base class'''
def __init__(self, message, status_code=None):
super(QueryError, self).__init__(message)
if status_code:
self.status_code = status_code
class QueryResourceDoesNotExist(QueryError):
'''Query returned no results'''
pass
class QueryResourceMultipleResultsReturned(QueryError):
'''Query was supposed to return unique result, returned more than one'''
pass
class QueryManager(object):
def __init__(self, model_class):
self.model_class = model_class
def _fetch(self, **kw):
klass = self.model_class
uri = self.model_class.ENDPOINT_ROOT
return [klass(**it) for it in klass.GET(uri, **kw).get('results')]
def _count(self, **kw):
kw.update({"count": 1})
return self.model_class.GET(self.model_class.ENDPOINT_ROOT, **kw).get('count')
def all(self):
return Queryset(self)
def filter(self, **kw):
return self.all().filter(**kw)
def fetch(self):
return self.all().fetch()
def get(self, **kw):
return self.filter(**kw).get()
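# Illustrative usage, assuming the model class exposes a QueryManager as
# `Query` (as parse_rest Object subclasses do):
#     GameScore.Query.filter(score__gte=1000).order_by('score').limit(10)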
class Queryset(object):
OPERATORS = [
'lt', 'lte', 'gt', 'gte', 'ne', 'in', 'nin', 'exists', 'select', 'dontSelect', 'all', 'relatedTo', 'nearSphere'
]
@staticmethod
def convert_to_parse(value):
from parse_rest.datatypes import ParseType
return ParseType.convert_to_parse(value, as_pointer=True)
@classmethod
def extract_filter_operator(cls, parameter):
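        # e.g. 'score__gte' -> ('score', 'gte'), 'name' -> ('name', None)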
for op in cls.OPERATORS:
underscored = '__%s' % op
if parameter.endswith(underscored):
return parameter[:-len(underscored)], op
return parameter, None
def __init__(self, manager):
self._manager = manager
self._where = collections.defaultdict(dict)
self._select_related = []
self._options = {}
self._result_cache = None
def __deepcopy__(self, memo):
q = self.__class__(self._manager)
q._where = copy.deepcopy(self._where, memo)
q._options = copy.deepcopy(self._options, memo)
q._select_related.extend(self._select_related)
return q
def __iter__(self):
return iter(self._fetch())
def __len__(self):
#don't use count query for len operator
#count doesn't return real size of result in all cases (eg if query contains skip option)
return len(self._fetch())
def __getitem__(self, key):
if isinstance(key, slice):
raise AttributeError("Slice is not supported for now.")
return self._fetch()[key]
def _fetch(self, count=False):
        """
        Return a list of objects matching query, or if count == True return
        only the number of objects matching.
        """
        if self._result_cache is not None:
            return len(self._result_cache) if count else self._result_cache
options = dict(self._options) # make a local copy
if self._where:
# JSON encode WHERE values
options['where'] = json.dumps(self._where)
if self._select_related:
options['include'] = ','.join(self._select_related)
if count:
return self._manager._count(**options)
self._result_cache = self._manager._fetch(**options)
return self._result_cache
def filter(self, **kw):
q = copy.deepcopy(self)
for name, value in kw.items():
parse_value = Queryset.convert_to_parse(value)
attr, operator = Queryset.extract_filter_operator(name)
if operator is None:
q._where[attr] = parse_value
elif operator == 'relatedTo':
q._where['$' + operator] = {'object': parse_value, 'key': attr}
else:
if not isinstance(q._where[attr], dict):
q._where[attr] = {}
q._where[attr]['$' + operator] = parse_value
return q
def limit(self, value):
q = copy.deepcopy(self)
q._options['limit'] = int(value)
return q
def skip(self, value):
q = copy.deepcopy(self)
q._options['skip'] = int(value)
return q
def order_by(self, order, descending=False):
q = copy.deepcopy(self)
# add a minus sign before the order value if descending == True
q._options['order'] = descending and ('-' + order) or order
return q
def select_related(self, *fields):
q = copy.deepcopy(self)
q._select_related.extend(fields)
return q
def count(self):
return self._fetch(count=True)
def exists(self):
return bool(self)
def get(self):
results = self._fetch()
if len(results) == 0:
error_message = 'Query against %s returned no results' % (
self._manager.model_class.ENDPOINT_ROOT)
raise QueryResourceDoesNotExist(error_message,
status_code=404)
if len(results) >= 2:
error_message = 'Query against %s returned multiple results' % (
self._manager.model_class.ENDPOINT_ROOT)
raise QueryResourceMultipleResultsReturned(error_message,
status_code=404)
return results[0]
def __repr__(self):
return repr(self._fetch())
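# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Assuming a registered parse_rest Object subclass (here a hypothetical
# `GameScore` with `score` and `playerName` fields) whose `Query` attribute is
# a QueryManager, the chainable API above could be exercised roughly like
# this; the queryset is lazy, so the REST call only happens on iteration,
# len(), or get().
def _example_queryset_usage(GameScore):
    # top ten scores of at least 1000, highest first
    top_scores = GameScore.Query.filter(score__gte=1000).order_by('score', descending=True).limit(10)
    names = [s.playerName for s in top_scores]
    try:
        # get() expects exactly one match and raises otherwise
        winner = GameScore.Query.get(playerName='Sean Plott')
    except (QueryResourceDoesNotExist, QueryResourceMultipleResultsReturned):
        winner = None
    return names, winner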
|
{
"content_hash": "2700811a74df9f62bb0057fda4207501",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 119,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.570961887477314,
"repo_name": "felix-dumit/campusbot",
"id": "c363ebcbb4c4accd52c29c7951cad7f4aa14cb13",
"size": "6175",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "yowsup2/parse_rest/query.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "13787"
},
{
"name": "Python",
"bytes": "584218"
},
{
"name": "Shell",
"bytes": "254"
}
],
"symlink_target": ""
}
|
from mlabns.db import model
from mlabns.util import constants
from mlabns.util import message
import logging
import os
import socket
import sys
sys.path.insert(1, os.path.abspath(os.path.join(
os.path.dirname(__file__), '../third_party/pygeoip')))
import pygeoip
class GeoRecord:
def __init__(self, city=None, country=None, latitude=None, longitude=None):
self.city = city
self.country = country
self.latitude = latitude
self.longitude = longitude
def __eq__(self, other):
return ((self.city == other.city) and
(self.country == other.country) and
(self.latitude == other.latitude) and
(self.longitude == other.longitude))
def __ne__(self, other):
return not self.__eq__(other)
def get_ip_geolocation(ip_address, address_family):
"""Returns the geolocation data associated with an IP address from MaxMind.
Args:
ip_address: A string describing an IP address (v4 or v6).
address_family: the ip_address format, either ipv4 or ipv6
Returns:
A populated GeoRecord if matching geolocation data is found for the
IP address. Otherwise, an empty GeoRecord.
"""
maxmind_file = None
if address_family == message.ADDRESS_FAMILY_IPv4:
maxmind_file = constants.GEOLOCATION_MAXMIND_CITY_FILE_IPv4
elif address_family == message.ADDRESS_FAMILY_IPv6:
maxmind_file = constants.GEOLOCATION_MAXMIND_CITY_FILE_IPv6
logging.debug('Looking for geolocation in this file: %s', maxmind_file)
try:
# Code relies on modules for input validation, which throws one of two
# Exceptions for invalid input:
        # 1.) socket.error: Generated by pygeoip in parsing addresses from text
# to binary form while searching the MaxMind database; and,
# 2.) TypeError: Passed a non-string IP address, such as None.
geo_city_block = pygeoip.GeoIP(
maxmind_file,
flags=pygeoip.const.STANDARD).record_by_addr(ip_address)
except (socket.error, TypeError) as e:
logging.error('MaxMind Geolocation failed on query (%s) with error: %s',
ip_address, e)
return GeoRecord()
if not geo_city_block:
logging.error('IP %s not found in the MaxMind database.', ip_address)
return GeoRecord()
return GeoRecord(city=geo_city_block['city'],
country=geo_city_block['country_code'],
latitude=geo_city_block['latitude'],
longitude=geo_city_block['longitude'])
def get_country_geolocation(country, country_table=model.CountryCode):
"""Returns the geolocation data associated with a country code.
Args:
country: A string describing a two alphanumeric country code.
country_table: Datastore table from which to retrieve country
information.
Returns:
A GeoRecord containing the geolocation data if found, otherwise an
empty GeoRecord.
"""
geo_record = GeoRecord()
logging.info('Retrieving geolocation info for country %s.', country)
location = country_table.get_by_key_name(country)
if location is not None:
geo_record.city = constants.UNKNOWN_CITY
geo_record.country = location.alpha2_code
geo_record.latitude = location.latitude
geo_record.longitude = location.longitude
return geo_record
def get_city_geolocation(city, country, city_table=model.MaxmindCityLocation):
"""Returns the geolocation data associated with a city and country code.
Args:
city: A string specifying the name of the city.
country: A string describing a two alphanumeric country code.
city_table: Datastore table from which to retrieve city information.
Returns:
A GeoRecord containing the geolocation data if found, otherwise an empty
GeoRecord.
"""
geo_record = GeoRecord()
logging.info('Retrieving geolocation info for country %s, city %s.', city,
country)
location = city_table.gql('WHERE city = :city AND country = :country',
city=city,
country=country).get()
if location is None:
logging.error('%s, %s not found in the database.', city, country)
return geo_record
geo_record.city = location.city
geo_record.country = location.country
geo_record.latitude = location.latitude
geo_record.longitude = location.longitude
return geo_record
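# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A typical lookup falls back from IP-level data to a coarser country-level
# record when MaxMind has nothing for the address; the IP and country code
# below are illustrative only.
def _example_geolocation_lookup():
    record = get_ip_geolocation('8.8.8.8', message.ADDRESS_FAMILY_IPv4)
    if record == GeoRecord():
        # empty record returned: fall back to country-level geolocation
        record = get_country_geolocation('US')
    return record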
|
{
"content_hash": "114147be1bc4554300d085e8434fd680",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 80,
"avg_line_length": 35.6796875,
"alnum_prop": 0.6522881541493322,
"repo_name": "fernandalavalle/mlab-ns",
"id": "0a6c3b338f12f3e53767425051a082cd6ef75318",
"size": "4567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/mlabns/util/maxmind.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14542"
},
{
"name": "HTML",
"bytes": "36134"
},
{
"name": "JavaScript",
"bytes": "59657"
},
{
"name": "Python",
"bytes": "1404409"
},
{
"name": "Shell",
"bytes": "747"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Coupon'
db.create_table('coupons_coupon', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.IntegerField')()),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30, blank=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=20)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('redeemed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('coupons', ['Coupon'])
def backwards(self, orm):
# Deleting model 'Coupon'
db.delete_table('coupons_coupon')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'coupons.coupon': {
'Meta': {'ordering': "['created_at']", 'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['coupons']
|
{
"content_hash": "a07397e61aef4f565117539bcbd8574e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 182,
"avg_line_length": 65.97402597402598,
"alnum_prop": 0.5604330708661417,
"repo_name": "byteweaver/django-coupons",
"id": "1bdeb637c44a1a879aff3bc5c8b2e683b06c693d",
"size": "5104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "coupons/south_migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2404"
},
{
"name": "Makefile",
"bytes": "603"
},
{
"name": "Python",
"bytes": "73488"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy import linalg
from distutils.version import LooseVersion
from .mixin import TransformerMixin
class CSP(TransformerMixin):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP)
This object can be used as a supervised decomposition to estimate
spatial filters for feature extraction in a 2 class decoding problem.
See [1].
Parameters
----------
n_components : int, default 4
The number of components to decompose M/EEG signals.
This number should be set by cross-validation.
reg : float, str, None
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('lws') or
Oracle Approximating Shrinkage ('oas')
log : bool
If true, apply log to standardize the features.
If false, features are just z-scored.
Attributes
----------
`filters_` : ndarray
If fit, the CSP components used to decompose the data, else None.
`patterns_` : ndarray
If fit, the CSP patterns used to restore M/EEG signals, else None.
`mean_` : ndarray
If fit, the mean squared power for each component.
`std_` : ndarray
If fit, the std squared power for each component.
References
----------
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
"""
def __init__(self, n_components=4, reg=None, log=True):
self.n_components = n_components
self.reg = reg
self.log = log
self.filters_ = None
self.patterns_ = None
self.mean_ = None
self.std_ = None
def fit(self, epochs_data, y):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
epochs_data : array, shape=(n_epochs, n_channels, n_times)
The data to estimate the CSP on.
y : array
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
classes = np.unique(y)
if len(classes) != 2:
raise ValueError("More than two different classes in the data.")
# concatenate epochs
class_1 = np.transpose(epochs_data[y == classes[0]],
[1, 0, 2]).reshape(epochs_data.shape[1], -1)
class_2 = np.transpose(epochs_data[y == classes[1]],
[1, 0, 2]).reshape(epochs_data.shape[1], -1)
if self.reg is None:
# compute empirical covariance
cov_1 = np.dot(class_1, class_1.T)
cov_2 = np.dot(class_2, class_2.T)
else:
# use sklearn covariance estimators
if isinstance(self.reg, float):
if (self.reg < 0) or (self.reg > 1):
raise ValueError('0 <= shrinkage <= 1 for '
'covariance regularization.')
try:
import sklearn
sklearn_version = LooseVersion(sklearn.__version__)
from sklearn.covariance import ShrunkCovariance
except ImportError:
raise Exception('the scikit-learn package is missing and '
'required for covariance regularization.')
if sklearn_version < '0.12':
skl_cov = ShrunkCovariance(shrinkage=self.reg,
store_precision=False)
else:
# init sklearn.covariance.ShrunkCovariance estimator
skl_cov = ShrunkCovariance(shrinkage=self.reg,
store_precision=False,
assume_centered=True)
elif isinstance(self.reg, str):
if self.reg == 'lws':
try:
from sklearn.covariance import LedoitWolf
except ImportError:
raise Exception('the scikit-learn package is missing '
'and required for regularization.')
# init sklearn.covariance.LedoitWolf estimator
skl_cov = LedoitWolf(store_precision=False,
assume_centered=True)
elif self.reg == 'oas':
try:
from sklearn.covariance import OAS
except ImportError:
raise Exception('the scikit-learn package is missing '
'and required for regularization.')
# init sklearn.covariance.OAS estimator
skl_cov = OAS(store_precision=False,
assume_centered=True)
                else:
                    raise ValueError("regularization parameter value should "
                                     "be 'lws' or 'oas' (got '%s')." % self.reg)
            else:
                raise ValueError("regularization parameter should be of type "
                                 "float or str (got %s)." % type(self.reg))
# compute regularized covariance using sklearn
cov_1 = skl_cov.fit(class_1.T).covariance_
cov_2 = skl_cov.fit(class_2.T).covariance_
# then fit on covariance
self._fit(cov_1, cov_2)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def _fit(self, cov_a, cov_b):
"""Aux Function (modifies cov_a and cov_b in-place)"""
cov_a /= np.trace(cov_a)
cov_b /= np.trace(cov_b)
# computes the eigen values
lambda_, u = linalg.eigh(cov_a + cov_b)
# sort them
ind = np.argsort(lambda_)[::-1]
lambda2_ = lambda_[ind]
u = u[:, ind]
p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)
# Compute the generalized eigen value problem
w_a = np.dot(np.dot(p, cov_a), p.T)
w_b = np.dot(np.dot(p, cov_b), p.T)
# and solve it
vals, vecs = linalg.eigh(w_a, w_b)
# sort vectors by discriminative power using eigen values
ind = np.argsort(np.maximum(vals, 1. / vals))[::-1]
vecs = vecs[:, ind]
# and project
w = np.dot(vecs.T, p)
self.filters_ = w
self.patterns_ = linalg.pinv(w).T
def transform(self, epochs_data, y=None):
"""Estimate epochs sources given the CSP filters
Parameters
----------
epochs_data : array, shape=(n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray of shape (n_epochs, n_sources)
The CSP features averaged over time.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
if self.log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
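# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# CSP is fit on two-class epoched data of shape (n_epochs, n_channels,
# n_times) and transform() yields one band-power feature per selected
# component; the random data below is illustrative only.
def _example_csp_usage():
    rng = np.random.RandomState(42)
    epochs_data = rng.randn(20, 8, 100)   # 20 epochs, 8 channels, 100 samples
    y = np.array([0] * 10 + [1] * 10)     # two balanced classes
    csp = CSP(n_components=4, reg=None, log=True)
    features = csp.fit(epochs_data, y).transform(epochs_data)
    return features                       # shape (20, 4)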
|
{
"content_hash": "809156588e19a61c7ea58eb527ae634b",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 78,
"avg_line_length": 39.22380952380952,
"alnum_prop": 0.5298045404880417,
"repo_name": "agramfort/mne-python",
"id": "fce9cfab4e52bd0daf257cf23b4112c89acb56a0",
"size": "8390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/decoding/csp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "3751581"
},
{
"name": "Shell",
"bytes": "4011"
}
],
"symlink_target": ""
}
|
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import os
import os.path
import shlex
import sys
import re
try:
import imp as imp
except ImportError:
import importlib as imp
import base64
import json
import tempfile
import time
from os.path import join
import Utils.WAAgentUtil
from Utils.WAAgentUtil import waagent
import logging
import logging.handlers
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from common import CommonVariables
import platform
import subprocess
import datetime
import Utils.Status
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
import traceback
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
class HandlerContext:
def __init__(self,name):
self._name = name
self._version = '0.0'
return
class HandlerUtility:
telemetry_data = {}
serializable_telemetry_data = []
ExtErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success
SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none
HealthStatusCode = -1
def __init__(self, log, error, short_name):
self._log = log
self._error = error
self.log_message = ""
self._short_name = short_name
self.patching = None
self.storageDetailsObj = None
self.partitioncount = 0
self.logging_file = None
self.pre_post_enabled = False
def _get_log_prefix(self):
return '[%s-%s]' % (self._context._name, self._context._version)
def _get_current_seq_no(self, config_folder):
seq_no = -1
cur_seq_no = -1
freshest_time = None
for subdir, dirs, files in os.walk(config_folder):
for file in files:
try:
if(file.endswith('.settings')):
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(freshest_time == None):
freshest_time = os.path.getmtime(join(config_folder,file))
seq_no = cur_seq_no
else:
current_file_m_time = os.path.getmtime(join(config_folder,file))
if(current_file_m_time > freshest_time):
freshest_time = current_file_m_time
seq_no = cur_seq_no
except ValueError:
continue
return seq_no
def get_last_seq(self):
if(os.path.isfile('mrseq')):
seq = waagent.GetFileContents('mrseq')
if(seq):
return int(seq)
return -1
def exit_if_same_seq(self):
current_seq = int(self._context._seq_no)
last_seq = self.get_last_seq()
if(current_seq == last_seq):
            self.log("the sequence numbers are the same, so skip, current:" + str(current_seq) + " == last:" + str(last_seq))
self.update_settings_file()
sys.exit(0)
def log(self, message,level='Info'):
try:
self.log_with_no_try_except(message, level)
except IOError:
pass
except Exception as e:
try:
errMsg='Exception in hutil.log'
self.log_with_no_try_except(errMsg, 'Warning')
except Exception as e:
pass
def log_with_no_try_except(self, message, level='Info'):
WriteLog = self.get_strvalue_from_configfile('WriteLog','True')
if (WriteLog == None or WriteLog == 'True'):
if sys.version_info > (3,):
if self.logging_file is not None:
self.log_py3(message)
else:
pass
else:
self._log(self._get_log_prefix() + message)
message = "{0} {1} {2} \n".format(str(datetime.datetime.utcnow()) , level , message)
self.log_message = self.log_message + message
def log_py3(self, msg):
if type(msg) is not str:
msg = str(msg, errors="backslashreplace")
msg = str(datetime.datetime.utcnow()) + " " + str(self._get_log_prefix()) + msg + "\n"
try:
with open(self.logging_file, "a+") as C :
C.write(msg)
except IOError:
pass
def error(self, message):
self._error(self._get_log_prefix() + message)
def fetch_log_message(self):
return self.log_message
def _parse_config(self, ctxt):
config = None
try:
config = json.loads(ctxt)
except:
self.error('JSON exception decoding ' + ctxt)
if config == None:
self.error("JSON error processing settings file:" + ctxt)
else:
handlerSettings = config['runtimeSettings'][0]['handlerSettings']
if 'protectedSettings' in handlerSettings and \
"protectedSettingsCertThumbprint" in handlerSettings and \
handlerSettings['protectedSettings'] is not None and \
handlerSettings["protectedSettingsCertThumbprint"] is not None:
protectedSettings = handlerSettings['protectedSettings']
thumb = handlerSettings['protectedSettingsCertThumbprint']
cert = waagent.LibDir + '/' + thumb + '.crt'
pkey = waagent.LibDir + '/' + thumb + '.prv'
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
waagent.SetFileContents(f.name,config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])
cleartxt = None
if 'NS-BSD' in platform.system():
# base64 tool is not available with NSBSD, use openssl
cleartxt = waagent.RunGetOutput(self.patching.openssl_path + " base64 -d -A -in " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
else:
cleartxt = waagent.RunGetOutput(self.patching.base64_path + " -d " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
jctxt = {}
try:
jctxt = json.loads(cleartxt)
except:
self.error('JSON exception decoding ' + cleartxt)
handlerSettings['protectedSettings'] = jctxt
self.log('Config decoded correctly.')
return config
def do_parse_context(self, operation, seqNo):
self.operation = operation
_context = self.try_parse_context(seqNo)
getWaagentPathUsed = Utils.WAAgentUtil.GetPathUsed()
if(getWaagentPathUsed == 0):
self.log("waagent old path is used")
else:
self.log("waagent new path is used")
if not _context:
self.log("maybe no new settings file found")
sys.exit(0)
return _context
def try_parse_context(self, seqNo):
self._context = HandlerContext(self._short_name)
handler_env = None
config = None
ctxt = None
code = 0
try:
self.log('try_parse_context : Sequence Number received ' + str(seqNo))
# get the HandlerEnvironment.json. According to the extension handler
# spec, it is always in the ./ directory
self.log('cwd is ' + os.path.realpath(os.path.curdir))
handler_env_file = './HandlerEnvironment.json'
if not os.path.isfile(handler_env_file):
self.error("Unable to locate " + handler_env_file)
return None
ctxt = waagent.GetFileContents(handler_env_file)
if ctxt == None :
self.error("Unable to read " + handler_env_file)
try:
handler_env = json.loads(ctxt)
except:
pass
if handler_env == None :
self.log("JSON error processing " + handler_env_file)
return None
if type(handler_env) == list:
handler_env = handler_env[0]
self._context._name = handler_env['name']
self._context._version = str(handler_env['version'])
self._context._config_dir = handler_env['handlerEnvironment']['configFolder']
self._context._log_dir = handler_env['handlerEnvironment']['logFolder']
self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')
self.logging_file=self._context._log_file
self._context._shell_log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'shell.log')
self._change_log_file()
self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']
self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']
if seqNo != -1:
self._context._seq_no = seqNo
else:
self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
if self._context._seq_no < 0:
self.error("Unable to locate a .settings file!")
return None
self._context._seq_no = str(self._context._seq_no)
if seqNo != -1:
self.log('sequence number from environment variable is ' + self._context._seq_no)
else:
self.log('sequence number based on config file-names is ' + self._context._seq_no)
self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')
self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
            self.log("settings file path is " + self._context._settings_file)
ctxt = None
ctxt = waagent.GetFileContents(self._context._settings_file)
if ctxt == None :
error_msg = 'Unable to read ' + self._context._settings_file + '. '
self.error(error_msg)
return None
else:
if(self.operation is not None and self.operation.lower() == "enable"):
# we should keep the current status file
self.backup_settings_status_file(self._context._seq_no)
self._context._config = self._parse_config(ctxt)
except Exception as e:
errorMsg = "Unable to parse context, error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.log(errorMsg, 'Error')
raise
return self._context
def _change_log_file(self):
self.log("Change log file to " + self._context._log_file)
waagent.LoggerInit(self._context._log_file,'/dev/stdout')
self._log = waagent.Log
self._error = waagent.Error
def save_seq(self):
self.set_last_seq(self._context._seq_no)
self.log("set most recent sequence number to " + self._context._seq_no)
def set_last_seq(self,seq):
waagent.SetFileContents('mrseq', str(seq))
'''
Sample /etc/azure/vmbackup.conf
[SnapshotThread]
seqsnapshot = 1
isanysnapshotfailed = False
UploadStatusAndLog = True
WriteLog = True
onlyLocalFilesystems = True
    seqsnapshot valid values (0 -> parallel snapshot, 1 -> programmatically set sequential snapshot, 2 -> customer set it for sequential snapshot)
'''
def get_value_from_configfile(self, key):
global backup_logger
value = None
configfile = '/etc/azure/vmbackup.conf'
try :
if os.path.exists(configfile):
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread',key):
value = config.get('SnapshotThread',key)
except Exception as e:
pass
return value
def get_strvalue_from_configfile(self, key, default):
value = self.get_value_from_configfile(key)
if value == None or value == '':
value = default
try :
value_str = str(value)
except ValueError :
self.log('Not able to parse the read value as string, falling back to default value', 'Warning')
value = default
return value
def get_intvalue_from_configfile(self, key, default):
value = default
value = self.get_value_from_configfile(key)
if value == None or value == '':
value = default
try :
value_int = int(value)
except ValueError :
self.log('Not able to parse the read value as int, falling back to default value', 'Warning')
value = default
return int(value)
def set_value_to_configfile(self, key, value):
configfile = '/etc/azure/vmbackup.conf'
try :
            self.log('setting ' + str(key) + ' in config file to ' + str(value), 'Info')
if not os.path.exists(os.path.dirname(configfile)):
os.makedirs(os.path.dirname(configfile))
config = ConfigParsers.RawConfigParser()
if os.path.exists(configfile):
config.read(configfile)
if config.has_section('SnapshotThread'):
if config.has_option('SnapshotThread', key):
config.remove_option('SnapshotThread', key)
else:
config.add_section('SnapshotThread')
else:
config.add_section('SnapshotThread')
config.set('SnapshotThread', key, value)
with open(configfile, 'w') as config_file:
config.write(config_file)
except Exception as e:
            errorMsg = "Unable to set config file. key is " + key + " with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.log(errorMsg, 'Warning')
return value
def get_machine_id(self):
machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B"
machine_id = ""
file_pointer = None
try:
if not os.path.exists(os.path.dirname(machine_id_file)):
os.makedirs(os.path.dirname(machine_id_file))
if os.path.exists(machine_id_file):
file_pointer = open(machine_id_file, "r")
machine_id = file_pointer.readline()
file_pointer.close()
else:
mi = MachineIdentity()
if(mi.stored_identity() != None):
machine_id = mi.stored_identity()[1:-1]
file_pointer = open(machine_id_file, "w")
file_pointer.write(machine_id)
file_pointer.close()
except Exception as e:
errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg, 'Error')
finally :
if file_pointer != None :
if file_pointer.closed == False :
file_pointer.close()
self.log("Unique Machine Id : {0}".format(machine_id))
return machine_id
def get_storage_details(self,total_size,failure_flag):
self.storageDetailsObj = Utils.Status.StorageDetails(self.partitioncount, total_size, False, failure_flag)
self.log("partition count : {0}, total used size : {1}, is storage space present : {2}, is size computation failed : {3}".format(self.storageDetailsObj.partitionCount, self.storageDetailsObj.totalUsedSizeInBytes, self.storageDetailsObj.isStoragespacePresent, self.storageDetailsObj.isSizeComputationFailed))
return self.storageDetailsObj
def SetExtErrorCode(self, extErrorCode):
if self.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success :
self.ExtErrorCode = extErrorCode
def SetSnapshotConsistencyType(self, snapshotConsistency):
self.SnapshotConsistency = snapshotConsistency
def SetHealthStatusCode(self, healthStatusCode):
self.HealthStatusCode = healthStatusCode
def do_status_json(self, operation, status, sub_status, status_code, message, telemetrydata, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj,total_size,failure_flag):
tstamp = time.strftime(DateTimeFormat, time.gmtime())
formattedMessage = Utils.Status.FormattedMessage("en-US",message)
stat_obj = Utils.Status.StatusObj(self._context._name, operation, status, sub_status, status_code, formattedMessage, telemetrydata, self.get_storage_details(total_size,failure_flag), self.get_machine_id(), taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj)
top_stat_obj = Utils.Status.TopLevelStatus(self._context._version, tstamp, stat_obj)
return top_stat_obj
def get_extension_version(self):
try:
cur_dir = os.getcwd()
cur_extension = cur_dir.split("/")[-1]
extension_version = cur_extension.split("-")[-1]
return extension_version
except Exception as e:
errMsg = 'Failed to retrieve the Extension version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
extension_version="Unknown"
return extension_version
def get_wala_version(self):
try:
file_pointer = open('/var/log/waagent.log','r')
waagent_version = ''
for line in file_pointer:
if 'Azure Linux Agent Version' in line:
waagent_version = line.split(':')[-1]
if waagent_version[:-1]=="": #for removing the trailing '\n' character
waagent_version = self.get_wala_version_from_command()
return waagent_version
else:
waagent_version = waagent_version[:-1].split("-")[-1] #getting only version number
return waagent_version
except Exception as e:
errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
waagent_version="Unknown"
return waagent_version
def get_wala_version_from_command(self):
try:
cur_dir = os.getcwd()
os.chdir("..")
out = self.command_output_from_subprocess(['/usr/sbin/waagent', '-version'],30)
if "Goal state agent: " in out:
waagent_version = out.split("Goal state agent: ")[1].strip()
else:
out = out.split(" ")
waagent = out[0]
waagent_version = waagent.split("-")[-1] #getting only version number
os.chdir(cur_dir)
return waagent_version
except Exception as e:
errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
os.chdir(cur_dir)
waagent_version="Unknown"
return waagent_version
def get_dist_info(self):
try:
if 'FreeBSD' in platform.system():
release = re.sub('\-.*\Z', '', str(platform.release()))
return "FreeBSD",release
if 'NS-BSD' in platform.system():
release = re.sub('\-.*\Z', '', str(platform.release()))
return "NS-BSD", release
if 'linux_distribution' in dir(platform):
distinfo = list(platform.linux_distribution(full_distribution_name=0))
# remove trailing whitespace in distro name
if(distinfo[0] == ''):
osfile= open("/etc/os-release", "r")
for line in osfile:
lists=str(line).split("=")
if(lists[0]== "NAME"):
distroname = lists[1].split("\"")
if(lists[0]=="VERSION"):
distroversion = lists[1].split("\"")
osfile.close()
return distroname[1]+"-"+distroversion[1],platform.release()
distinfo[0] = distinfo[0].strip()
return distinfo[0]+"-"+distinfo[1],platform.release()
else:
distinfo = platform.dist()
return distinfo[0]+"-"+distinfo[1],platform.release()
except Exception as e:
errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
            return "Unknown", "Unknown"
def substat_new_entry(self,sub_status,code,name,status,formattedmessage):
sub_status_obj = Utils.Status.SubstatusObj(code,name,status,formattedmessage)
sub_status.append(sub_status_obj)
return sub_status
def timedelta_total_seconds(self, delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
@staticmethod
def add_to_telemetery_data(key,value):
HandlerUtility.telemetry_data[key]=value
def add_telemetry_data(self):
os_version,kernel_version = self.get_dist_info()
workloads = self.get_workload_running()
HandlerUtility.add_to_telemetery_data("guestAgentVersion",self.get_wala_version_from_command())
HandlerUtility.add_to_telemetery_data("extensionVersion",self.get_extension_version())
HandlerUtility.add_to_telemetery_data("osVersion",os_version)
HandlerUtility.add_to_telemetery_data("kernelVersion",kernel_version)
HandlerUtility.add_to_telemetery_data("workloads",str(workloads))
HandlerUtility.add_to_telemetery_data("prePostEnabled", str(self.pre_post_enabled))
def convert_telemetery_data_to_bcm_serializable_format(self):
HandlerUtility.serializable_telemetry_data = []
for k,v in HandlerUtility.telemetry_data.items():
each_telemetry_data = {}
each_telemetry_data["Value"] = v
each_telemetry_data["Key"] = k
HandlerUtility.serializable_telemetry_data.append(each_telemetry_data)
def do_status_report(self, operation, status, status_code, message, taskId = None, commandStartTimeUTCTicks = None, snapshot_info = None,total_size = 0,failure_flag = True ):
self.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
sub_stat = []
stat_rept = []
self.add_telemetry_data()
snapshotTelemetry = ""
if CommonVariables.snapshotCreator in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.snapshotCreator , HandlerUtility.telemetry_data[CommonVariables.snapshotCreator])
if CommonVariables.hostStatusCodePreSnapshot in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.hostStatusCodePreSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodePreSnapshot])
if CommonVariables.hostStatusCodeDoSnapshot in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.hostStatusCodeDoSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodeDoSnapshot])
if CommonVariables.statusBlobUploadError in HandlerUtility.telemetry_data.keys():
message = "{0} {1}={2}, ".format(message , CommonVariables.statusBlobUploadError , HandlerUtility.telemetry_data[CommonVariables.statusBlobUploadError])
message = message + snapshotTelemetry
vm_health_obj = Utils.Status.VmHealthInfoObj(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode], int(self.ExtErrorCode))
consistencyTypeStr = CommonVariables.consistency_crashConsistent
if (self.SnapshotConsistency != Utils.Status.SnapshotConsistencyType.crashConsistent):
if (status_code == CommonVariables.success_appconsistent):
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.applicationConsistent
consistencyTypeStr = CommonVariables.consistency_applicationConsistent
elif (status_code == CommonVariables.success):
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.fileSystemConsistent
consistencyTypeStr = CommonVariables.consistency_fileSystemConsistent
else:
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none
consistencyTypeStr = CommonVariables.consistency_none
HandlerUtility.add_to_telemetery_data("consistencyType", consistencyTypeStr)
extensionResponseObj = Utils.Status.ExtensionResponse(message, self.SnapshotConsistency, "")
message = str(json.dumps(extensionResponseObj, cls = ComplexEncoder))
self.convert_telemetery_data_to_bcm_serializable_format()
stat_rept = self.do_status_json(operation, status, sub_stat, status_code, message, HandlerUtility.serializable_telemetry_data, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj, total_size,failure_flag)
time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
time_span = self.timedelta_total_seconds(time_delta) * 1000
date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0'
stat_rept.timestampUTC = date_place_holder
date_string = r'\/Date(' + str((int)(time_span)) + r')\/'
stat_rept = "[" + json.dumps(stat_rept, cls = ComplexEncoder) + "]"
stat_rept = stat_rept.replace('\\\/', '\/') # To fix the datetime format of CreationTime to be consumed by C# DateTimeOffset
stat_rept = stat_rept.replace(date_place_holder,date_string)
# Add Status as sub-status for Status to be written on Status-File
sub_stat = self.substat_new_entry(sub_stat,'0',stat_rept,'success',None)
if self.get_public_settings()[CommonVariables.vmType].lower() == CommonVariables.VmTypeV2.lower() and CommonVariables.isTerminalStatus(status) :
status = CommonVariables.status_success
stat_rept_file = self.do_status_json(operation, status, sub_stat, status_code, message, None, taskId, commandStartTimeUTCTicks, None, None,total_size,failure_flag)
stat_rept_file = "[" + json.dumps(stat_rept_file, cls = ComplexEncoder) + "]"
        # rename all other status files, or the WALA would report the wrong
        # status file, because the WALA chooses the status file with the
        # highest sequence number to report.
return stat_rept, stat_rept_file
def write_to_status_file(self, stat_rept_file):
try:
tempStatusFile = os.path.join(self._context._status_dir, CommonVariables.TempStatusFileName)
if self._context._status_file:
with open(tempStatusFile,'w+') as f:
f.write(stat_rept_file)
os.rename(tempStatusFile, self._context._status_file)
except Exception as e:
errMsg = 'Status file creation failed with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
def is_status_file_exists(self):
try:
if os.path.exists(self._context._status_file):
return True
else:
return False
except Exception as e:
            self.log("exception in getting status file: " + traceback.format_exc())
return False
def backup_settings_status_file(self, _seq_no):
self.log("current seq no is " + _seq_no)
for subdir, dirs, files in os.walk(self._context._config_dir):
for file in files:
try:
if(file.endswith('.settings') and file != (_seq_no + ".settings")):
new_file_name = file.replace(".","_")
os.rename(join(self._context._config_dir,file), join(self._context._config_dir,new_file_name))
except Exception as e:
                    self.log("failed to rename the settings file.")
for subdir, dirs, files in os.walk(self._context._status_dir):
for file in files:
try:
if(file.endswith('.status') and file != (_seq_no + ".status")):
new_file_name = file.replace(".","_")
os.rename(join(self._context._status_dir,file), join(self._context._status_dir, new_file_name))
except Exception as e:
self.log("failed to rename the status file.")
def do_exit(self, exit_code, operation,status,code,message):
try:
HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode]))
self.do_status_report(operation, status,code,message)
except Exception as e:
self.log("Can't update status: " + str(e))
sys.exit(exit_code)
def get_handler_settings(self):
return self._context._config['runtimeSettings'][0]['handlerSettings']
def get_protected_settings(self):
return self.get_handler_settings().get('protectedSettings')
def get_public_settings(self):
return self.get_handler_settings().get('publicSettings')
def is_prev_in_transition(self):
curr_seq = self.get_last_seq()
last_seq = curr_seq - 1
if last_seq >= 0:
self.log("previous status and path: " + str(last_seq) + " " + str(self._context._status_dir))
status_file_prev = os.path.join(self._context._status_dir, str(last_seq) + '_status')
if os.path.isfile(status_file_prev) and os.access(status_file_prev, os.R_OK):
searchfile = open(status_file_prev, "r")
for line in searchfile:
if "Transition" in line:
self.log("transitioning found in the previous status file")
searchfile.close()
return True
searchfile.close()
return False
def get_prev_log(self):
with open(self._context._log_file, "r") as f:
lines = f.readlines()
if(len(lines) > 300):
lines = lines[-300:]
return ''.join(str(x) for x in lines)
else:
return ''.join(str(x) for x in lines)
def get_shell_script_log(self):
lines = ""
try:
with open(self._context._shell_log_file, "r") as f:
lines = f.readlines()
if(len(lines) > 10):
lines = lines[-10:]
return ''.join(str(x) for x in lines)
except Exception as e:
self.log("Can't receive shell log file: " + str(e))
return lines
def update_settings_file(self):
if(self._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings') != None):
del self._context._config['runtimeSettings'][0]['handlerSettings']['protectedSettings']
self.log("removing the protected settings")
waagent.SetFileContents(self._context._settings_file,json.dumps(self._context._config))
def UriHasSpecialCharacters(self, blobs):
uriHasSpecialCharacters = False
if blobs is not None:
for blob in blobs:
blobUri = str(blob.split("?")[0])
if '%' in blobUri:
self.log(blobUri + " URI has special characters")
uriHasSpecialCharacters = True
return uriHasSpecialCharacters
def get_workload_running(self):
workloads = []
try:
            dblist = ["mysqld", "postgresql", "oracle", "cassandra", "mongo"]  ## add all workload process names in lower case
if os.path.isdir("/proc"):
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
for db in dblist :
if db in str(pname).lower() and db not in workloads :
self.log("workload running found with name : " + str(db))
workloads.append(db)
return workloads
except Exception as e:
self.log("Unable to fetch running workloads" + str(e))
return workloads
def set_pre_post_enabled(self):
self.pre_post_enabled = True
def command_output_from_subprocess(self , args, process_wait_time):
process_out = subprocess.Popen(args, stdout=subprocess.PIPE)
while(process_wait_time > 0 and process_out.poll() is None):
time.sleep(1)
process_wait_time -= 1
out = process_out.stdout.read().decode()
out = str(out)
return out
@staticmethod
def split(logger,txt):
result = None
try:
result = shlex.split(txt)
except Exception as e:
logger.log('Shlex.Split threw exception error: %s, stack trace: %s' % (str(e), traceback.format_exc()))
result = txt.split()
return result
@staticmethod
def convert_to_string(txt):
if sys.version_info > (3,):
txt = str(txt, encoding='utf-8', errors="backslashreplace")
else:
txt = str(txt)
return txt
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj,'convertToDictionary'):
return obj.convertToDictionary()
else:
return obj.__dict__
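# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# The /etc/azure/vmbackup.conf helpers above fall back to the supplied default
# when a key is missing or unparsable; the key names mirror the sample config
# documented inside HandlerUtility, and `hutil` is assumed to be an
# already-constructed HandlerUtility instance.
def _example_config_read(hutil):
    seq_snapshot = hutil.get_intvalue_from_configfile('seqsnapshot', 1)
    write_log = hutil.get_strvalue_from_configfile('WriteLog', 'True')
    return seq_snapshot, write_log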
|
{
"content_hash": "2f7c77b78523b1d5ee24c6feb3c967ff",
"timestamp": "",
"source": "github",
"line_count": 785,
"max_line_length": 636,
"avg_line_length": 45.480254777070066,
"alnum_prop": 0.5986219259425243,
"repo_name": "Azure/azure-linux-extensions",
"id": "8942ed8f147623fd8848bc967915c5db339b5ed9",
"size": "36325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VMBackup/main/Utils/HandlerUtil.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "81542"
},
{
"name": "C++",
"bytes": "1038973"
},
{
"name": "CMake",
"bytes": "11642"
},
{
"name": "Dockerfile",
"bytes": "1539"
},
{
"name": "Go",
"bytes": "136483"
},
{
"name": "HTML",
"bytes": "32736"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "11405"
},
{
"name": "PowerShell",
"bytes": "22400"
},
{
"name": "Python",
"bytes": "5124041"
},
{
"name": "Roff",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "66718"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from . import views
urlpatterns = [
# eg. /POInvoice/
url(r'^$', views.index, name='index'),
# eg. /POInvoice/customer/5/
url(r'^customer/(?P<customer_id>[0-9]+)/$', views.customer_index ),
# eg. /POInvoice/purchorder/7
url(r'^purchorder/(?P<purchorder_id>[0-9]+)/$', views.purchorder_index),
# eg. /POInvoice/invoice/2
url(r'^invoice/(?P<invoice_id>[0-9]+)/$', views.invoice_index),
# eg. /POInvoice/model_form_upload/
url(r'^model_form_upload/$', views.model_form_upload)
]
|
{
"content_hash": "c7bebde017c34f648bbfbcd4af5761ef",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 36.86666666666667,
"alnum_prop": 0.6238698010849909,
"repo_name": "JasonL888/POInvoiceTracker",
"id": "58c9c2122cae8c6a87f5b667456290248ebd805f",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "POInvoice/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1419"
},
{
"name": "Dockerfile",
"bytes": "472"
},
{
"name": "HTML",
"bytes": "12012"
},
{
"name": "JavaScript",
"bytes": "271"
},
{
"name": "Python",
"bytes": "32116"
},
{
"name": "Shell",
"bytes": "477"
}
],
"symlink_target": ""
}
|
from fbchat import log, Client
# Subclass fbchat.Client and override required methods
class EchoBot(Client):
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
self.markAsDelivered(author_id, thread_id)
self.markAsRead(author_id)
log.info("{} from {} in {}".format(message_object, thread_id, thread_type.name))
# If you're not the author, echo
if author_id != self.uid:
self.send(message_object, thread_id=thread_id, thread_type=thread_type)
client = EchoBot("<email>", "<password>")
client.listen()
|
{
"content_hash": "6a994a8df87f231dbe2c4a9181a34638",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 88,
"avg_line_length": 36.9375,
"alnum_prop": 0.6700507614213198,
"repo_name": "Bankde/fbchat",
"id": "a89b2f9a5657ea9ad846e82136dbca993a194c19",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/echobot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "124102"
}
],
"symlink_target": ""
}
|
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
'mac': ['poll'],
}
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
import big_query_utils
bq = big_query_utils.create_big_query()
query = """
SELECT
filtered_test_name,
SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
MAX(cpu_measured) + 0.01 as cpu
FROM (
SELECT
REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
result, cpu_measured
FROM
[grpc-testing:jenkins_test_results.aggregate_results]
WHERE
timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
AND platform = '""" + platform_string() + """'
AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
filtered_test_name"""
if limit:
query += " limit {}".format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
test_data = [
BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
float(row['f'][2]['v'])) for row in page['rows']
]
return test_data
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception(
"Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
def max_parallel_tests_for_current_platform():
# Too much test parallelization has only been seen to be a problem
# so far on windows.
if jobset.platform_string() == 'windows':
return 64
return 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self,
config,
environ=None,
timeout_multiplier=1,
tool_prefix=[],
iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self,
cmdline,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None,
environ={},
cpu_cost=1.0,
flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
if not flaky and shortname and shortname in flaky_tests:
flaky = True
if shortname in shortname_to_cpu:
cpu_cost = shortname_to_cpu[shortname]
return jobset.JobSpec(
cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds
if timeout_seconds else None),
flake_retries=4 if flaky or args.allow_flakes else 0,
timeout_retries=1 if flaky or args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [
tgt for tgt in js
if tgt['language'] == test_lang and platform_string() in tgt[
platforms_str] and not (travis and tgt['flaky'])
]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' %
compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
'shell', 'builder', 'builder_prefix_arguments', 'venv_relative_python',
'toolchain', 'runner'
])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name, config_vars.shell + config_vars.builder +
config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)
] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner +
[os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder +
config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)
] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner +
[os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
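# --- Hedged illustration (editor's addition, not part of the original script) ---
# On POSIX hosts _python_pattern_function ignores bit-ness and returns e.g.
# 'python2.7' for major='2', minor='7'; on Windows it returns an interpreter
# path such as '/c/Python27/python.exe' (64-bit) or
# '/c/Python27_32bits/python.exe' (32-bit).
def _example_python_pattern():
    return _python_pattern_function(major='2', minor='7', bits='64')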
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, [
'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
])
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
self._use_cmake = True
self._make_options = []
elif self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(
self.args.use_docker, self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
try:
cflags += subprocess.check_output(
['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(
['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += [
'EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)
]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
auto_timeout_scaling = target.get('auto_timeout_scaling', True)
polling_strategies = (
_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True) else ['none'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env = {
'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY':
polling_strategy,
'GRPC_VERBOSITY':
'DEBUG'
}
resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
if polling_strategy in target.get('excluded_poll_engines', []):
continue
timeout_scaling = 1
if auto_timeout_scaling:
config = self.args.config
if ('asan' in config or config == 'msan' or
config == 'tsan' or config == 'ubsan' or
config == 'helgrind' or config == 'memcheck'):
# Scale overall test timeout if running under various sanitizers.
# scaling value is based on historical data analysis
timeout_scaling *= 3
elif polling_strategy == 'poll-cv':
# scale test timeout if running with poll-cv
# sanitizer and poll-cv scaling is not cumulative to ensure
# reasonable timeout values.
# TODO(jtattermusch): based on historical data and 5min default
# test timeout poll-cv scaling is currently not useful.
# Leaving here so it can be reintroduced if the default test timeout
# is decreased in the future.
timeout_scaling *= 1
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
binary = 'cmake/build/%s/%s.exe' % (
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config,
target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
list_test_command = None
filter_test_command = None
                    # these are the flags defined by the gtest and benchmark
                    # frameworks to list and filter test runs. We use them to split
                    # each individual test into its own JobSpec, and thus into its
                    # own process.
if 'benchmark' in target and target['benchmark']:
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--benchmark_list_tests'],
stderr=fnull)
for line in tests.split('\n'):
test = line.strip()
if not test: continue
cmdline = [binary, '--benchmark_filter=%s$' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' % (' '.join(cmdline),
shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS *
timeout_scaling,
environ=env))
elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a
                        # complete list of the tests contained in a binary; for each
                        # test, we then add a job to run, filtering for just that test.
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--gtest_list_tests'], stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' % (' '.join(cmdline),
shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) *
timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
shortname = target.get('shortname', ' '.join(
pipes.quote(arg) for arg in cmdline))
shortname += shortname_ext
out.append(
self.config.job_spec(
cmdline,
shortname=shortname,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get(
'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
* timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return [
'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
'check_epollexclusive'
]
def make_options(self):
return self._make_options
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
self._cmake_generator_option, self._cmake_arch_option
]]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return [
'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
]
def _gcc_make_options(self, version_suffix):
return [
'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, [
'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6'
])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '8'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
# TODO: update with Windows/electron scripts when available for grpc/grpc-node
def test_specs(self):
if self.platform == 'windows':
return [
self.config.job_spec(
['tools\\run_tests\\helper_scripts\\run_node.bat'])
]
else:
return [
self.config.job_spec(
['tools/run_tests/helper_scripts/run_grpc-node.sh'],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'grpc-node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
self.config.job_spec(
['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
self.config.job_spec(
['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(
collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open(
'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [
self.config.job_spec(
config.run,
timeout_seconds=5 * 60,
environ=dict(
list(environment.items()) + [(
'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json for config in self.pythons
]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (
self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
if self.args.compiler in ['python3.5', 'python3.6']:
return 'pyenv'
elif self.args.compiler == 'python_alpine':
return 'alpine'
else:
return 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [
os.path.abspath(
'tools/run_tests/helper_scripts/build_python_msys2.sh')
]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [
os.path.abspath(
'tools/run_tests/helper_scripts/build_python.sh')
]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [
os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
]
config_vars = _PythonConfigVars(shell, builder,
builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(
name='py27',
major='2',
minor='7',
bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(
name='py34',
major='3',
minor='4',
bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(
name='py35',
major='3',
minor='5',
bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(
name='py36',
major='3',
minor='6',
bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(
name='pypy', major='2', config_vars=config_vars)
pypy32_config = _pypy_config_generator(
name='pypy3', major='3', config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python35_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
elif args.compiler == 'python_alpine':
return (python27_config,)
elif args.compiler == 'all_the_cpythons':
return (python27_config, python34_config, python35_config,
python36_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
tests = [
self.config.job_spec(
['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
tests.append(
self.config.job_spec(
['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
timeout_seconds=10 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return tests
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All', '--noresult', '--workers=1']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
assembly_subdir += '/net45'
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (
assembly, assembly_subdir, assembly, assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
] + nunit_args
specs.append(
self.config.job_spec(
cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = [
'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file, '-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*', '-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(
self.config.job_spec(
cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
self._cmake_arch_option
]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
return self._make_options
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(
['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60 * 60,
shortname='objc-tests',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(
['src/objective-c/tests/run_plugin_tests.sh'],
timeout_seconds=60 * 60,
shortname='objc-plugin-tests',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-helloworld',
cpu_cost=1e6,
environ={
'SCHEME': 'HelloWorld',
'EXAMPLE_PATH': 'examples/objective-c/helloworld'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-routeguide',
cpu_cost=1e6,
environ={
'SCHEME': 'RouteGuideClient',
'EXAMPLE_PATH': 'examples/objective-c/route_guide'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-authsample',
cpu_cost=1e6,
environ={
'SCHEME': 'AuthSample',
'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-sample',
cpu_cost=1e6,
environ={
'SCHEME': 'Sample',
'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-sample-frameworks',
cpu_cost=1e6,
environ={
'SCHEME': 'Sample',
'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
'FRAMEWORKS': 'YES'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-switftsample',
cpu_cost=1e6,
environ={
'SCHEME': 'SwiftSample',
'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
}),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ = {'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [
self.config.job_spec(
cmd['script'].split(),
timeout_seconds=30 * 60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg))
for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'grpc-node': RemoteNodeLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc': ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.'
% arch)
sys.exit(1)
else:
if args.arch != 'default':
print('Architecture %s not supported on current platform.' %
args.arch)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument(
'-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
'-n',
'--runs_per_test',
default=1,
type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
'-p',
'--sample_percent',
default=100.0,
type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument(
'-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
'-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
'--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
'-l',
'--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument(
'-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
'--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument(
'--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
'--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
'--compiler',
choices=[
'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
'cmake_vs2015', 'cmake_vs2017'
],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
'--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument(
'--build_only',
default=False,
action='store_const',
const=True,
help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
'--measure_cpu_costs',
default=False,
action='store_const',
const=True,
help='Measure the cpu costs of tests')
argp.add_argument(
'--update_submodules',
default=[],
nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. '
+
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument(
'-x',
'--xml_report',
default=None,
type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument(
'--report_suite_name',
default='tests',
type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
'--quiet_success',
default=False,
action='store_const',
const=True,
help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
'--force_default_poller',
default=False,
action='store_const',
const=True,
help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
'--force_use_pollers',
default=None,
type=str,
help='Only use the specified comma-delimited list of polling engines. '
'Example: --force_use_pollers epollsig,poll '
' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
'--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
'--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
argp.add_argument(
'--disable_auto_set_flakes',
default=False,
const=True,
action='store_const',
help='Disable rerunning historically flaky tests')
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
try:
for test in get_bqtest_data():
if test.flaky: flaky_tests.add(test.name)
if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
except:
print("Unexpected error getting flaky tests: %s" %
traceback.format_exc())
if args.force_default_poller:
_POLLING_STRATEGIES = {}
elif args.force_use_pollers:
_POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print(
'WARNING: may need to regenerate projects, but since we are not on')
print(
' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options = []
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print(
'languages with custom make options cannot be built simultaneously with other languages'
)
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(
set([
make_option
for lang in languages for make_option in lang.make_options()
]))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print(
'IMPORTANT: The changes you are testing need to be locally committed'
)
print(
'because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print(
'Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print(
'Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call(
'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
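    # Build JobSpecs for the build step: one 'cmake --build' per target on Windows,
    # a single make invocation (cmake- or Makefile-based) elsewhere.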
if platform_string() == 'windows':
return [
jobset.JobSpec(
[
'cmake', '--build', '.', '--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]
],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets
]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [
jobset.JobSpec(
[os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)
]
if targets:
return [
jobset.JobSpec(
[
os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
args.slowdown, 'CONFIG=%s' % cfg, 'Q='
] + language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
timeout_seconds=None)
]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(
makefile, set()).union(set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(
set(
jobset.JobSpec(
cmdline, environ=build_step_environ(build_config), flake_retries=2)
for l in languages for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(
make_jobspec(build_config, list(targets), makefile)
for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(
set(
jobset.JobSpec(
cmdline,
environ=build_step_environ(build_config),
timeout_seconds=None)
for l in languages for cmdline in l.build_steps()))
post_tests_steps = list(
set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
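    # If something is still listening on the legacy port, ask it to shut itself
    # down via its /quitquitquit endpoint; ignore errors if nothing is there.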
try:
version = int(
urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen('http://localhost:%d/quitquitquit' %
legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
def _has_epollexclusive():
binary = 'bins/%s/check_epollexclusive' % args.config
if not os.path.exists(binary):
return False
try:
subprocess.check_call(binary)
return True
    except subprocess.CalledProcessError:
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
newline_on_success,
xml_report=None,
build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps,
maxjobs=1,
stop_on_failure=True,
newline_on_success=newline_on_success,
travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string(
) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [
subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)
]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and (
args.regex_exclude == '' or not re.search(
args.regex_exclude, spec.shortname))))
        # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(
one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent / 100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run
) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message(
'START',
'Running tests quietly, only failing tests will be reported',
do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs,
check_cancelled,
newline_on_success=newline_on_success,
travis=args.travis,
maxjobs=args.jobs,
maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success,
max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE',
'%s [%d/%d runs flaked]' %
(k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_results_to_bq(resultset, args.bq_result_table, args,
platform_string())
if xml_report and resultset:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps,
maxjobs=1,
stop_on_failure=False,
newline_on_success=newline_on_success,
travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        success = not errors
        if not previous_success and success:
jobset.message(
'SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(
check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
|
{
"content_hash": "9d97946c3723b6203ea878301487f86e",
"timestamp": "",
"source": "github",
"line_count": 1802,
"max_line_length": 135,
"avg_line_length": 35.1542730299667,
"alnum_prop": 0.5458262297152239,
"repo_name": "geffzhang/grpc",
"id": "bd5b8644b396c655305e2db58bda3c05e8c5789e",
"size": "63947",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/run_tests/run_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "27284"
},
{
"name": "C",
"bytes": "5366430"
},
{
"name": "C#",
"bytes": "1243731"
},
{
"name": "C++",
"bytes": "1440373"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "320727"
},
{
"name": "M4",
"bytes": "36598"
},
{
"name": "Makefile",
"bytes": "626156"
},
{
"name": "Objective-C",
"bytes": "288588"
},
{
"name": "PHP",
"bytes": "136044"
},
{
"name": "Protocol Buffer",
"bytes": "117692"
},
{
"name": "Python",
"bytes": "1457697"
},
{
"name": "Ruby",
"bytes": "540928"
},
{
"name": "Shell",
"bytes": "40158"
},
{
"name": "Swift",
"bytes": "5279"
}
],
"symlink_target": ""
}
|
"""Fixer for exec.
This converts usages of the exec statement into calls to a built-in
exec() function.
exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call
class FixExec(fixer_base.BaseFix):
PATTERN = """
exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
|
exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
"""
def transform(self, node, results):
assert results
syms = self.syms
a = results["a"]
b = results.get("b")
c = results.get("c")
args = [a.clone()]
args[0].prefix = ""
if b is not None:
args.extend([Comma(), b.clone()])
if c is not None:
args.extend([Comma(), c.clone()])
return Call(Name(u"exec"), args, prefix=node.prefix)
|
{
"content_hash": "7935b6fdd5ac1e2aa26dbe80416fc0a4",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.5547945205479452,
"repo_name": "fkolacek/FIT-VUT",
"id": "121431c0c272c88da2a8332c91289cb7bd132afc",
"size": "977",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "bp-revok/python/lib/python2.7/lib2to3/fixes/fix_exec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455326"
},
{
"name": "Awk",
"bytes": "8724"
},
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Brainfuck",
"bytes": "83"
},
{
"name": "C",
"bytes": "5006938"
},
{
"name": "C++",
"bytes": "1835332"
},
{
"name": "CSS",
"bytes": "301045"
},
{
"name": "CoffeeScript",
"bytes": "46327"
},
{
"name": "Groff",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "937735"
},
{
"name": "Java",
"bytes": "552132"
},
{
"name": "JavaScript",
"bytes": "1742225"
},
{
"name": "Lua",
"bytes": "39700"
},
{
"name": "Makefile",
"bytes": "381793"
},
{
"name": "Objective-C",
"bytes": "4618"
},
{
"name": "PHP",
"bytes": "108701"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "60353"
},
{
"name": "Python",
"bytes": "22084026"
},
{
"name": "QMake",
"bytes": "2660"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ragel in Ruby Host",
"bytes": "17993"
},
{
"name": "Ruby",
"bytes": "21607145"
},
{
"name": "Shell",
"bytes": "611321"
},
{
"name": "Tcl",
"bytes": "4920"
},
{
"name": "TeX",
"bytes": "561423"
},
{
"name": "VHDL",
"bytes": "49180"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "154638"
},
{
"name": "Yacc",
"bytes": "32788"
}
],
"symlink_target": ""
}
|
from tddspry.noseplugins.djangoplugin import *
|
{
"content_hash": "5b86c6b6d5126880cefa105e1940d103",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 46,
"avg_line_length": 47,
"alnum_prop": 0.851063829787234,
"repo_name": "playpauseandstop/tddspry",
"id": "455c8eba7754731f3a78c317f0b5b86239456e3d",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tddspry/noseplugins/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "168787"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
}
|
from gerencianet import Gerencianet
from ...credentials import credentials
gn = Gerencianet(credentials.CREDENTIALS)
headers = {
'x-skip-mtls-checking': 'false'
}
params = {
'chave': ''
}
body = {
'webhookUrl': ''
}
response = gn.pix_config_webhook(params=params, body=body, headers=headers)
print(response)
|
{
"content_hash": "b04f6a2215551a90e99e5206a336c30c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 17.157894736842106,
"alnum_prop": 0.6932515337423313,
"repo_name": "gerencianet/gn-api-sdk-python",
"id": "2290596b86e9d594722b2e366c918457a467a2dc",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pix/webhook/pix_config_webhook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25401"
}
],
"symlink_target": ""
}
|
"""
lunaport.domain
~~~~~~~~~~~~~~~
Data domain related object contains most of business logic.
"""
|
{
"content_hash": "90783bfaaa93ee12c7f4ae48148aa606",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 16.833333333333332,
"alnum_prop": 0.6336633663366337,
"repo_name": "greggyNapalm/lunaport_server",
"id": "2a594e445e38686a654bb0d123978dac97a733a5",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lunaport_server/domain/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "287625"
},
{
"name": "Shell",
"bytes": "5508"
}
],
"symlink_target": ""
}
|
import threading, datetime, time, ast
import udpserver, tcpfilesocket, udpbroadcaster, messages
from constants import *
groupManager = None
startup = True
class GroupManager():
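    # Coordinates a named player group: the group master discovers members via UDP
    # broadcasts and schedules group actions, members just acknowledge the master.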
def __init__(self, config):
self.groupName = config["group"]
self.memberCount = 0
self.groupMasterName = config["group_master_name"]
self.groupMaster = config["group_master"] == 1
self.actions = config["actions"]
self.memberHosts = []
self.masterHost = ""
if self.groupMaster:
print "INITIALIZING GroupActionHandler WITH ACTIONS: ", self.actions
# init action handler thread
self.actionHandler = GroupActionHandler(self.actions)
self.actionHandler.daemon = True
self.actionHandler.start()
self.GroupMasterRoutine()
else:
# player is a group member --> broadcast acknowledge with request flag set to false in case master is already online
byRequest = "0"
msgData = messages.getMessage(GROUP_MEMBER_ACKNOWLEDGE, ["-s", str(self.groupName), "-i", byRequest])
udpbroadcaster.sendBroadcast(msgData)
def GroupMasterRoutine(self):
# add localhost to host list to receive commands on master player as well
self.memberHosts.append('127.0.0.1')
self.actionHandler.AddHost('127.0.0.1', True)
# send member request broadcast
msgData = messages.getMessage(GROUP_MEMBER_REQUEST, ["-s", str(self.groupName)])
udpbroadcaster.sendBroadcast(msgData, True)
def HandleGroupMemberRequest(self, reqGroupName, masterIP):
if not self.groupMaster and reqGroupName == self.groupName:
self.masterHost = masterIP
# member of requested group --> send acknowledge
byRequest = "1"
msgData = messages.getMessage(GROUP_MEMBER_ACKNOWLEDGE, ["-s", str(self.groupName), "-i", byRequest])
udpbroadcaster.sendMessage(msgData, self.masterHost)
def HandleGroupMemberAcknowledge(self, ackGroupName, memberIP, byRequest):
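        # Master-side handler: remember the member's IP and register it with the
        # action handler so scheduled commands reach it.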
if self.groupMaster and ackGroupName == self.groupName:
print "MEMBER ACKNOWLEDGED: ", memberIP
if not memberIP in self.memberHosts:
self.memberHosts.append(memberIP)
self.memberCount += 1
self.actionHandler.AddHost(memberIP, byRequest)
def ScheduleActions(self):
print "Scheduling as Master: ", self.groupMaster
if self.groupMaster:
# start thread if not already alive
if not self.actionHandler.isAlive():
self.actionHandler.start()
# set runevent to trigger action scheduling
self.actionHandler.runevent.set()
def StopActionHandling(self):
if self.groupMaster:
self.actionHandler.runevent.clear()
# set update event --> causes handler to proceed and exit loop
self.actionHandler.updateevent.set()
class GroupActionHandler(threading.Thread):
def __init__(self, actions):
self.actions = []
        for action in actions:
            try:
                # convert string representations to dicts where possible
                action = ast.literal_eval(action)
            except (ValueError, SyntaxError):
                pass
            self.actions.append(action)
self.runevent = threading.Event()
self.updateevent = threading.Event()
self.actionThreads = []
self.hosts = []
self.running = True
threading.Thread.__init__(self, name="GroupActionHandler_Thread")
def AddHost(self, host, byRequest):
if not host in self.hosts:
self.hosts.append(host)
# if not localhost and host added not in response to a member request --> New player online event --> check actions
if not host == "127.0.0.1" and not byRequest:
# check if actions are defined that should be processed when a new group host came online
for action in self.actions:
# convert action to dict if needed
try:
actionDict = ast.literal_eval(action)
action = actionDict
except:
pass
if "type" in action and int(action['type']) == ACTION_TYPE_ONETIME and int(action['event']) == ACTION_EVENT_NEW_PLAYER:
print "New Player found --> triggering action ", action
# action found, handled like a startup action using the defined delay
t = threading.Thread(target=self.__ProcessStartupAction, args=[action])
t.daemon = True
t.start()
def run(self):
# wait to get started
self.runevent.wait()
print "RUN EVENT SET - processing Actions in handler now..."
print "ACTIONS: ", self.actions
startupActions = []
update = False
while self.runevent.is_set():
if update:
# TODO: update run of loop
# --> check for new actions and schedule them
# --> check for removed actions and stop them
# --> evaluate if needed as most deletion/add actions re-init handler
pass
for action in self.actions:
# convert action to dict if needed
try:
actionDict = ast.literal_eval(action)
action = actionDict
except:
pass
print "Processing Action: ", action
if "type" in action:
# only process actions with defined type
type = int(action["type"])
if type == ACTION_TYPE_ONETIME:
if action["event"] == ACTION_EVENT_STARTUP:
print "Triggering startup action ", action
startupActions.append(action)
elif action["event"] == ACTION_TYPE_SPECIFIC_TIME:
t_stop = threading.Event()
t = threading.Thread(target=self.__ProcessSpecificTimeAction, args=(action, t_stop))
t.daemon = True
# save references to thread and stop event
tList = {}
tList["thread"] = t
tList["stop_event"] = t_stop
self.actionThreads.append(tList)
t.start()
elif type == ACTION_TYPE_PERIODIC:
t_stop = threading.Event()
t = threading.Thread(target=self.__ProcessPeriodicAction, args=(action, t_stop))
t.daemon = True
# save references to thread and stop event
tList = {}
tList["thread"] = t
tList["stop_event"] = t_stop
self.actionThreads.append(tList)
t.start()
else:
# no type defined, action ignored
pass
# periodic actions are started in threads, check if startup actions have to be handled
global startup
if startup:
for sAction in startupActions:
index = self.actions.index(sAction)
del self.actions[index]
                    t = threading.Thread(target=self.__ProcessStartupAction, args=[sAction])
t.daemon = True
t.start()
startup = False
# wait for update event
self.updateevent.wait()
update = True
# stop all action threads
for t in self.actionThreads:
t["stop_event"].set()
def __ProcessSpecificTimeAction(self, action, stopevent):
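        # Runs in its own thread: waits until the configured hour/minute is reached,
        # sends the action command to all hosts, then keeps looping until stopped.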
startTime = datetime.time(int(action['hour']),int(action['minute']))
print "ACTION START TIME: ", startTime
print "CURRENT TIME: ", datetime.datetime.today().time()
while not stopevent.is_set():
if startTime > datetime.datetime.today().time():
while startTime > datetime.datetime.today().time() and not stopevent.is_set():
# wait 1 second then check time again
stopevent.wait(1)
# wait loop passed --> trigger time for action
print "TRIGGER TIME REACHED - SENDING ACTION TO HOSTS: ", action
self.__SendCommandToHosts(action)
else:
                # start time has already passed and should have been processed, let's wait for the next day :-)
stopevent.wait(1)
def __ProcessPeriodicAction(self, action, stopevent):
# processes given action, call method in separate Thread!
pType = int(action["periodic_type"])
mult = 0
if pType == PERIODIC_SEC:
mult = 1
elif pType == PERIODIC_MIN:
mult = 60
elif pType == PERIODIC_HOUR:
mult = 3600
elif pType == PERIODIC_DAY:
mult = 86400
# calculate interval in seconds according to periodic type
interval = int(action["periodic_interval"]) * mult
print "Starting periodic process of action ", action
while not stopevent.is_set():
# get action command and send it to members
self.__SendCommandToHosts(action)
# sleeps for the given interval, then loop proceeds - wakes up if event is set to stop thread
stopevent.wait(interval)
def __ProcessStartupAction(self, action):
# delay in seconds before triggering command
delay = int(action["delay"])
print "Processing onetime action in %d seconds" % delay
time.sleep(delay)
self.__SendCommandToHosts(action)
def __SendCommandToHosts(self, action):
# get action command and send it to members
cmd = action["command"]
print "Sending Command to hosts: ", cmd
print "Hosts: ", self.hosts
if int(cmd) == PLAYER_START_FILENUMBER:
msgData = messages.getMessage(int(cmd),args=["-i", str(action['file_number'])])
else:
msgData = messages.getMessage(int(cmd))
udpbroadcaster.sendMessageToHosts(msgData, self.hosts)
#### ACCESS METHODS FOR CREATION AND MODIFICATION ####
def InitGroupManager(groupConfig):
global groupManager
groupManager = GroupManager(groupConfig)
def Schedule():
global groupManager
print "Scheduling actions in Group Manager..."
groupManager.ScheduleActions()
def ReInitGroupManager(groupConfig):
InitGroupManager(groupConfig)
global startup
startup = False
Schedule()
def MemberRequest(groupName, masterIP):
global groupManager
groupManager.HandleGroupMemberRequest(groupName, masterIP)
def MemberAcknowledge(groupName, memberIP, byRequest):
global groupManager
groupManager.HandleGroupMemberAcknowledge(groupName, memberIP, byRequest)
def UpdateActions():
global groupManager
groupManager.actionHandler.updateevent.set()
|
{
"content_hash": "9698b05ee34b91e93f2c0872771bdee5",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 135,
"avg_line_length": 41.525925925925925,
"alnum_prop": 0.5758116303960042,
"repo_name": "peter9teufel/raspmedia",
"id": "0b053f8769111dd23e02dba6076e428a4f382202",
"size": "11212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Raspberry/packages/rmnetwork/GroupManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "740"
},
{
"name": "CSS",
"bytes": "3503"
},
{
"name": "HTML",
"bytes": "2444"
},
{
"name": "PHP",
"bytes": "282"
},
{
"name": "Python",
"bytes": "291861"
},
{
"name": "Shell",
"bytes": "17406"
}
],
"symlink_target": ""
}
|
from os import system
from tota.game import Drawer
from tota import settings
from termcolor import colored
def make_bar(length, life, max_life):
"""Create a small unicode bar."""
life = max(life, 0)
life_chars_count = int(life / (max_life / length))
life_chars = life_chars_count * '\u2588'
no_life_chars = (length - life_chars_count) * '\u2591'
return life_chars + no_life_chars
class TerminalDrawer(Drawer):
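    # Renders the world grid plus ancient/tower/hero status bars as colored text;
    # use_basic_icons switches to plain icons, use_compressed_view drops the spacer.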
def __init__(self, use_basic_icons=False, use_compressed_view=False):
self.use_basic_icons = use_basic_icons
self.use_compressed_view = use_compressed_view
def position_draw(self, game, position):
"""Get the string to draw for a given position of the world."""
# decorations first, then things over them
thing = game.world.things.get(position)
effect = game.world.effects.get(position)
effect_color = settings.EFFECT_COLORS.get(effect)
if thing is not None:
if self.use_basic_icons:
icon = thing.ICON_BASIC
else:
icon = thing.ICON
color = settings.TEAM_COLORS[thing.team]
else:
icon = ' '
color = None
if effect_color is not None:
on_color = 'on_' + effect_color
else:
on_color = None
if self.use_compressed_view:
widener = ''
else:
widener = ' '
return colored(icon + widener, color, on_color)
def draw(self, game):
"""Draw the world with 'ascii'-art ."""
screen = ''
# print the world
screen += '\n'.join(u''.join(self.position_draw(game, (x, y))
for x in range(game.world.size[0]))
for y in range(game.world.size[1]))
# game stats
screen += '\nticks:{}'.format(game.world.t)
# print teams stats
for team in (settings.TEAM_RADIANT, settings.TEAM_DIRE):
team_template = '{name}: {score}'
team_stats = team_template.format(
name=team.upper(),
score=game.scores[team],
)
screen += '\n' + colored(team_stats, settings.TEAM_COLORS[team])
ancient = game.ancients[team]
towers = game.towers[team]
heroes = [hero for hero in game.heroes if hero.team == team]
ancient_template = '{icon} {bar}({life}/{max_life}) Ancient'
ancient_stats = ancient_template.format(
icon=(ancient.ICON_BASIC if self.use_basic_icons
else ancient.ICON),
bar=make_bar(20, ancient.life, ancient.max_life),
life=int(ancient.life) if ancient.alive else 'destroyed!',
max_life=int(ancient.max_life),
)
screen += '\n' + colored(ancient_stats,
settings.TEAM_COLORS[team]) + ' \b\b'
for tower in sorted(towers, key=lambda x: x.position):
tower_template = '{icon} {bar}({life}/{max_life}) Tower'
tower_stats = tower_template.format(
icon=(tower.ICON_BASIC if self.use_basic_icons
else tower.ICON),
bar=make_bar(20, tower.life, tower.max_life),
life=int(tower.life) if tower.alive else 'destroyed!',
max_life=int(tower.max_life),
)
screen += '\n' + colored(tower_stats,
settings.TEAM_COLORS[team]) + ' \b\b'
for hero in sorted(heroes, key=lambda x: x.name):
hero_template = ('{icon} {bar}({life}/{max_life}) Hero: '
'{name}. Lvl {level} {level_bar} ({author})')
hero_stats = hero_template.format(
icon=(hero.ICON_BASIC if self.use_basic_icons
else hero.ICON),
bar=make_bar(20, hero.life, hero.max_life),
name=hero.name,
life=int(hero.life) if hero.alive else 'dead',
max_life=int(hero.max_life),
level=hero.level,
level_bar=make_bar(10, hero.xp % settings.XP_TO_LEVEL,
settings.XP_TO_LEVEL),
author=hero.author,
)
screen += '\n' + colored(hero_stats,
settings.TEAM_COLORS[team]) + ' \b\b'
# print events (of last step) for debugging
if game.debug:
screen += u'\n'
screen += u'\n'.join([colored('{}: {}'.format(thing, event),
settings.TEAM_COLORS[thing.team])
for t, thing, event in game.events
if t == game.world.t])
if game.debug:
system('clear')
GO_TO_TOP = '\033[0;0H'
print(GO_TO_TOP + screen, end='')
|
{
"content_hash": "8f9d76eb3caa2c293efd3c63acb36093",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 78,
"avg_line_length": 37.44117647058823,
"alnum_prop": 0.497643362136685,
"repo_name": "fisadev/tota",
"id": "64eef361c7188a45f88b22c334ea77e00f66b353",
"size": "5092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tota/drawers/terminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82700"
}
],
"symlink_target": ""
}
|
from djangotoolbox.fields import ListField
from django.db import models
from student.models import Student
from random import random
OPERATION_CHOICES = (
('0',u'addition'),
('1',u'subtraction'),
('2',u'multiplication'),
('3',u'division'),
)
ASK_CHANCE = 0.1
def ask_anyway():
    return random() < ASK_CHANCE
class Question(models.Model):
first_number = models.IntegerField()
second_number = models.IntegerField()
operation = models.TextField(choices=OPERATION_CHOICES)
def get(self,first,second,op):
        return Question.objects.get(first_number=first,second_number=second,operation=op)
# def __init__(self,first,second):
# self.first_number = first
# self.second_number = second
def increment_first_number(self):
num = self.first_number % 10 + 1
return self.get(num,self.second_number,self.operation)
def increment_second_number(self):
num = self.second_number % 10 + 1
return self.get(self.first_number,num,self.operation)
def select_question(self,student):
if student.dominated_second_number(self):
            question = self.change_both_numbers(student)
        else:
            question = self.change_first_number(student)
play(question)
def change_first_number(self,student):
question = self
while True:
question = question.increment_first_number()
            if not student.dominated(question,question.operation) or ask_anyway():
return question
    def change_both_numbers(self,student):
        question = self
        while True:
            question = question.increment_second_number()
            if not student.dominated(question,question.operation) or ask_anyway():
                return question.change_first_number(student)
class Answer(models.Model):
student = models.ForeignKey(Student)
question = models.ForeignKey(Question)
answer = models.IntegerField()
answered_correct = models.BooleanField()
datetime = models.DateTimeField()
time_taken = models.TimeField()
    def save(self, *args, **kwargs):
        super(Answer, self).save(*args, **kwargs)
        self.student._answer_list.append(self.pk)
self.student.save()
class AnswerHistory(models.Model):
student = models.ForeignKey(Student)
question = models.ForeignKey(Question)
_answer_list = ListField()
@property
def list(self):
ans_list = []
for ans in self._answer_list:
            ans_list.append(Answer.objects.get(pk=ans))
return ans_list
|
{
"content_hash": "089b4c1cdb1b0c8b8a2fde2ab309f054",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.7371794871794872,
"repo_name": "bernardokyotoku/skillplant",
"id": "d4fc194abf2374357c94f8e72a02a71e9fc67f03",
"size": "2184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "103281"
},
{
"name": "Python",
"bytes": "4219238"
},
{
"name": "Shell",
"bytes": "500"
}
],
"symlink_target": ""
}
|
from typing import List, Tuple
from corehq.extensions import extension_point, ResultFormat
@extension_point(result_format=ResultFormat.FLATTEN)
def uitab_dropdown_items(tab_name, tab, domain, request) -> List[dict]:
"""Add dropdown items to UI Tabs.
Parameters:
:param tab_name: Name of the tab that items will be added to
:param tab: The tab instance
:param domain: The domain of the current request
:param request: The current request
Returns:
A dict with the following keys:
* title
* url (default=None)
* html (default=None)
* is_header (default=False)
* is_divider (default=False)
* data_id (default=None)
"""
@extension_point(result_format=ResultFormat.FLATTEN)
def uitab_sidebar_items(tab_name, tab, domain, request) -> List[Tuple[str, List[dict]]]:
"""Add sidebar items to UI tabs.
Parameters:
:param tab_name: Name of the UI Tab
:param tab: The tab instance
:param domain: The domain name
:param request: The request object
Returns:
A list of tuples: Tuple[header_text, List[dict]]. The dictionaries must have
the following keys:
* title: Link text
* url: relative URL for the UI
* icon: Link icon
* show_in_dropdown (optional): boolean
"""
@extension_point(result_format=ResultFormat.FLATTEN)
def uitab_classes():
"""Add custom tabs to the top navigation
Parameters:
None
Returns:
List of UITab subclasses
"""
|
{
"content_hash": "00f97171fa3a8dfa206376bbeff186da",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 88,
"avg_line_length": 26.93103448275862,
"alnum_prop": 0.6402048655569782,
"repo_name": "dimagi/commcare-hq",
"id": "1f09e308388b9d06d530507ac8151f6d2ec40542",
"size": "1562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/tabs/extension_points.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.views.generic import TemplateView
from .sitemaps import StaticViewSitemap
sitemaps = {
'static': StaticViewSitemap
}
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^robots\.txt', TemplateView.as_view(template_name='robots.txt')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}, name='sitemap'),
    url(r'^$', TemplateView.as_view(template_name='index.html'), name='index'),
)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import debug_toolbar
urlpatterns += patterns('', url(r'^__debug__/', include(debug_toolbar.urls)))
urlpatterns += patterns('', url(r'^silk/', include('silk.urls', namespace='silk')))
|
{
"content_hash": "43c63ebbc0d6e343e110984faea66870",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 41.11538461538461,
"alnum_prop": 0.7287184284377923,
"repo_name": "Korkki/django-base-template",
"id": "8d1faaebf3195f99165f849d7a0e3bdeda9c82cc",
"size": "1069",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project_name/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59449"
},
{
"name": "HTML",
"bytes": "9211"
},
{
"name": "Python",
"bytes": "8338"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import select
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
def run_command(cmd, cwd=None):
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, cwd=cwd)
left_over = b''
while p.poll() is None:
        incoming = left_over
        rlist, wlist, xlist = select.select([p.stdout], [], [])
        if rlist:
            incoming += os.read(p.stdout.fileno(), 1024)
        lines = incoming.splitlines(True)  # keepends=True
if not lines:
continue
        if lines[-1].endswith(b'\n'):
            data = b''.join(lines)
            left_over = b''
        else:
            data = b''.join(lines[:-1])
            left_over = lines[-1]
try:
yield data.decode()
except UnicodeDecodeError as exc:
yield unicode(data, errors='ignore')
# Done
yield p.returncode
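# Illustrative usage, not part of the original module: run_command is a
# generator that yields decoded text chunks while the process runs and its
# integer return code as the final item, so callers dispatch on type. The
# default command below is only an example.
def _example_usage(cmd=('echo', 'hello')):
    for item in run_command(list(cmd)):
        if isinstance(item, int):
            return item          # process exit code
        print(item, end='')      # chunk of combined stdout/stderr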
|
{
"content_hash": "1184bf8994a80c8a389bf8ba8ce68c88",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 67,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.5421319796954315,
"repo_name": "davetcoleman/catkin_tools",
"id": "3d02de124b1dc0419959d5e5ff42f1b6e368e6ab",
"size": "1587",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "catkin_tools/runner/run_windows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "679"
},
{
"name": "Objective-C",
"bytes": "3354"
},
{
"name": "Python",
"bytes": "275993"
}
],
"symlink_target": ""
}
|
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import tables
LOG = logging.getLogger(__name__)
class DeleteKeyPairs(tables.DeleteAction):
data_type_singular = _("Keypair")
data_type_plural = _("Keypairs")
def delete(self, request, obj_id):
api.nova.keypair_delete(request, obj_id)
class ImportKeyPair(tables.LinkAction):
name = "import"
verbose_name = _("Import Keypair")
url = "horizon:nova:access_and_security:keypairs:import"
classes = ("ajax-modal", "btn-upload")
class CreateKeyPair(tables.LinkAction):
name = "create"
verbose_name = _("Create Keypair")
url = "horizon:nova:access_and_security:keypairs:create"
classes = ("ajax-modal", "btn-create")
class KeypairsTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Keypair Name"))
fingerprint = tables.Column("fingerprint", verbose_name=_("Fingerprint"))
def get_object_id(self, keypair):
return keypair.name
class Meta:
name = "keypairs"
verbose_name = _("Keypairs")
table_actions = (CreateKeyPair, ImportKeyPair, DeleteKeyPairs,)
row_actions = (DeleteKeyPairs,)
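# Illustrative sketch, not part of the original file: a Horizon panel normally
# exposes a DataTable like the one above through a DataTableView. The template
# path is a placeholder, and keypair_list is assumed to mirror the
# keypair_delete call used in DeleteKeyPairs.
class ExampleIndexView(tables.DataTableView):
    table_class = KeypairsTable
    template_name = 'nova/access_and_security/keypairs/index.html'
    def get_data(self):
        return api.nova.keypair_list(self.request)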
|
{
"content_hash": "214c2822d328cb3de19c64ab12d00de0",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 27.155555555555555,
"alnum_prop": 0.6783960720130933,
"repo_name": "gyang/horizon",
"id": "8a9345c32624dda3b66427821b92364dc1a1e585",
"size": "1872",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "horizon/dashboards/nova/access_and_security/keypairs/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|