text
stringlengths 4
1.02M
| meta
dict |
|---|---|
tickets = []
# allocate new tickets
def make_tickets():
for i in range(10):
tickets.append("ticket_" + str(i))
# log all tickets currently in the system to terminal
def show_all_tickets():
term_light_blue = "\033[0;94m"
term_reset = "\033[0;0m"
for ticket in tickets:
print term_light_blue + ticket + term_reset
if '__main__' == __name__:
make_tickets()
show_all_tickets()
|
{
"content_hash": "b952d9726a9bd7a318b55495331f147c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 53,
"avg_line_length": 17.583333333333332,
"alnum_prop": 0.6066350710900474,
"repo_name": "kouritron/uvs",
"id": "3e2c7cc836d70e4bfa54ab06f5a6df205306b2a9",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/break_3way_merge/tickets_prettified.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "275407"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import gym
import logging
import numpy as np
import re
from typing import Callable, Dict, List, Optional, Tuple, Type
from ray.util.debug import log_once
from ray.rllib.models.tf.tf_action_dist import TFActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_ops import get_placeholder
from ray.rllib.utils.tracking_dict import UsageTrackingDict
from ray.rllib.utils.typing import ModelGradients, TensorType, \
TrainerConfigDict
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
class DynamicTFPolicy(TFPolicy):
"""A TFPolicy that auto-defines placeholders dynamically at runtime.
Do not sub-class this class directly (neither should you sub-class
TFPolicy), but rather use rllib.policy.tf_policy_template.build_tf_policy
to generate your custom tf (graph-mode or eager) Policy classes.
Initialization of this class occurs in two phases.
* Phase 1: the model is created and model variables are initialized.
* Phase 2: a fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Initialization defines the static graph.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
model (TorchModel): TF model instance
dist_class (type): TF action distribution class
"""
@DeveloperAPI
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
loss_fn: Callable[[
Policy, ModelV2, Type[TFActionDistribution], SampleBatch
], TensorType],
*,
stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[
str, TensorType]]] = None,
grad_stats_fn: Optional[Callable[[
Policy, SampleBatch, ModelGradients
], Dict[str, TensorType]]] = None,
before_loss_init: Optional[Callable[[
Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
], None]] = None,
make_model: Optional[Callable[[
Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
], ModelV2]] = None,
action_sampler_fn: Optional[Callable[[
TensorType, List[TensorType]
], Tuple[TensorType, TensorType]]] = None,
action_distribution_fn: Optional[Callable[[
Policy, ModelV2, TensorType, TensorType, TensorType
], Tuple[TensorType, type, List[TensorType]]]] = None,
existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
existing_model: Optional[ModelV2] = None,
view_requirements_fn: Optional[Callable[[Policy], Dict[
str, ViewRequirement]]] = None,
get_batch_divisibility_req: Optional[Callable[[Policy],
int]] = None,
obs_include_prev_action_reward: bool = True):
"""Initialize a dynamic TF policy.
Args:
observation_space (gym.spaces.Space): Observation space of the
policy.
action_space (gym.spaces.Space): Action space of the policy.
config (TrainerConfigDict): Policy-specific configuration data.
loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution],
SampleBatch], TensorType]): Function that returns a loss tensor
for the policy graph.
stats_fn (Optional[Callable[[Policy, SampleBatch],
Dict[str, TensorType]]]): Optional function that returns a dict
of TF fetches given the policy and batch input tensors.
grad_stats_fn (Optional[Callable[[Policy, SampleBatch,
ModelGradients], Dict[str, TensorType]]]):
Optional function that returns a dict of TF fetches given the
policy, sample batch, and loss gradient tensors.
before_loss_init (Optional[Callable[
[Policy, gym.spaces.Space, gym.spaces.Space,
TrainerConfigDict], None]]): Optional function to run prior to
loss init that takes the same arguments as __init__.
make_model (Optional[Callable[[Policy, gym.spaces.Space,
gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional
function that returns a ModelV2 object given
policy, obs_space, action_space, and policy config.
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[
str, TensorType], TensorType, TensorType], Tuple[TensorType,
TensorType]]]): A callable returning a sampled action and its
log-likelihood given Policy, ModelV2, input_dict, explore,
timestep, and is_training.
action_distribution_fn (Optional[Callable[[Policy, ModelV2,
Dict[str, TensorType], TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]]]]): A callable
returning distribution inputs (parameters), a dist-class to
generate an action distribution object from, and
internal-state outputs (or an empty list if not applicable).
Note: No Exploration hooks have to be called from within
`action_distribution_fn`. It's should only perform a simple
forward pass through some model.
If None, pass inputs through `self.model()` to get distribution
inputs.
The callable takes as inputs: Policy, ModelV2, input_dict,
explore, timestep, is_training.
existing_inputs (Optional[Dict[str, tf1.placeholder]]): When
copying a policy, this specifies an existing dict of
placeholders to use instead of defining new ones.
existing_model (Optional[ModelV2]): When copying a policy, this
specifies an existing model to clone and share weights with.
get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
Optional callable that returns the divisibility requirement for
sample batches. If None, will assume a value of 1.
obs_include_prev_action_reward (bool): Whether to include the
previous action and reward in the model input (default: True).
"""
self.observation_space = obs_space
self.action_space = action_space
self.config = config
self.framework = "tf"
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
dist_class = dist_inputs = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given")
else:
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
# Setup self.model.
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework="tf")
# Auto-update model's inference view requirements, if recurrent.
self._update_model_inference_view_requirements_from_init_state()
if existing_inputs:
self._state_inputs = [
v for k, v in existing_inputs.items()
if k.startswith("state_in_")
]
if self._state_inputs:
self._seq_lens = existing_inputs["seq_lens"]
else:
if self.config["_use_trajectory_view_api"]:
self._state_inputs = [
tf1.placeholder(
shape=(None, ) + vr.space.shape, dtype=vr.space.dtype)
for k, vr in
self.model.inference_view_requirements.items()
if k[:9] == "state_in_"
]
else:
self._state_inputs = [
tf1.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
for s in self.model.get_initial_state()
]
# Use default settings.
# Add NEXT_OBS, STATE_IN_0.., and others.
self.view_requirements = self._get_default_view_requirements()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.inference_view_requirements)
# Setup standard placeholders.
if existing_inputs is not None:
timestep = existing_inputs["timestep"]
explore = existing_inputs["is_exploring"]
self._input_dict, self._dummy_batch = \
self._get_input_dict_and_dummy_batch(
self.view_requirements, existing_inputs)
else:
action_ph = ModelCatalog.get_action_placeholder(action_space)
prev_action_ph = ModelCatalog.get_action_placeholder(
action_space, "prev_action")
if self.config["_use_trajectory_view_api"]:
self._input_dict, self._dummy_batch = \
self._get_input_dict_and_dummy_batch(
self.view_requirements,
{SampleBatch.ACTIONS: action_ph,
SampleBatch.PREV_ACTIONS: prev_action_ph})
else:
self._input_dict = {
SampleBatch.CUR_OBS: tf1.placeholder(
tf.float32,
shape=[None] + list(obs_space.shape),
name="observation")
}
self._input_dict[SampleBatch.ACTIONS] = action_ph
if self._obs_include_prev_action_reward:
self._input_dict.update({
SampleBatch.PREV_ACTIONS: prev_action_ph,
SampleBatch.PREV_REWARDS: tf1.placeholder(
tf.float32, [None], name="prev_reward"),
})
# Placeholder for (sampling steps) timestep (int).
timestep = tf1.placeholder(tf.int64, (), name="timestep")
# Placeholder for `is_exploring` flag.
explore = tf1.placeholder_with_default(
True, (), name="is_exploring")
# Placeholder for RNN time-chunk valid lengths.
self._seq_lens = tf1.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens")
# Placeholder for `is_training` flag.
self._input_dict["is_training"] = self._get_is_training_placeholder()
# Create the Exploration object to use for this Policy.
self.exploration = self._create_exploration()
# Fully customized action generation (e.g., custom policy).
if action_sampler_fn:
sampled_action, sampled_action_logp = action_sampler_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(
SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(
SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict["is_training"])
else:
# Distribution generation is customized, e.g., DQN, DDPG.
if action_distribution_fn:
dist_inputs, dist_class, self._state_out = \
action_distribution_fn(
self, self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(
SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(
SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict["is_training"])
# Default distribution generation behavior:
# Pass through model. E.g., PG, PPO.
else:
dist_inputs, self._state_out = self.model(
self._input_dict, self._state_inputs, self._seq_lens)
action_dist = dist_class(dist_inputs, self.model)
# Using exploration to get final action (e.g. via sampling).
sampled_action, sampled_action_logp = \
self.exploration.get_exploration_action(
action_distribution=action_dist,
timestep=timestep,
explore=explore)
if self.config["_use_trajectory_view_api"]:
self._dummy_batch[SampleBatch.ACTION_DIST_INPUTS] = \
np.zeros(
[1 if not s else s for s in
dist_inputs.shape.as_list()])
self._input_dict[SampleBatch.ACTION_DIST_INPUTS] = \
tf1.placeholder(shape=dist_inputs.shape.as_list(),
dtype=tf.float32)
# Phase 1 init.
sess = tf1.get_default_session() or tf1.Session()
batch_divisibility_req = get_batch_divisibility_req(self) if \
callable(get_batch_divisibility_req) else \
(get_batch_divisibility_req or 1)
super().__init__(
observation_space=obs_space,
action_space=action_space,
config=config,
sess=sess,
obs_input=self._input_dict[SampleBatch.OBS],
action_input=self._input_dict[SampleBatch.ACTIONS],
sampled_action=sampled_action,
sampled_action_logp=sampled_action_logp,
dist_inputs=dist_inputs,
dist_class=dist_class,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_inputs,
state_outputs=self._state_out,
prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS),
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req,
explore=explore,
timestep=timestep)
# Phase 2 init.
if before_loss_init is not None:
before_loss_init(self, obs_space, action_space, config)
# Loss initialization and model/postprocessing test calls.
if not existing_inputs:
self._initialize_loss_from_dummy_batch(
auto_remove_unneeded_view_reqs=True)
@override(TFPolicy)
@DeveloperAPI
def copy(self,
existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> TFPolicy:
"""Creates a copy of self using existing input placeholders."""
# Note that there might be RNN state inputs at the end of the list
if len(self._loss_input_dict) != len(existing_inputs):
raise ValueError("Tensor list mismatch", self._loss_input_dict,
self._state_inputs, existing_inputs)
for i, (k, v) in enumerate(self._loss_input_dict_no_rnn.items()):
if v.shape.as_list() != existing_inputs[i].shape.as_list():
raise ValueError("Tensor shape mismatch", i, k, v.shape,
existing_inputs[i].shape)
# By convention, the loss inputs are followed by state inputs and then
# the seq len tensor
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(
("state_in_{}".format(i),
existing_inputs[len(self._loss_input_dict_no_rnn) + i]))
if rnn_inputs:
rnn_inputs.append(("seq_lens", existing_inputs[-1]))
input_dict = OrderedDict(
[("is_exploring", self._is_exploring), ("timestep",
self._timestep)] +
[(k, existing_inputs[i])
for i, k in enumerate(self._loss_input_dict_no_rnn.keys())] +
rnn_inputs)
instance = self.__class__(
self.observation_space,
self.action_space,
self.config,
existing_inputs=input_dict,
existing_model=self.model)
instance._loss_input_dict = input_dict
loss = instance._do_loss_init(input_dict)
loss_inputs = [
(k, existing_inputs[i])
for i, k in enumerate(self._loss_input_dict_no_rnn.keys())
]
TFPolicy._initialize_loss(instance, loss, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(
instance._grad_stats_fn(instance, input_dict, instance._grads))
return instance
# TODO: (sven) deprecate once _use_trajectory_view_api is always True.
@override(Policy)
@DeveloperAPI
def get_initial_state(self) -> List[TensorType]:
if self.model:
return self.model.get_initial_state()
else:
return []
def _get_input_dict_and_dummy_batch(self, view_requirements,
existing_inputs):
"""Creates input_dict and dummy_batch for loss initialization.
Used for managing the Policy's input placeholders and for loss
initialization.
Input_dict: Str -> tf.placeholders, dummy_batch: str -> np.arrays.
Args:
view_requirements (ViewReqs): The view requirements dict.
existing_inputs (Dict[str, tf.placeholder]): A dict of already
existing placeholders.
Returns:
Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The
input_dict/dummy_batch tuple.
"""
input_dict = {}
dummy_batch = {}
for view_col, view_req in view_requirements.items():
# Point state_in to the already existing self._state_inputs.
mo = re.match("state_in_(\d+)", view_col)
if mo is not None:
input_dict[view_col] = self._state_inputs[int(mo.group(1))]
dummy_batch[view_col] = np.zeros_like(
[view_req.space.sample()])
# State-outs (no placeholders needed).
elif view_col.startswith("state_out_"):
dummy_batch[view_col] = np.zeros_like(
[view_req.space.sample()])
# Skip action dist inputs placeholder (do later).
elif view_col == SampleBatch.ACTION_DIST_INPUTS:
continue
elif view_col in existing_inputs:
input_dict[view_col] = existing_inputs[view_col]
dummy_batch[view_col] = np.zeros(
shape=[
1 if s is None else s
for s in existing_inputs[view_col].shape.as_list()
],
dtype=existing_inputs[view_col].dtype.as_numpy_dtype)
# All others.
else:
if view_req.used_for_training:
input_dict[view_col] = get_placeholder(
space=view_req.space, name=view_col)
dummy_batch[view_col] = np.zeros_like(
[view_req.space.sample()])
return input_dict, dummy_batch
def _initialize_loss_from_dummy_batch(
self, auto_remove_unneeded_view_reqs: bool = True,
stats_fn=None) -> None:
# Test calls depend on variable init, so initialize model first.
self._sess.run(tf1.global_variables_initializer())
if self.config["_use_trajectory_view_api"]:
logger.info("Testing `compute_actions` w/ dummy batch.")
actions, state_outs, extra_fetches = \
self.compute_actions_from_input_dict(
self._dummy_batch, explore=False, timestep=0)
for key, value in extra_fetches.items():
self._dummy_batch[key] = np.zeros_like(value)
self._input_dict[key] = get_placeholder(value=value, name=key)
if key not in self.view_requirements:
logger.info("Adding extra-action-fetch `{}` to "
"view-reqs.".format(key))
self.view_requirements[key] = \
ViewRequirement(space=gym.spaces.Box(
-1.0, 1.0, shape=value.shape[1:],
dtype=value.dtype))
dummy_batch = self._dummy_batch
else:
def fake_array(tensor):
shape = tensor.shape.as_list()
shape = [s if s is not None else 1 for s in shape]
return np.zeros(shape, dtype=tensor.dtype.as_numpy_dtype)
dummy_batch = {
SampleBatch.CUR_OBS: fake_array(self._obs_input),
SampleBatch.NEXT_OBS: fake_array(self._obs_input),
SampleBatch.DONES: np.array([False], dtype=np.bool),
SampleBatch.ACTIONS: fake_array(
ModelCatalog.get_action_placeholder(self.action_space)),
SampleBatch.REWARDS: np.array([0], dtype=np.float32),
}
if self._obs_include_prev_action_reward:
dummy_batch.update({
SampleBatch.PREV_ACTIONS: fake_array(
self._prev_action_input),
SampleBatch.PREV_REWARDS: fake_array(
self._prev_reward_input),
})
state_init = self.get_initial_state()
state_batches = []
for i, h in enumerate(state_init):
dummy_batch["state_in_{}".format(i)] = np.expand_dims(h, 0)
dummy_batch["state_out_{}".format(i)] = np.expand_dims(h, 0)
state_batches.append(np.expand_dims(h, 0))
if state_init:
dummy_batch["seq_lens"] = np.array([1], dtype=np.int32)
for k, v in self.extra_compute_action_fetches().items():
dummy_batch[k] = fake_array(v)
sb = SampleBatch(dummy_batch)
batch_for_postproc = UsageTrackingDict(sb)
batch_for_postproc.count = sb.count
logger.info("Testing `postprocess_trajectory` w/ dummy batch.")
postprocessed_batch = self.postprocess_trajectory(batch_for_postproc)
# Add new columns automatically to (loss) input_dict.
if self.config["_use_trajectory_view_api"]:
for key in batch_for_postproc.added_keys:
if key not in self._input_dict:
self._input_dict[key] = get_placeholder(
value=batch_for_postproc[key], name=key)
if key not in self.view_requirements:
self.view_requirements[key] = \
ViewRequirement(space=gym.spaces.Box(
-1.0, 1.0, shape=batch_for_postproc[key].shape[1:],
dtype=batch_for_postproc[key].dtype))
if not self.config["_use_trajectory_view_api"]:
train_batch = UsageTrackingDict(
dict({
SampleBatch.CUR_OBS: self._obs_input,
}, **self._loss_input_dict))
if self._obs_include_prev_action_reward:
train_batch.update({
SampleBatch.PREV_ACTIONS: self._prev_action_input,
SampleBatch.PREV_REWARDS: self._prev_reward_input,
})
for k, v in postprocessed_batch.items():
if k in train_batch:
continue
elif v.dtype == np.object:
continue # can't handle arbitrary objects in TF
elif k == "seq_lens" or k.startswith("state_in_"):
continue
shape = (None, ) + v.shape[1:]
dtype = np.float32 if v.dtype == np.float64 else v.dtype
placeholder = tf1.placeholder(dtype, shape=shape, name=k)
train_batch[k] = placeholder
for i, si in enumerate(self._state_inputs):
train_batch["state_in_{}".format(i)] = si
else:
train_batch = UsageTrackingDict(
dict(self._input_dict, **self._loss_input_dict))
if self._state_inputs:
train_batch["seq_lens"] = self._seq_lens
if log_once("loss_init"):
logger.debug(
"Initializing loss function with dummy input:\n\n{}\n".format(
summarize(train_batch)))
self._loss_input_dict.update({k: v for k, v in train_batch.items()})
loss = self._do_loss_init(train_batch)
all_accessed_keys = \
train_batch.accessed_keys | batch_for_postproc.accessed_keys | \
batch_for_postproc.added_keys | set(
self.model.inference_view_requirements.keys())
TFPolicy._initialize_loss(self, loss, [(k, v)
for k, v in train_batch.items()
if k in all_accessed_keys])
if "is_training" in self._loss_input_dict:
del self._loss_input_dict["is_training"]
# Call the grads stats fn.
# TODO: (sven) rename to simply stats_fn to match eager and torch.
if self._grad_stats_fn:
self._stats_fetches.update(
self._grad_stats_fn(self, train_batch, self._grads))
# Add new columns automatically to view-reqs.
if self.config["_use_trajectory_view_api"] and \
auto_remove_unneeded_view_reqs:
# Add those needed for postprocessing and training.
all_accessed_keys = train_batch.accessed_keys | \
batch_for_postproc.accessed_keys
# Tag those only needed for post-processing.
for key in batch_for_postproc.accessed_keys:
if key not in train_batch.accessed_keys:
self.view_requirements[key].used_for_training = False
if key in self._loss_input_dict:
del self._loss_input_dict[key]
# Remove those not needed at all (leave those that are needed
# by Sampler to properly execute sample collection).
# Also always leave DONES and REWARDS, no matter what.
for key in list(self.view_requirements.keys()):
if key not in all_accessed_keys and key not in [
SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID, SampleBatch.DONES,
SampleBatch.REWARDS] and \
key not in self.model.inference_view_requirements:
# If user deleted this key manually in postprocessing
# fn, warn about it and do not remove from
# view-requirements.
if key in batch_for_postproc.deleted_keys:
logger.warning(
"SampleBatch key '{}' was deleted manually in "
"postprocessing function! RLlib will "
"automatically remove non-used items from the "
"data stream. Remove the `del` from your "
"postprocessing function.".format(key))
else:
del self.view_requirements[key]
if key in self._loss_input_dict:
del self._loss_input_dict[key]
# Add those data_cols (again) that are missing and have
# dependencies by view_cols.
for key in list(self.view_requirements.keys()):
vr = self.view_requirements[key]
if (vr.data_col is not None
and vr.data_col not in self.view_requirements):
used_for_training = \
vr.data_col in train_batch.accessed_keys
self.view_requirements[vr.data_col] = ViewRequirement(
space=vr.space, used_for_training=used_for_training)
self._loss_input_dict_no_rnn = {
k: v
for k, v in self._loss_input_dict.items()
if (v not in self._state_inputs and v != self._seq_lens)
}
# Initialize again after loss init.
self._sess.run(tf1.global_variables_initializer())
def _do_loss_init(self, train_batch: SampleBatch):
loss = self._loss_fn(self, self.model, self.dist_class, train_batch)
if self._stats_fn:
self._stats_fetches.update(self._stats_fn(self, train_batch))
# override the update ops to be those of the model
self._update_ops = self.model.update_ops()
return loss
|
{
"content_hash": "a7b7b003baea6bcdee3ac026b3d3d4d3",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 79,
"avg_line_length": 47.44565217391305,
"alnum_prop": 0.5585337915234823,
"repo_name": "richardliaw/ray",
"id": "900d6cb570e39ec1cba4995bbf7c5f3fafabee7c",
"size": "30555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/policy/dynamic_tf_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inout', '0007_auto_20170102_0146'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'verbose_name_plural': 'Entries'},
),
migrations.AlterField(
model_name='entry',
name='action_date',
field=models.DateTimeField(auto_now_add=True),
),
]
|
{
"content_hash": "25281ca81178203ed664ad1ef794d8a1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 58,
"avg_line_length": 23.59090909090909,
"alnum_prop": 0.5741811175337187,
"repo_name": "detialiaj/pro",
"id": "936a136295e7df86f4b59d60b4274bb56aafd816",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SYS/inout/migrations/0008_auto_20170102_1648.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "394"
},
{
"name": "HTML",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "36038"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
@IN.hook
def access_keys():
entity_bundle = IN.entitier.entity_bundle
keys = {}
group = 'Entity Admin'
keys[group] = OrderedDict() # we may need order
keys_entity_type = keys[group]
# administer all entities
keys_entity_type['admin_all_entity'] = {
'title' : s('(Administer) Allow nabar to do ANY ACTION on all site Entities'),
'flag' : 'danger',
}
for entity_type, bundles in entity_bundle.items():
#entity_type = entity_type.lower() # lower case
group = 'Entity_' + entity_type
keys[group] = OrderedDict() # keep order
keys_entity_type = keys[group]
for entity_bundle in bundles:
bundle_of_entity = s('{entity_bundle} of type {entity_type}', {
'entity_type' : entity_type,
'entity_bundle' : entity_bundle
})
#entity_bundle = entity_bundle.lower()
# add / create
prefix = '_'.join(('add', entity_type, entity_bundle))
keys_entity_type[prefix] = {
'title' : s('Allow nabar to add/create ') + bundle_of_entity
}
# view
prefix = '_'.join(('view', entity_type, entity_bundle))
keys_entity_type[prefix + '_own'] = {
'title' : s('Allow nabar to view own ') + bundle_of_entity
}
keys_entity_type[prefix + '_others'] = {
'title' : s('Allow nabar to view other\'s ') + bundle_of_entity
}
keys_entity_type[prefix + '_own_unpublished'] = {
'title' : s('Allow nabar to view own unpublished ') + bundle_of_entity
}
keys_entity_type[prefix + '_others_unpublished'] = {
'title' : s('Allow nabar to view other\'s unpublished ') + bundle_of_entity
}
# edit / update
prefix = '_'.join(('edit', entity_type, entity_bundle))
keys_entity_type[prefix + '_own'] = {
'title' : s('Allow nabar to edit own ') + bundle_of_entity
}
keys_entity_type[prefix + '_others'] = {
'title' : s('Allow nabar to edit other\'s ') + bundle_of_entity,
'flag' : 'warning',
}
keys_entity_type[prefix + '_own_unpublished'] = {
'title' : s('Allow nabar to edit own unpublished ') + bundle_of_entity
}
keys_entity_type[prefix + '_others_unpublished'] = {
'title' : s('Allow nabar to edit other\'s unpublished ') + bundle_of_entity,
'flag' : 'warning',
}
# delete
prefix = '_'.join(('delete', entity_type, entity_bundle))
keys_entity_type[prefix + '_own'] = {
'title' : s('Allow nabar to delete own ') + bundle_of_entity
}
keys_entity_type[prefix + '_others'] = {
'title' : s('Allow nabar to delete other\'s ') + bundle_of_entity,
'flag' : 'danger',
}
keys_entity_type[prefix + '_own_unpublished'] = {
'title' : s('Allow nabar to delete own unpublished ') + bundle_of_entity
}
keys_entity_type[prefix + '_others_unpublished'] = {
'title' : s('Allow nabar to delete other\'s unpublished ') + bundle_of_entity,
'flag' : 'danger',
}
# admin
prefix = '_'.join(('admin', entity_type, entity_bundle))
keys_entity_type[prefix] = {
'title' : s('(Administer) Allow nabar to do ANY ACTION on all ') + bundle_of_entity,
'flag' : 'danger',
}
# administer entity_type
keys_entity_type['admin_' + entity_type] = {
'title' : s('(Administer) Allow nabar to do ANY ACTION on all type of ') + entity_type,
'flag' : 'danger',
}
return keys
|
{
"content_hash": "3988bbfcf7f0d900b545d152f4ffb49c",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 90,
"avg_line_length": 31.682692307692307,
"alnum_prop": 0.6127465857359636,
"repo_name": "vinoth3v/In",
"id": "4be5acf38e6f87d7ef59ffa81418adf4510fe07f",
"size": "3295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "In/entity/access.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33032"
},
{
"name": "Python",
"bytes": "779047"
}
],
"symlink_target": ""
}
|
from unittest import TestCase, expectedFailure
from autodepgraph import visualization as vis
import autodepgraph as adg
import networkx as nx
from autodepgraph.graph import AutoDepGraph_DAG
import yaml
import os
test_dir = os.path.join(adg.__path__[0], 'tests', 'test_data')
class Test_Graph(TestCase):
@classmethod
def setUpClass(self):
cal_True_delayed = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_True_delayed')
test_graph = AutoDepGraph_DAG('test graph')
for node in ['A', 'B', 'C', 'D', 'E']:
test_graph.add_node(node, calibrate_function=cal_True_delayed)
test_graph.add_edge('C', 'A')
test_graph.add_edge('C', 'B')
test_graph.add_edge('B', 'A')
test_graph.add_edge('D', 'A')
test_graph.add_edge('E', 'D')
self.test_graph = test_graph
def test_default_not_implemented_cal(self):
test_graph = AutoDepGraph_DAG('test graph')
test_graph.add_node('A')
self.assertEqual(test_graph.nodes()['A']['state'], 'unknown')
with self.assertRaises(ValueError):
test_graph.maintain_node('A')
self.assertEqual(test_graph.nodes()['A']['state'], 'bad')
with self.assertRaises(ValueError):
test_graph.maintain_A()
def test_tolerance_check(self):
# The default check returns 1.0
self.test_graph.nodes['A']['tolerance'] = 0
self.assertEqual(self.test_graph.check_node('A'), 'needs calibration')
self.test_graph.nodes['A']['tolerance'] = 2
self.assertEqual(self.test_graph.check_node('A'), 'good')
self.test_graph.nodes['A']['tolerance'] = 0
self.assertEqual(self.test_graph.check_node('A'), 'needs calibration')
def test_maintain_node_assume_unkown_is_good(self):
self.test_graph.set_all_node_states(
'unknown')
self.test_graph.maintain_node('C')
self.assertEqual(self.test_graph.nodes()['C']['state'], 'good')
self.assertEqual(self.test_graph.nodes()['B']['state'], 'unknown')
def test_calibration_state(self):
s = self.test_graph.calibration_state()
assert( isinstance(s, dict))
def test_set_function(self):
self.test_graph.set_node_attribute('A', 'myattribute', 10)
self.assertEqual(self.test_graph.get_node_attribute('A', 'myattribute'), 10)
self.test_graph.set_node_description('A', 'explain node A')
self.assertEqual(self.test_graph.get_node_attribute('A', 'description'), 'explain node A')
def test_maintain_node_require_cal(self):
self.test_graph.set_all_node_states(
'needs calibration')
self.test_graph.maintain_node('C')
self.assertEqual(self.test_graph.nodes()['C']['state'], 'good')
self.assertEqual(self.test_graph.nodes()['B']['state'], 'good')
self.assertEqual(self.test_graph.nodes()['D']['state'],
'needs calibration')
def test_bad_node(self):
cal_True_delayed = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_True_delayed')
test_graph = AutoDepGraph_DAG('test graph')
for node in ['A', 'B', 'C', 'D', 'E']:
test_graph.add_node(node, calibrate_function=cal_True_delayed)
test_graph.add_edge('C', 'A')
test_graph.add_edge('C', 'B')
test_graph.add_edge('B', 'A')
test_graph.add_edge('D', 'A')
test_graph.add_edge('E', 'D')
test_graph.set_all_node_states('unknown')
self.assertEqual(test_graph.nodes()['C']['state'], 'unknown')
self.assertEqual(test_graph.nodes()['B']['state'], 'unknown')
self.assertEqual(test_graph.nodes()['A']['state'], 'unknown')
cal_False = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_False')
test_graph.nodes['C']['calibrate_function'] = cal_False
# Failure to calibrate should raise an error
with self.assertRaises(ValueError):
test_graph.maintain_node('C')
# In the process of trying to fix node C it should try to
# calibrate it's requirements
self.assertEqual(test_graph.nodes()['C']['state'], 'bad')
self.assertEqual(test_graph.nodes()['B']['state'], 'good')
self.assertEqual(test_graph.nodes()['A']['state'], 'good')
cal_True_delayed = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_True_delayed')
def test_plotting_mpl(self):
self.test_graph.draw_mpl()
self.test_graph.cfg_plot_mode = 'matplotlib'
self.test_graph.update_monitor()
# call twice to have both creation and update of plot
self.test_graph.update_monitor()
def test_plotting_svg(self):
self.test_graph.draw_svg()
self.test_graph.cfg_plot_mode = 'svg'
self.test_graph.update_monitor()
# call twice to have both creation and update of plot
self.test_graph.update_monitor()
def test_dummy_cal_three_qubit_graph(self):
fn = os.path.join(test_dir, 'three_qubit_graph.yaml')
DAG = nx.readwrite.read_yaml(fn)
DAG.set_all_node_states('needs calibration')
DAG.cfg_plot_mode = None
DAG.maintain_node('Chevron q0-q1')
self.assertEqual(DAG.get_node_state('Chevron q0-q1'), 'good')
self.assertEqual(DAG.get_node_state('CZ q0-q1'), 'needs calibration')
def test_write_read_yaml(self):
"""
Mostly an example on how to read and write, but also test for
weird objects being present.
"""
self.test_graph.nodes()['C']['state'] = 'good'
self.test_graph.nodes()['B']['state'] = 'unknown'
fn = os.path.join(test_dir, 'nx_test_graph.yaml')
nx.readwrite.write_yaml(self.test_graph, fn)
read_testgraph = nx.readwrite.read_yaml(fn)
self.assertTrue(isinstance(read_testgraph, AutoDepGraph_DAG))
self.assertEqual(read_testgraph.nodes()['C']['state'], 'good')
self.assertEqual(read_testgraph.nodes()['B']['state'], 'unknown')
def test_adding_edge_nonexistent_node(self):
test_graph = AutoDepGraph_DAG('test graph')
test_graph.add_node('A')
with self.assertRaises(KeyError):
test_graph.add_edge('A', 'B')
with self.assertRaises(KeyError):
test_graph.add_edge('B', 'A')
# def propagate_error(self, state):
# '''
# Sets the state of this node to 'state' and calls this method for all
# child nodes (nodes that depend on this node). Used for recursively
# propagate errors.
# '''
# self.state(state)
# for child_name in self.children():
# # This will result in a depth-first search through the graph
# # that is quite inefficient and can visit many nodes multiple
# # times. We don't really care though, since the graph shouldn't
# # larger than ~100 nodes.
# self.find_instrument(child_name).propagate_error(state)
|
{
"content_hash": "28cb12e9e70a8abb54a296cc164ccb80",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 98,
"avg_line_length": 41.627906976744185,
"alnum_prop": 0.6093575418994414,
"repo_name": "AdriaanRol/AutoDepGraph",
"id": "0b766784c030f24fdf9818f39126d2f04af79ff7",
"size": "7160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autodepgraph/tests/test_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "184951"
},
{
"name": "HTML",
"bytes": "6553"
},
{
"name": "Python",
"bytes": "37190"
},
{
"name": "SWIG",
"bytes": "8371"
}
],
"symlink_target": ""
}
|
""" Wrapper around the dynamo3 RateLimit class """
from future.utils import python_2_unicode_compatible, iteritems, itervalues
from dynamo3 import RateLimit
@python_2_unicode_compatible
class TableLimits(object):
""" Wrapper around :class:`dynamo3.RateLimit` """
def __init__(self):
self.total = {}
self.default = {}
self.indexes = {}
self.tables = {}
def _compute_limit(self, limit, throughput):
""" Compute a percentage limit or return a point limit """
if limit[-1] == '%':
return throughput * float(limit[:-1]) / 100.
else:
return float(limit)
def get_limiter(self, table_descriptions):
""" Construct a RateLimit object from the throttle declarations """
table_caps = {}
for table in table_descriptions:
limit = self.tables.get(table.name) or self.default
# Add the table limit
if limit:
table_caps[table.name] = {
'read': self._compute_limit(limit['read'],
table.read_throughput),
'write': self._compute_limit(limit['write'],
table.write_throughput),
}
if table.name not in self.indexes:
continue
# Add the global index limits
for index in itervalues(table.global_indexes):
limit = (self.indexes[table.name].get(index.name) or
self.default)
if limit:
cap = table_caps.setdefault(table.name, {})
cap[index.name] = {
'read': self._compute_limit(limit['read'],
index.read_throughput),
'write': self._compute_limit(limit['write'],
index.write_throughput),
}
kwargs = {
'table_caps': table_caps,
}
if self.total:
kwargs['total_read'] = float(self.total['read'])
kwargs['total_write'] = float(self.total['write'])
return RateLimit(**kwargs)
def __nonzero__(self):
return (bool(self.tables) or bool(self.indexes) or
bool(self.default) or bool(self.total))
def _set_limit(self, data, key, read, write):
""" Set a limit or delete if non provided """
if read != '0' or write != '0':
data[key] = {
'read': read,
'write': write,
}
elif key in data:
del data[key]
def set_default_limit(self, read='0', write='0'):
""" Set the default table/index limit """
if read == '0' and write == '0':
self.default = {}
return
self.default = {
'read': read,
'write': write,
}
def set_total_limit(self, read='0', write='0'):
""" Set the total throughput limit """
if read == '0' and write == '0':
self.total = {}
return
if not read.isdigit() or not write.isdigit():
raise ValueError("Total read/write limits must be a point value")
self.total = {
'read': read,
'write': write,
}
def set_table_limit(self, tablename, read='0', write='0'):
""" Set the limit on a table """
self._set_limit(self.tables, tablename, read, write)
def set_index_limit(self, tablename, indexname, read='0', write='0'):
""" Set the limit on a global index """
index_data = self.indexes.setdefault(tablename, {})
self._set_limit(index_data, indexname, read, write)
if not index_data:
del self.indexes[tablename]
def load(self, data):
""" Load the configuration from a save() dict """
self.total = data.get('total', {})
self.default = data.get('default', {})
self.tables = {}
self.indexes = {}
for tablename, limit in iteritems(data.get('tables', {})):
self.set_table_limit(tablename, **limit)
for tablename, index_data in iteritems(data.get('indexes', {})):
for indexname, limit in iteritems(index_data):
self.set_index_limit(tablename, indexname, **limit)
def __str__(self):
lines = []
if self.total:
lines.append("Total: %(read)s, %(write)s" % self.total)
if self.default:
lines.append("Default: %(read)s, %(write)s" % self.default)
for tablename, table_limit in iteritems(self.tables):
lines.append("%s: %s, %s" % (tablename, table_limit['read'],
table_limit['write']))
indexes = self.indexes.get(tablename, {})
for indexname, limit in iteritems(indexes):
lines.append("%s:%s: %s, %s" % (tablename, indexname,
limit['read'], limit['write']))
# Add all the throttled indexes that don't have their table throttled.
for tablename, data in iteritems(self.indexes):
if tablename in self.tables:
continue
for indexname, limit in iteritems(data):
lines.append("%s:%s: %s, %s" % (tablename, indexname,
limit['read'], limit['write']))
if lines:
return '\n'.join(lines)
else:
return "No throttle"
def save(self):
""" Wrapper around __json__ """
return self.__json__()
def __json__(self, *_):
""" I dunno, I guess I thought this was useful. """
return {
'tables': self.tables,
'indexes': self.indexes,
'total': self.total,
'default': self.default,
}
|
{
"content_hash": "bd66d47b8391dbf15f7b1ff41bac8cff",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 38.464516129032255,
"alnum_prop": 0.49966454209996647,
"repo_name": "mathcamp/dql",
"id": "583f0bf7afbacf9fb0f90ac3dbc0287937ea68d2",
"size": "5962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dql/throttle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "247512"
},
{
"name": "Shell",
"bytes": "1328"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from .. import Error, Tags, Warning, register
REFERRER_POLICY_VALUES = {
'no-referrer', 'no-referrer-when-downgrade', 'origin',
'origin-when-cross-origin', 'same-origin', 'strict-origin',
'strict-origin-when-cross-origin', 'unsafe-url',
}
SECRET_KEY_INSECURE_PREFIX = 'django-insecure-'
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5
W001 = Warning(
"You do not have 'django.middleware.security.SecurityMiddleware' "
"in your MIDDLEWARE so the SECURE_HSTS_SECONDS, "
"SECURE_CONTENT_TYPE_NOSNIFF, SECURE_BROWSER_XSS_FILTER, "
"SECURE_REFERRER_POLICY, and SECURE_SSL_REDIRECT settings will have no "
"effect.",
id='security.W001',
)
W002 = Warning(
"You do not have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE, so your pages will not be served with an "
"'x-frame-options' header. Unless there is a good reason for your "
"site to be served in a frame, you should consider enabling this "
"header to help prevent clickjacking attacks.",
id='security.W002',
)
W004 = Warning(
"You have not set a value for the SECURE_HSTS_SECONDS setting. "
"If your entire site is served only over SSL, you may want to consider "
"setting a value and enabling HTTP Strict Transport Security. "
"Be sure to read the documentation first; enabling HSTS carelessly "
"can cause serious, irreversible problems.",
id='security.W004',
)
W005 = Warning(
"You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
"Without this, your site is potentially vulnerable to attack "
"via an insecure connection to a subdomain. Only set this to True if "
"you are certain that all subdomains of your domain should be served "
"exclusively via SSL.",
id='security.W005',
)
W006 = Warning(
"Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
"so your pages will not be served with an "
"'X-Content-Type-Options: nosniff' header. "
"You should consider enabling this header to prevent the "
"browser from identifying content types incorrectly.",
id='security.W006',
)
W008 = Warning(
"Your SECURE_SSL_REDIRECT setting is not set to True. "
"Unless your site should be available over both SSL and non-SSL "
"connections, you may want to either set this setting True "
"or configure a load balancer or reverse-proxy server "
"to redirect all connections to HTTPS.",
id='security.W008',
)
W009 = Warning(
"Your SECRET_KEY has less than %(min_length)s characters, less than "
"%(min_unique_chars)s unique characters, or it's prefixed with "
"'%(insecure_prefix)s' indicating that it was generated automatically by "
"Django. Please generate a long and random SECRET_KEY, otherwise many of "
"Django's security-critical features will be vulnerable to attack." % {
'min_length': SECRET_KEY_MIN_LENGTH,
'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
'insecure_prefix': SECRET_KEY_INSECURE_PREFIX,
},
id='security.W009',
)
W018 = Warning(
"You should not have DEBUG set to True in deployment.",
id='security.W018',
)
W019 = Warning(
"You have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE, but X_FRAME_OPTIONS is not set to 'DENY'. "
"Unless there is a good reason for your site to serve other parts of "
"itself in a frame, you should change it to 'DENY'.",
id='security.W019',
)
W020 = Warning(
"ALLOWED_HOSTS must not be empty in deployment.",
id='security.W020',
)
W021 = Warning(
"You have not set the SECURE_HSTS_PRELOAD setting to True. Without this, "
"your site cannot be submitted to the browser preload list.",
id='security.W021',
)
W022 = Warning(
'You have not set the SECURE_REFERRER_POLICY setting. Without this, your '
'site will not send a Referrer-Policy header. You should consider '
'enabling this header to protect user privacy.',
id='security.W022',
)
E023 = Error(
'You have set the SECURE_REFERRER_POLICY setting to an invalid value.',
hint='Valid values are: {}.'.format(', '.join(sorted(REFERRER_POLICY_VALUES))),
id='security.E023',
)
E100 = Error(
"DEFAULT_HASHING_ALGORITHM must be 'sha1' or 'sha256'.",
id='security.E100',
)
def _security_middleware():
return 'django.middleware.security.SecurityMiddleware' in settings.MIDDLEWARE
def _xframe_middleware():
return 'django.middleware.clickjacking.XFrameOptionsMiddleware' in settings.MIDDLEWARE
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
passed_check = _security_middleware()
return [] if passed_check else [W001]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
passed_check = _xframe_middleware()
return [] if passed_check else [W002]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS
return [] if passed_check else [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True
)
return [] if passed_check else [W005]
@register(Tags.security, deploy=True)
def check_sts_preload(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_PRELOAD is True
)
return [] if passed_check else [W021]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_CONTENT_TYPE_NOSNIFF is True
)
return [] if passed_check else [W006]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_SSL_REDIRECT is True
)
return [] if passed_check else [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
try:
secret_key = settings.SECRET_KEY
except (ImproperlyConfigured, AttributeError):
passed_check = False
else:
passed_check = (
len(set(secret_key)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
len(secret_key) >= SECRET_KEY_MIN_LENGTH and
not secret_key.startswith(SECRET_KEY_INSECURE_PREFIX)
)
return [] if passed_check else [W009]
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
passed_check = not settings.DEBUG
return [] if passed_check else [W018]
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
passed_check = (
not _xframe_middleware() or
settings.X_FRAME_OPTIONS == 'DENY'
)
return [] if passed_check else [W019]
@register(Tags.security, deploy=True)
def check_allowed_hosts(app_configs, **kwargs):
return [] if settings.ALLOWED_HOSTS else [W020]
@register(Tags.security, deploy=True)
def check_referrer_policy(app_configs, **kwargs):
if _security_middleware():
if settings.SECURE_REFERRER_POLICY is None:
return [W022]
# Support a comma-separated string or iterable of values to allow fallback.
if isinstance(settings.SECURE_REFERRER_POLICY, str):
values = {v.strip() for v in settings.SECURE_REFERRER_POLICY.split(',')}
else:
values = set(settings.SECURE_REFERRER_POLICY)
if not values <= REFERRER_POLICY_VALUES:
return [E023]
return []
# RemovedInDjango40Warning
@register(Tags.security)
def check_default_hashing_algorithm(app_configs, **kwargs):
if settings.DEFAULT_HASHING_ALGORITHM not in {'sha1', 'sha256'}:
return [E100]
return []
|
{
"content_hash": "51bfa8f8ca72d11ad68f04ee4a6853e3",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 90,
"avg_line_length": 32.991902834008094,
"alnum_prop": 0.6878144557614431,
"repo_name": "koordinates/django",
"id": "8cf3d1d61e1043c1527b6dee8c5bcca64b3e4145",
"size": "8149",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable/3.2.x-kx",
"path": "django/core/checks/security/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84917"
},
{
"name": "HTML",
"bytes": "223820"
},
{
"name": "JavaScript",
"bytes": "139791"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "14472067"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
"""Runs the 'ar' command after removing its output file first.
This script is invoked like:
python gcc_ar_wrapper.py --ar=$AR --output=$OUT $OP $INPUTS
to do the equivalent of:
rm -f $OUT && $AR $OP $OUT $INPUTS
"""
import argparse
import os
import subprocess
import sys
import wrapper_utils
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--ar',
required=True,
help='The ar binary to run',
metavar='PATH')
parser.add_argument('--output',
required=True,
help='Output archive file',
metavar='ARCHIVE')
parser.add_argument('--plugin',
help='Load plugin')
parser.add_argument('--resource-whitelist',
help='Merge all resource whitelists into a single file.',
metavar='PATH')
parser.add_argument('operation',
help='Operation on the archive')
parser.add_argument('inputs', nargs='+',
help='Input files')
args = parser.parse_args()
# Specifies the type of object file ar should examine.
# The ar on linux ignores this option.
object_mode = []
if sys.platform.startswith('aix'):
# The @file feature is not available on ar for AIX.
# For linux (and other posix like systems), the @file_name
# option reads the contents of file_name as command line arguments.
# For AIX we must parse these (rsp files) manually.
# Read rspfile.
args.inputs = wrapper_utils.ResolveRspLinks(args.inputs)
object_mode = ['-X64']
else:
if args.resource_whitelist:
whitelist_candidates = wrapper_utils.ResolveRspLinks(args.inputs)
wrapper_utils.CombineResourceWhitelists(
whitelist_candidates, args.resource_whitelist)
command = [args.ar] + object_mode + [args.operation]
if args.plugin is not None:
command += ['--plugin', args.plugin]
command.append(args.output)
command += args.inputs
# Remove the output file first.
try:
os.remove(args.output)
except OSError as e:
if e.errno != os.errno.ENOENT:
raise
# Now just run the ar command.
return subprocess.call(wrapper_utils.CommandToRun(command))
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "cfdd134f31318bf057e395da4d100bf3",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 31.82191780821918,
"alnum_prop": 0.6229014205768403,
"repo_name": "chrisdickinson/nojs",
"id": "893b859f0826280105569204e7dc3c3a6b081a4a",
"size": "2508",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/toolchain/gcc_ar_wrapper.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "52243"
},
{
"name": "JavaScript",
"bytes": "55472"
},
{
"name": "Python",
"bytes": "16760"
}
],
"symlink_target": ""
}
|
from django.db import models
class Setting(models.Model):
VALUE_TYPES = (
("B", "Bool"),
("S", "String"),
("N", "Number")
)
key = models.CharField(max_length=100, primary_key=True)
value = models.CharField(max_length=2000)
description = models.CharField(max_length=2000)
type = models.CharField(max_length=10, choices=VALUE_TYPES, default="S")
order = models.IntegerField(default=0)
def get_value(self):
if self.type == "B":
return self.value == "True"
elif self.type == "N":
return float(self.value)
else:
return self.value
|
{
"content_hash": "19e3cf60fc9d882f06fddeeb1ef82c45",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.5822981366459627,
"repo_name": "narcolepticsnowman/GarageWarden",
"id": "1ed2b701da806c26f52585c772792f63517f9551",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GarageWarden/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1393"
},
{
"name": "HTML",
"bytes": "5831"
},
{
"name": "JavaScript",
"bytes": "7427"
},
{
"name": "Python",
"bytes": "25499"
}
],
"symlink_target": ""
}
|
from link import Wrapper
from link.utils import list_to_dataframe
class ElasticSearch(Wrapper):
"""
wraps an ElasticSearch connection and extends the functionality
to do tasks like put queries into dataframes
"""
def __init__(self, wrap_name = None, **kwargs):
self.options = {}
if kwargs:
self.options.update(kwargs)
#get the connection and pass it to wrapper os the wrapped object
connection = self.create_connection()
super(ElasticSearch, self).__init__(wrap_name, connection)
def search(self, query):
"""
Search through ElasticSearch records and return the result as dict
"""
response = self._wrapped.search(
index = self.options['index'],
doc_type = self.options['doc_type'],
body = query,
)
return response
def scan(self, query, chunk_size=10000, scroll='10m'):
"""
Query ElasticSearch records, suitable for large response.
Similar to 'search' method but works better with large result sets
(hundreds of thousands of records) as it receives data from
ElasticSearch in chunks.
Returns generator object which can be used to grow Pandas
DataFrame iteratively, e.g.:
df = DataFrame()
for response in esdb.scan(query):
df = pandas.concat([df, DataFrame(response['hits']['hits'])])
# do something with df here
"""
from elasticsearch.exceptions import TransportError
response = self._wrapped.search(
index=self.options['index'],
doc_type = self.options['doc_type'],
body = query,
search_type = 'scan',
scroll = scroll,
size=chunk_size,
)
scroll_id = response['_scroll_id']
while True:
try:
response = self._wrapped.scroll(scroll_id=scroll_id, scroll=scroll)
yield response
except TransportError as e:
break
def index(self, doc):
"""
Add a new entry to ElasticSearch index
"""
response = self._wrapped.index(
index=self.options['index'],
doc_type=self.options['doc_type'],
body=doc,
)
return response
def create_connection(self):
"""
Override the create_connection from the Wrapper
class which get's called in it's initializer
"""
from elasticsearch import Elasticsearch
return Elasticsearch(hosts=self.options['hosts'])
|
{
"content_hash": "d9e9547a56ffa8ef3e7250233ecf30b5",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 83,
"avg_line_length": 32.7125,
"alnum_prop": 0.5842567825754681,
"repo_name": "dhrod5/link",
"id": "898e4fd56884ab8a8fac3038a081a643d6db4267",
"size": "2617",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "link/wrappers/elasticsearchwrappers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "163150"
},
{
"name": "Vim script",
"bytes": "883"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.utils import six
from django.utils.six.moves import zip
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import INVALID_FORM_DATA
from reviewboard.reviews.models import DefaultReviewer, Group
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (default_reviewer_item_mimetype,
default_reviewer_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_default_reviewer_item_url,
get_default_reviewer_list_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(BaseWebAPITestCase):
"""Testing the DefaultReviewerResource list APIs."""
fixtures = ['test_users']
basic_post_fixtures = ['test_scmtools']
basic_post_use_admin = True
sample_api_url = 'default-reviewers/'
resource = resources.default_reviewer
test_http_methods = ('POST',)
#
# HTTP GET tests
#
@add_fixtures(['test_scmtools'])
def test_get(self):
"""Testing the GET default-reviewers/ API"""
user = User.objects.get(username='doc')
group = Group.objects.create(name='group1')
repository = self.create_repository()
DefaultReviewer.objects.create(name='default1', file_regex='.*')
default_reviewer = DefaultReviewer.objects.create(
name='default2', file_regex='/foo')
default_reviewer.people.add(user)
default_reviewer.groups.add(group)
default_reviewer.repository.add(repository)
rsp = self.api_get(get_default_reviewer_list_url(),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 2)
self.assertEqual(default_reviewers[0]['name'], 'default1')
self.assertEqual(default_reviewers[0]['file_regex'], '.*')
self.assertEqual(default_reviewers[1]['name'], 'default2')
self.assertEqual(default_reviewers[1]['file_regex'], '/foo')
users = default_reviewers[1]['users']
self.assertEqual(len(users), 1)
self.assertEqual(users[0]['title'], user.username)
groups = default_reviewers[1]['groups']
self.assertEqual(len(groups), 1)
self.assertEqual(groups[0]['title'], group.name)
repos = default_reviewers[1]['repositories']
self.assertEqual(len(repos), 1)
self.assertEqual(repos[0]['title'], repository.name)
@add_fixtures(['test_site'])
def test_get_with_site(self):
"""Testing the GET default-reviewers/ API with a local site"""
local_site = self.get_local_site(name=self.local_site_name)
DefaultReviewer.objects.create(name='default1', file_regex='.*',
local_site=local_site)
DefaultReviewer.objects.create(name='default2', file_regex='/foo')
# Test for non-LocalSite ones.
rsp = self.api_get(get_default_reviewer_list_url(),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 1)
self.assertEqual(default_reviewers[0]['name'], 'default2')
self.assertEqual(default_reviewers[0]['file_regex'], '/foo')
# Now test for the ones in the LocalSite.
self._login_user(local_site=True)
rsp = self.api_get(get_default_reviewer_list_url(self.local_site_name),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 1)
self.assertEqual(default_reviewers[0]['name'], 'default1')
self.assertEqual(default_reviewers[0]['file_regex'], '.*')
@add_fixtures(['test_site'])
def test_get_with_site_no_access(self):
"""Testing the GET default-reviewers/ API
with a local site and Permission Denied error
"""
self.api_get(get_default_reviewer_list_url(self.local_site_name),
expected_status=403)
@add_fixtures(['test_scmtools'])
def test_get_with_repositories(self):
"""Testing the GET default-reviewers/?repositories= API"""
repository1 = self.create_repository(name='repo 1')
repository2 = self.create_repository(name='repo 2')
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
default_reviewer.repository.add(repository1)
default_reviewer.repository.add(repository2)
default_reviewer = DefaultReviewer.objects.create(
name='default2', file_regex='/foo')
default_reviewer.repository.add(repository2)
# Test singling out one repository.
rsp = self.api_get('%s?repositories=%s'
% (get_default_reviewer_list_url(), repository2.pk),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 2)
self.assertEqual(default_reviewers[0]['name'], 'default1')
self.assertEqual(default_reviewers[1]['name'], 'default2')
# Test requiring more than one.
rsp = self.api_get('%s?repositories=%s,%s'
% (get_default_reviewer_list_url(), repository1.pk,
repository2.pk),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 1)
self.assertEqual(default_reviewers[0]['name'], 'default1')
def test_get_with_users(self):
"""Testing the GET default-reviewers/?users= API"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
default_reviewer.people.add(user1)
default_reviewer.people.add(user2)
default_reviewer = DefaultReviewer.objects.create(
name='default2', file_regex='/foo')
default_reviewer.people.add(user2)
# Test singling out one user.
rsp = self.api_get('%s?users=dopey' % get_default_reviewer_list_url(),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 2)
self.assertEqual(default_reviewers[0]['name'], 'default1')
self.assertEqual(default_reviewers[1]['name'], 'default2')
# Test requiring more than one.
rsp = self.api_get(
'%s?users=doc,dopey' % get_default_reviewer_list_url(),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 1)
self.assertEqual(default_reviewers[0]['name'], 'default1')
def test_get_with_groups(self):
"""Testing the GET default-reviewers/?groups= API"""
group1 = Group.objects.create(name='group1')
group2 = Group.objects.create(name='group2')
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
default_reviewer.groups.add(group1)
default_reviewer.groups.add(group2)
default_reviewer = DefaultReviewer.objects.create(
name='default2', file_regex='/foo')
default_reviewer.groups.add(group2)
# Test singling out one group.
rsp = self.api_get(
'%s?groups=group2' % get_default_reviewer_list_url(),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 2)
self.assertEqual(default_reviewers[0]['name'], 'default1')
self.assertEqual(default_reviewers[1]['name'], 'default2')
# Test requiring more than one.
rsp = self.api_get(
'%s?groups=group1,group2' % get_default_reviewer_list_url(),
expected_mimetype=default_reviewer_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewers = rsp['default_reviewers']
self.assertEqual(len(default_reviewers), 1)
self.assertEqual(default_reviewers[0]['name'], 'default1')
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
if post_valid_data:
self.create_review_group(name='group1',
with_local_site=with_local_site)
self.create_review_group(name='group2',
with_local_site=with_local_site)
repo1 = self.create_repository(name='Test Repo 1',
with_local_site=with_local_site,
path='test-repo-1')
repo2 = self.create_repository(name='Test Repo 2',
with_local_site=with_local_site,
path='test-repo-2')
if with_local_site:
site = self.get_local_site(name=local_site_name)
site.users.add(User.objects.get(username='doc'))
site.users.add(User.objects.get(username='dopey'))
post_data = {
'name': 'my-default',
'file_regex': '.*',
'users': 'doc,dopey',
'groups': 'group1,group2',
'repositories': ','.join([six.text_type(repo1.pk),
six.text_type(repo2.pk)]),
}
else:
post_data = {}
return (get_default_reviewer_list_url(local_site_name),
default_reviewer_item_mimetype,
post_data,
[local_site_name])
def check_post_result(self, user, rsp, local_site_name):
self.assertIn('default_reviewer', rsp)
item_rsp = rsp['default_reviewer']
self.assertEqual(item_rsp['name'], 'my-default')
self.assertEqual(item_rsp['file_regex'], '.*')
default_reviewer = DefaultReviewer.objects.get(pk=item_rsp['id'])
self.assertEqual(default_reviewer.name, 'my-default')
self.assertEqual(default_reviewer.file_regex, '.*')
if local_site_name:
self.assertEqual(default_reviewer.local_site.name, local_site_name)
people = list(default_reviewer.people.all())
self.assertEqual(len(people), 2)
self.assertEqual(people[0].username, 'doc')
self.assertEqual(people[1].username, 'dopey')
groups = list(default_reviewer.groups.all())
self.assertEqual(len(groups), 2)
self.assertEqual(groups[0].name, 'group1')
self.assertEqual(groups[1].name, 'group2')
repos = list(default_reviewer.repository.all())
self.assertEqual(len(repos), 2)
self.assertEqual(repos[0].name, 'Test Repo 1')
self.assertEqual(repos[1].name, 'Test Repo 2')
@add_fixtures(['test_users'])
def test_post_with_defaults(self):
"""Testing the POST default-reviewers/ API with field defaults"""
self._login_user(admin=True)
name = 'default1'
file_regex = '.*'
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': name,
'file_regex': file_regex,
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(
pk=rsp['default_reviewer']['id'])
self.assertEqual(default_reviewer.local_site, None)
self.assertEqual(default_reviewer.name, name)
self.assertEqual(default_reviewer.file_regex, file_regex)
@add_fixtures(['test_users'])
def test_post_with_permission_denied(self):
"""Testing the POST default-reviewers/ API
with Permission Denied error
"""
self._login_user()
self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '.*',
},
expected_status=403)
@add_fixtures(['test_users', 'test_site'])
def test_post_with_invalid_regex(self):
"""Testing the POST default-reviewers/ API with an invalid regex"""
self._login_user(admin=True)
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '\\',
},
expected_status=400)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertIn('file_regex', rsp['fields'])
@add_fixtures(['test_users'])
def test_post_with_invalid_username(self):
"""Testing the POST default-reviewers/ API with invalid username"""
self._login_user(admin=True)
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '.*',
'users': 'foo'
},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('users', rsp['fields'])
@add_fixtures(['test_users', 'test_site'])
def test_post_with_user_invalid_site(self):
"""Testing the POST default-reviewers/ API
with user and invalid site
"""
self._login_user(admin=True)
local_site = self.get_local_site(name=self.local_site_name)
rsp = self.api_post(
get_default_reviewer_list_url(local_site),
{
'name': 'default1',
'file_regex': '.*',
'users': 'grumpy'
},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('users', rsp['fields'])
@add_fixtures(['test_users'])
def test_post_with_invalid_group(self):
"""Testing the POST default-reviewers/ API with invalid group"""
self._login_user(admin=True)
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '.*',
'groups': 'foo'
},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('groups', rsp['fields'])
@add_fixtures(['test_users', 'test_site'])
def test_post_with_group_invalid_site(self):
"""Testing the POST default-reviewers/ API
with group and invalid site
"""
self._login_user(admin=True)
local_site = self.get_local_site(name=self.local_site_name)
Group.objects.create(name='group1', local_site=local_site)
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '.*',
'groups': 'group1'
},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('groups', rsp['fields'])
@add_fixtures(['test_users'])
def test_post_with_invalid_repository(self):
"""Testing the POST default-reviewers/ API with invalid repository"""
self._login_user(admin=True)
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '.*',
'repositories': '12345'
},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('repositories', rsp['fields'])
@add_fixtures(['test_users', 'test_site', 'test_scmtools'])
def test_post_with_repository_invalid_site(self):
"""Testing the POST default-reviewers/ API
with repository and invalid site
"""
repository = self.create_repository(with_local_site=True)
self._login_user(admin=True)
rsp = self.api_post(
get_default_reviewer_list_url(),
{
'name': 'default1',
'file_regex': '.*',
'repositories': six.text_type(repository.pk),
},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('repositories', rsp['fields'])
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(BaseWebAPITestCase):
"""Testing the DefaultReviewerResource item APIs."""
fixtures = ['test_users']
basic_get_fixtures = ['test_scmtools']
basic_put_fixtures = ['test_scmtools']
basic_delete_use_admin = True
basic_put_use_admin = True
sample_api_url = 'default-reviewers/<id>/'
resource = resources.default_reviewer
def compare_item(self, item_rsp, default_reviewer):
self.assertEqual(default_reviewer.name, item_rsp['name'])
self.assertEqual(default_reviewer.file_regex, item_rsp['file_regex'])
users = list(default_reviewer.people.all())
for user_rsp, user in zip(item_rsp['users'], users):
self.assertEqual(user_rsp['title'], user.username)
self.assertEqual(len(item_rsp['users']), len(users))
groups = list(default_reviewer.groups.all())
for group_rsp, group in zip(item_rsp['groups'], groups):
self.assertEqual(group_rsp['title'], group.name)
self.assertEqual(len(item_rsp['groups']), len(groups))
repos = list(default_reviewer.repository.all())
for repo_rsp, repo in zip(item_rsp['repositories'], repos):
self.assertEqual(repo_rsp['title'], repo.name)
self.assertEqual(len(item_rsp['repositories']), len(repos))
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
if with_local_site:
local_site = self.get_local_site(name=local_site_name)
else:
local_site = None
default_reviewer = DefaultReviewer.objects.create(
name='default1',
file_regex='.*',
local_site=local_site)
return (get_default_reviewer_item_url(default_reviewer.pk,
local_site_name),
[])
def check_delete_result(self, user):
self.assertEqual(
DefaultReviewer.objects.filter(name='default1').count(),
0)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
if with_local_site:
default_reviewer.local_site = \
self.get_local_site(name=local_site_name)
default_reviewer.save()
default_reviewer.people.add(User.objects.get(username='doc'))
default_reviewer.groups.add(
self.create_review_group(name='group1',
with_local_site=with_local_site))
default_reviewer.repository.add(
self.create_repository(with_local_site=with_local_site))
return (get_default_reviewer_item_url(default_reviewer.pk,
local_site_name),
default_reviewer_item_mimetype,
default_reviewer)
def test_get_not_modified(self):
"""Testing the GET default-reviewers/<id>/ API
with Not Modified response
"""
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
self._testHttpCaching(
get_default_reviewer_item_url(default_reviewer.pk),
check_etags=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
if with_local_site:
local_site = self.get_local_site(name=local_site_name)
local_site.users.add(User.objects.get(username='doc'))
local_site.users.add(User.objects.get(username='dopey'))
default_reviewer.local_site = local_site
default_reviewer.save()
default_reviewer.people.add(User.objects.get(username='doc'))
default_reviewer.groups.add(
self.create_review_group(name='group1',
with_local_site=with_local_site))
repo1 = self.create_repository(with_local_site=with_local_site,
name='Test Repo 1',
path='test-repo-1')
default_reviewer.repository.add(repo1)
if put_valid_data:
self.create_review_group(name='group2',
with_local_site=with_local_site)
repo2 = self.create_repository(with_local_site=with_local_site,
name='Test Repo 2',
path='test-repo-2')
put_data = {
'name': 'New name',
'file_regex': '/foo/',
'users': 'doc,dopey',
'groups': 'group1,group2',
'repositories': ','.join([six.text_type(repo1.pk),
six.text_type(repo2.pk)]),
}
else:
put_data = {}
return (get_default_reviewer_item_url(default_reviewer.pk,
local_site_name),
default_reviewer_item_mimetype,
put_data,
default_reviewer,
[])
def check_put_result(self, user, item_rsp, default_reviewer):
self.assertEqual(item_rsp['name'], 'New name')
self.assertEqual(item_rsp['file_regex'], '/foo/')
default_reviewer = DefaultReviewer.objects.get(pk=item_rsp['id'])
self.assertEqual(default_reviewer.name, 'New name')
self.assertEqual(default_reviewer.file_regex, '/foo/')
people = list(default_reviewer.people.all())
self.assertEqual(len(people), 2)
self.assertEqual(people[0].username, 'doc')
self.assertEqual(people[1].username, 'dopey')
groups = list(default_reviewer.groups.all())
self.assertEqual(len(groups), 2)
self.assertEqual(groups[0].name, 'group1')
self.assertEqual(groups[1].name, 'group2')
repos = list(default_reviewer.repository.all())
self.assertEqual(len(repos), 2)
self.assertEqual(repos[0].name, 'Test Repo 1')
self.assertEqual(repos[1].name, 'Test Repo 2')
@add_fixtures(['test_users'])
def test_put_with_invalid_username(self):
"""Testing the PUT default-reviewers/<id>/ API with invalid username"""
self._login_user(admin=True)
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{'users': 'foo'},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('users', rsp['fields'])
@add_fixtures(['test_users', 'test_site'])
def test_put_with_user_invalid_site(self):
"""Testing the PUT default-reviewers/<id>/ API
with user and invalid site
"""
self._login_user(admin=True)
local_site = self.get_local_site(name=self.local_site_name)
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*', local_site=local_site)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk,
self.local_site_name),
{'users': 'grumpy'},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('users', rsp['fields'])
@add_fixtures(['test_users'])
def test_put_with_invalid_group(self):
"""Testing the PUT default-reviewers/<id>/ API with invalid group"""
self._login_user(admin=True)
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{'groups': 'foo'},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('groups', rsp['fields'])
@add_fixtures(['test_users', 'test_site'])
def test_put_with_group_invalid_site(self):
"""Testing the PUT default-reviewers/<id>/ API
with group and invalid site
"""
self._login_user(admin=True)
local_site = self.get_local_site(name=self.local_site_name)
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
Group.objects.create(name='group1', local_site=local_site)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{'groups': 'group1'},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('groups', rsp['fields'])
@add_fixtures(['test_users'])
def test_put_with_invalid_repository(self):
"""Testing the PUT default-reviewers/<id>/ API
with invalid repository
"""
self._login_user(admin=True)
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{'repositories': '12345'},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('repositories', rsp['fields'])
@add_fixtures(['test_users', 'test_site', 'test_scmtools'])
def test_put_with_repository_invalid_site(self):
"""Testing the PUT default-reviewers/<id>/ API
with repository and invalid site
"""
repository = self.create_repository(with_local_site=True)
default_reviewer = DefaultReviewer.objects.create(
name='default1', file_regex='.*')
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{'repositories': six.text_type(repository.pk)},
expected_status=400)
self.assertIn('fields', rsp)
self.assertIn('repositories', rsp['fields'])
@add_fixtures(['test_users'])
def test_put_clear_groups(self):
"""Testing PUT <URL> API with empty groups field"""
group = Group.objects.create(name='group1')
default_reviewer = DefaultReviewer.objects.create(name='default1',
file_regex='.*')
default_reviewer.groups.add(group)
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{
'file_regex': '.*',
'name': 'default1',
'groups': ''
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(pk=default_reviewer.pk)
self.assertEqual(list(default_reviewer.groups.all()), [])
self.assertIn('default_reviewer', rsp)
self.compare_item(rsp['default_reviewer'], default_reviewer)
@add_fixtures(['test_users'])
def test_put_groups_only_commas(self):
"""Testing PUT <URL> API with groups field containing only commas"""
group = Group.objects.create(name='group1')
default_reviewer = DefaultReviewer.objects.create(name='default1',
file_regex='.*')
default_reviewer.groups.add(group)
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{
'file_regex': '.*',
'name': 'default1',
'groups': ' , , , '
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(pk=default_reviewer.pk)
self.assertEqual(list(default_reviewer.groups.all()), [])
self.assertIn('default_reviewer', rsp)
self.compare_item(rsp['default_reviewer'], default_reviewer)
@add_fixtures(['test_users'])
def test_put_clear_users(self):
"""Testing PUT <URL> API with empty users field"""
doc = User.objects.get(username='doc')
default_reviewer = DefaultReviewer.objects.create(name='default1',
file_regex='.*')
default_reviewer.people.add(doc)
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{
'file_regex': '.*',
'name': 'default1',
'users': ''
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(pk=default_reviewer.pk)
self.assertEqual(list(default_reviewer.people.all()), [])
self.assertIn('default_reviewer', rsp)
self.compare_item(rsp['default_reviewer'], default_reviewer)
@add_fixtures(['test_users'])
def test_put_users_only_commas(self):
"""Testing PUT <URL> API with users field containing only commas"""
doc = User.objects.get(username='doc')
default_reviewer = DefaultReviewer.objects.create(name='default1',
file_regex='.*')
default_reviewer.people.add(doc)
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{
'file_regex': '.*',
'name': 'default1',
'users': ' , , , '
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(pk=default_reviewer.pk)
self.assertEqual(list(default_reviewer.people.all()), [])
self.assertIn('default_reviewer', rsp)
self.compare_item(rsp['default_reviewer'], default_reviewer)
@add_fixtures(['test_users', 'test_scmtools'])
def test_put_clear_repositories(self):
"""Testing PUT <URL> API with empty repositories field"""
repository = self.create_repository()
default_reviewer = DefaultReviewer.objects.create(name='default1',
file_regex='.*')
default_reviewer.repository.add(repository)
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{
'file_regex': '.*',
'name': 'default1',
'repositories': '',
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(pk=default_reviewer.pk)
self.assertEqual(list(default_reviewer.repository.all()), [])
self.assertIn('default_reviewer', rsp)
self.compare_item(rsp['default_reviewer'], default_reviewer)
@add_fixtures(['test_users', 'test_scmtools'])
def test_put_repositories_only_comma(self):
"""Testing PUT <URL> API with repositories field containing only
commas
"""
repository = self.create_repository()
default_reviewer = DefaultReviewer.objects.create(name='default1',
file_regex='.*')
default_reviewer.repository.add(repository)
self._login_user(admin=True)
rsp = self.api_put(
get_default_reviewer_item_url(default_reviewer.pk),
{
'file_regex': '.*',
'name': 'default1',
'repositories': ' , , , ',
},
expected_mimetype=default_reviewer_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
default_reviewer = DefaultReviewer.objects.get(pk=default_reviewer.pk)
self.assertEqual(list(default_reviewer.repository.all()), [])
self.assertIn('default_reviewer', rsp)
self.compare_item(rsp['default_reviewer'], default_reviewer)
|
{
"content_hash": "079eb06af9f600cc7777bd5cd95720a1",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 79,
"avg_line_length": 37.783964365256125,
"alnum_prop": 0.5746831712348953,
"repo_name": "brennie/reviewboard",
"id": "d3cff52acc079023d15b6982a86729de4a94efdb",
"size": "33930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/tests/test_default_reviewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "247208"
},
{
"name": "HTML",
"bytes": "204351"
},
{
"name": "JavaScript",
"bytes": "2557855"
},
{
"name": "Python",
"bytes": "5241630"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
import os
import xlrd
from TableCode.cs_data import GenCSTableData
from TableCode.cs_file import GenCSTableManagerFile, genCSLoadTablesFile
from TableCode.go_data import GenGolangTableData
from TableCode.go_file import GenGoTableManagerFile, genGolangLoadTablesFile
from const import excel_dir
def processExcel(filePath, fileName):
if "." in fileName:
fileName = fileName.split('.')
fileName = fileName[0]
data = xlrd.open_workbook(filePath)
table = data.sheets()[0]
nrows = table.nrows
ncols = table.ncols
cs_fields_index = []#filed index
golang_fields_index = []#filed index
tableKeysIndex = []
if table.nrows == 0 or table.ncols == 0:
print("empty file:" + fileName)
for index in range(ncols):
CS_row = table.cell(1, index).value
if CS_row == "C" or CS_row == "CS":
cs_fields_index.append(index)
if CS_row == "S" or CS_row == "CS":
golang_fields_index.append(index)
if len(cs_fields_index) > 0:
cs_files.append(fileName)
GenCSTableManagerFile(fileName, cs_fields_index, table)
GenCSTableData(fileName, cs_fields_index, table)
if len(golang_fields_index) > 0:
go_files.append(fileName)
GenGoTableManagerFile(fileName, golang_fields_index, table)
GenGolangTableData(fileName, golang_fields_index, table)
cs_files = []
go_files = []
def excel_start():
excels = []
for dir in os.listdir(excel_dir): # 遍历当前目录所有文件和目录
fileName = dir
child = os.path.join(excel_dir, dir) # 加上路径,否则找不到
if os.path.isdir(child): # 如果是目录,则继续遍历子目录的文件
for file in os.listdir(child):
if "~" in child:
continue
if os.path.splitext(file)[1] == '.xlsx': # 分割文件名和文件扩展名,并且扩展名为'proto'
fileName = file
processExcel(child, fileName)
excels.append(fileName)
elif os.path.isfile(child): # 如果是文件,则直接判断扩展名
if "~" in child:
continue
if os.path.splitext(child)[1] == '.xlsx':
processExcel(child, fileName)
excels.append(fileName)
genCSLoadTablesFile(cs_files)
genGolangLoadTablesFile(go_files)
|
{
"content_hash": "3f752be07c23ba4c8d03c043b7520e48",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 85,
"avg_line_length": 33.5,
"alnum_prop": 0.6136962247585601,
"repo_name": "Mikejinhua/UnitySocketProtobuf3Demo",
"id": "e429b1e29f7ccc7a997895c815fce59e827679d3",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/TableCode/excel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C#",
"bytes": "1091367"
},
{
"name": "Go",
"bytes": "16645"
},
{
"name": "Python",
"bytes": "16946"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
}
|
"""Contain all zeromq devices needed by OCT
"""
from __future__ import print_function, unicode_literals
import zmq
def forwarder(frontend, backend):
"""Simple pub/sub forwarder
:param int frontend: fontend zeromq port
:param int backend: backend zeromq port
"""
try:
context = zmq.Context()
front_sub = context.socket(zmq.SUB)
front_sub.bind("tcp://*:%d" % frontend)
front_sub.setsockopt_string(zmq.SUBSCRIBE, "")
back_pub = context.socket(zmq.PUB)
back_pub.bind("tcp://*:%d" % backend)
print("forwarder started, backend on port : %d\tfrontend on port: %d" % (backend, frontend))
zmq.proxy(front_sub, back_pub)
except Exception as e:
print(e)
finally:
front_sub.close()
back_pub.close()
context.term()
def streamer(frontend, backend):
"""Simple push/pull streamer
:param int frontend: fontend zeromq port
:param int backend: backend zeromq port
"""
try:
context = zmq.Context()
front_pull = context.socket(zmq.PULL)
front_pull.set_hwm(0)
front_pull.bind("tcp://*:%d" % frontend)
back_push = context.socket(zmq.PUSH)
back_push.bind("tcp://*:%d" % backend)
print("streamer started, backend on port : %d\tfrontend on port: %d" % (backend, frontend))
zmq.proxy(front_pull, back_push)
except Exception as e:
print(e)
finally:
front_pull.close()
back_push.close()
context.term()
|
{
"content_hash": "30ba4968692f0e3ba63d4e22cb1aae94",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 100,
"avg_line_length": 26.912280701754387,
"alnum_prop": 0.6023468057366362,
"repo_name": "karec/oct",
"id": "2878f4eebcaa1f4ece07b142476a8c1153d7321d",
"size": "1534",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oct/core/devices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1932"
},
{
"name": "HTML",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "44417"
}
],
"symlink_target": ""
}
|
from django import forms
class SendEmailForm(forms.Form):
to_email = forms.CharField()
subject = forms.CharField()
message = forms.CharField()
class SendHTTPRequestForm(forms.Form):
http_method = forms.ChoiceField(choices=(
("get", "GET"),
("post", "POST"),
("head", "HEAD"),
("put", "PUT"),
))
http_data = forms.CharField()
http_url = forms.URLField()
|
{
"content_hash": "1f1d712f2530cee77fc201b3508b4a2a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 45,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.592326139088729,
"repo_name": "theju/dtwt",
"id": "47f8544d8ba9d535c57b500c9d8ed95867e9c676",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "action/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22649"
}
],
"symlink_target": ""
}
|
from ._models_py3 import AadAuthenticationParameters
from ._models_py3 import AddressSpace
from ._models_py3 import ApplicationGateway
from ._models_py3 import ApplicationGatewayAuthenticationCertificate
from ._models_py3 import ApplicationGatewayAutoscaleConfiguration
from ._models_py3 import ApplicationGatewayAvailableSslOptions
from ._models_py3 import ApplicationGatewayAvailableSslPredefinedPolicies
from ._models_py3 import ApplicationGatewayAvailableWafRuleSetsResult
from ._models_py3 import ApplicationGatewayBackendAddress
from ._models_py3 import ApplicationGatewayBackendAddressPool
from ._models_py3 import ApplicationGatewayBackendHealth
from ._models_py3 import ApplicationGatewayBackendHealthHttpSettings
from ._models_py3 import ApplicationGatewayBackendHealthOnDemand
from ._models_py3 import ApplicationGatewayBackendHealthPool
from ._models_py3 import ApplicationGatewayBackendHealthServer
from ._models_py3 import ApplicationGatewayBackendHttpSettings
from ._models_py3 import ApplicationGatewayConnectionDraining
from ._models_py3 import ApplicationGatewayCustomError
from ._models_py3 import ApplicationGatewayFirewallDisabledRuleGroup
from ._models_py3 import ApplicationGatewayFirewallExclusion
from ._models_py3 import ApplicationGatewayFirewallRule
from ._models_py3 import ApplicationGatewayFirewallRuleGroup
from ._models_py3 import ApplicationGatewayFirewallRuleSet
from ._models_py3 import ApplicationGatewayFrontendIPConfiguration
from ._models_py3 import ApplicationGatewayFrontendPort
from ._models_py3 import ApplicationGatewayHeaderConfiguration
from ._models_py3 import ApplicationGatewayHttpListener
from ._models_py3 import ApplicationGatewayIPConfiguration
from ._models_py3 import ApplicationGatewayListResult
from ._models_py3 import ApplicationGatewayOnDemandProbe
from ._models_py3 import ApplicationGatewayPathRule
from ._models_py3 import ApplicationGatewayProbe
from ._models_py3 import ApplicationGatewayProbeHealthResponseMatch
from ._models_py3 import ApplicationGatewayRedirectConfiguration
from ._models_py3 import ApplicationGatewayRequestRoutingRule
from ._models_py3 import ApplicationGatewayRewriteRule
from ._models_py3 import ApplicationGatewayRewriteRuleActionSet
from ._models_py3 import ApplicationGatewayRewriteRuleCondition
from ._models_py3 import ApplicationGatewayRewriteRuleSet
from ._models_py3 import ApplicationGatewaySku
from ._models_py3 import ApplicationGatewaySslCertificate
from ._models_py3 import ApplicationGatewaySslPolicy
from ._models_py3 import ApplicationGatewaySslPredefinedPolicy
from ._models_py3 import ApplicationGatewayTrustedRootCertificate
from ._models_py3 import ApplicationGatewayUrlPathMap
from ._models_py3 import ApplicationGatewayWebApplicationFirewallConfiguration
from ._models_py3 import ApplicationRuleCondition
from ._models_py3 import ApplicationSecurityGroup
from ._models_py3 import ApplicationSecurityGroupListResult
from ._models_py3 import AuthorizationListResult
from ._models_py3 import AutoApprovedPrivateLinkService
from ._models_py3 import AutoApprovedPrivateLinkServicesResult
from ._models_py3 import Availability
from ._models_py3 import AvailableDelegation
from ._models_py3 import AvailableDelegationsResult
from ._models_py3 import AvailablePrivateEndpointType
from ._models_py3 import AvailablePrivateEndpointTypesResult
from ._models_py3 import AvailableProvidersList
from ._models_py3 import AvailableProvidersListCity
from ._models_py3 import AvailableProvidersListCountry
from ._models_py3 import AvailableProvidersListParameters
from ._models_py3 import AvailableProvidersListState
from ._models_py3 import AvailableServiceAlias
from ._models_py3 import AvailableServiceAliasesResult
from ._models_py3 import AzureAsyncOperationResult
from ._models_py3 import AzureFirewall
from ._models_py3 import AzureFirewallApplicationRule
from ._models_py3 import AzureFirewallApplicationRuleCollection
from ._models_py3 import AzureFirewallApplicationRuleProtocol
from ._models_py3 import AzureFirewallFqdnTag
from ._models_py3 import AzureFirewallFqdnTagListResult
from ._models_py3 import AzureFirewallIPConfiguration
from ._models_py3 import AzureFirewallListResult
from ._models_py3 import AzureFirewallNatRCAction
from ._models_py3 import AzureFirewallNatRule
from ._models_py3 import AzureFirewallNatRuleCollection
from ._models_py3 import AzureFirewallNetworkRule
from ._models_py3 import AzureFirewallNetworkRuleCollection
from ._models_py3 import AzureFirewallPublicIPAddress
from ._models_py3 import AzureFirewallRCAction
from ._models_py3 import AzureFirewallSku
from ._models_py3 import AzureReachabilityReport
from ._models_py3 import AzureReachabilityReportItem
from ._models_py3 import AzureReachabilityReportLatencyInfo
from ._models_py3 import AzureReachabilityReportLocation
from ._models_py3 import AzureReachabilityReportParameters
from ._models_py3 import BGPCommunity
from ._models_py3 import BackendAddressPool
from ._models_py3 import BastionHost
from ._models_py3 import BastionHostIPConfiguration
from ._models_py3 import BastionHostListResult
from ._models_py3 import BgpPeerStatus
from ._models_py3 import BgpPeerStatusListResult
from ._models_py3 import BgpServiceCommunity
from ._models_py3 import BgpServiceCommunityListResult
from ._models_py3 import BgpSettings
from ._models_py3 import CheckPrivateLinkServiceVisibilityRequest
from ._models_py3 import CloudErrorBody
from ._models_py3 import (
Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties,
)
from ._models_py3 import ConnectionMonitor
from ._models_py3 import ConnectionMonitorDestination
from ._models_py3 import ConnectionMonitorListResult
from ._models_py3 import ConnectionMonitorParameters
from ._models_py3 import ConnectionMonitorQueryResult
from ._models_py3 import ConnectionMonitorResult
from ._models_py3 import ConnectionMonitorResultProperties
from ._models_py3 import ConnectionMonitorSource
from ._models_py3 import ConnectionResetSharedKey
from ._models_py3 import ConnectionSharedKey
from ._models_py3 import ConnectionStateSnapshot
from ._models_py3 import ConnectivityDestination
from ._models_py3 import ConnectivityHop
from ._models_py3 import ConnectivityInformation
from ._models_py3 import ConnectivityIssue
from ._models_py3 import ConnectivityParameters
from ._models_py3 import ConnectivitySource
from ._models_py3 import Container
from ._models_py3 import ContainerNetworkInterface
from ._models_py3 import ContainerNetworkInterfaceConfiguration
from ._models_py3 import ContainerNetworkInterfaceIpConfiguration
from ._models_py3 import DdosCustomPolicy
from ._models_py3 import DdosProtectionPlan
from ._models_py3 import DdosProtectionPlanListResult
from ._models_py3 import DdosSettings
from ._models_py3 import Delegation
from ._models_py3 import DeviceProperties
from ._models_py3 import DhcpOptions
from ._models_py3 import Dimension
from ._models_py3 import DnsNameAvailabilityResult
from ._models_py3 import EffectiveNetworkSecurityGroup
from ._models_py3 import EffectiveNetworkSecurityGroupAssociation
from ._models_py3 import EffectiveNetworkSecurityGroupListResult
from ._models_py3 import EffectiveNetworkSecurityRule
from ._models_py3 import EffectiveRoute
from ._models_py3 import EffectiveRouteListResult
from ._models_py3 import EndpointServiceResult
from ._models_py3 import EndpointServicesListResult
from ._models_py3 import Error
from ._models_py3 import ErrorDetails
from ._models_py3 import ErrorResponse
from ._models_py3 import EvaluatedNetworkSecurityGroup
from ._models_py3 import ExpressRouteCircuit
from ._models_py3 import ExpressRouteCircuitArpTable
from ._models_py3 import ExpressRouteCircuitAuthorization
from ._models_py3 import ExpressRouteCircuitConnection
from ._models_py3 import ExpressRouteCircuitConnectionListResult
from ._models_py3 import ExpressRouteCircuitListResult
from ._models_py3 import ExpressRouteCircuitPeering
from ._models_py3 import ExpressRouteCircuitPeeringConfig
from ._models_py3 import ExpressRouteCircuitPeeringId
from ._models_py3 import ExpressRouteCircuitPeeringListResult
from ._models_py3 import ExpressRouteCircuitReference
from ._models_py3 import ExpressRouteCircuitRoutesTable
from ._models_py3 import ExpressRouteCircuitRoutesTableSummary
from ._models_py3 import ExpressRouteCircuitServiceProviderProperties
from ._models_py3 import ExpressRouteCircuitSku
from ._models_py3 import ExpressRouteCircuitStats
from ._models_py3 import ExpressRouteCircuitsArpTableListResult
from ._models_py3 import ExpressRouteCircuitsRoutesTableListResult
from ._models_py3 import ExpressRouteCircuitsRoutesTableSummaryListResult
from ._models_py3 import ExpressRouteConnection
from ._models_py3 import ExpressRouteConnectionId
from ._models_py3 import ExpressRouteConnectionList
from ._models_py3 import ExpressRouteCrossConnection
from ._models_py3 import ExpressRouteCrossConnectionListResult
from ._models_py3 import ExpressRouteCrossConnectionPeering
from ._models_py3 import ExpressRouteCrossConnectionPeeringList
from ._models_py3 import ExpressRouteCrossConnectionRoutesTableSummary
from ._models_py3 import ExpressRouteCrossConnectionsRoutesTableSummaryListResult
from ._models_py3 import ExpressRouteGateway
from ._models_py3 import ExpressRouteGatewayList
from ._models_py3 import ExpressRouteGatewayPropertiesAutoScaleConfiguration
from ._models_py3 import ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds
from ._models_py3 import ExpressRouteLink
from ._models_py3 import ExpressRouteLinkListResult
from ._models_py3 import ExpressRouteLinkMacSecConfig
from ._models_py3 import ExpressRoutePort
from ._models_py3 import ExpressRoutePortListResult
from ._models_py3 import ExpressRoutePortsLocation
from ._models_py3 import ExpressRoutePortsLocationBandwidths
from ._models_py3 import ExpressRoutePortsLocationListResult
from ._models_py3 import ExpressRouteServiceProvider
from ._models_py3 import ExpressRouteServiceProviderBandwidthsOffered
from ._models_py3 import ExpressRouteServiceProviderListResult
from ._models_py3 import FirewallPolicy
from ._models_py3 import FirewallPolicyFilterRule
from ._models_py3 import FirewallPolicyFilterRuleAction
from ._models_py3 import FirewallPolicyListResult
from ._models_py3 import FirewallPolicyNatRule
from ._models_py3 import FirewallPolicyNatRuleAction
from ._models_py3 import FirewallPolicyRule
from ._models_py3 import FirewallPolicyRuleCondition
from ._models_py3 import FirewallPolicyRuleConditionApplicationProtocol
from ._models_py3 import FirewallPolicyRuleGroup
from ._models_py3 import FirewallPolicyRuleGroupListResult
from ._models_py3 import FlowLogFormatParameters
from ._models_py3 import FlowLogInformation
from ._models_py3 import FlowLogStatusParameters
from ._models_py3 import FrontendIPConfiguration
from ._models_py3 import GatewayRoute
from ._models_py3 import GatewayRouteListResult
from ._models_py3 import GetVpnSitesConfigurationRequest
from ._models_py3 import HTTPConfiguration
from ._models_py3 import HTTPHeader
from ._models_py3 import HubIPAddresses
from ._models_py3 import HubVirtualNetworkConnection
from ._models_py3 import IPAddressAvailabilityResult
from ._models_py3 import IPConfiguration
from ._models_py3 import IPConfigurationProfile
from ._models_py3 import InboundNatPool
from ._models_py3 import InboundNatRule
from ._models_py3 import InboundNatRuleListResult
from ._models_py3 import IpTag
from ._models_py3 import IpsecPolicy
from ._models_py3 import Ipv6ExpressRouteCircuitPeeringConfig
from ._models_py3 import ListHubVirtualNetworkConnectionsResult
from ._models_py3 import ListP2SVpnGatewaysResult
from ._models_py3 import ListVirtualHubsResult
from ._models_py3 import ListVirtualWANsResult
from ._models_py3 import ListVpnConnectionsResult
from ._models_py3 import ListVpnGatewaysResult
from ._models_py3 import ListVpnServerConfigurationsResult
from ._models_py3 import ListVpnSiteLinkConnectionsResult
from ._models_py3 import ListVpnSiteLinksResult
from ._models_py3 import ListVpnSitesResult
from ._models_py3 import LoadBalancer
from ._models_py3 import LoadBalancerBackendAddressPoolListResult
from ._models_py3 import LoadBalancerFrontendIPConfigurationListResult
from ._models_py3 import LoadBalancerListResult
from ._models_py3 import LoadBalancerLoadBalancingRuleListResult
from ._models_py3 import LoadBalancerOutboundRuleListResult
from ._models_py3 import LoadBalancerProbeListResult
from ._models_py3 import LoadBalancerSku
from ._models_py3 import LoadBalancingRule
from ._models_py3 import LocalNetworkGateway
from ._models_py3 import LocalNetworkGatewayListResult
from ._models_py3 import LogSpecification
from ._models_py3 import ManagedRuleGroupOverride
from ._models_py3 import ManagedRuleOverride
from ._models_py3 import ManagedRuleSet
from ._models_py3 import ManagedRulesDefinition
from ._models_py3 import ManagedServiceIdentity
from ._models_py3 import MatchCondition
from ._models_py3 import MatchVariable
from ._models_py3 import MatchedRule
from ._models_py3 import MetricSpecification
from ._models_py3 import NatGateway
from ._models_py3 import NatGatewayListResult
from ._models_py3 import NatGatewaySku
from ._models_py3 import NetworkConfigurationDiagnosticParameters
from ._models_py3 import NetworkConfigurationDiagnosticProfile
from ._models_py3 import NetworkConfigurationDiagnosticResponse
from ._models_py3 import NetworkConfigurationDiagnosticResult
from ._models_py3 import NetworkIntentPolicy
from ._models_py3 import NetworkIntentPolicyConfiguration
from ._models_py3 import NetworkInterface
from ._models_py3 import NetworkInterfaceAssociation
from ._models_py3 import NetworkInterfaceDnsSettings
from ._models_py3 import NetworkInterfaceIPConfiguration
from ._models_py3 import NetworkInterfaceIPConfigurationListResult
from ._models_py3 import NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties
from ._models_py3 import NetworkInterfaceListResult
from ._models_py3 import NetworkInterfaceLoadBalancerListResult
from ._models_py3 import NetworkInterfaceTapConfiguration
from ._models_py3 import NetworkInterfaceTapConfigurationListResult
from ._models_py3 import NetworkProfile
from ._models_py3 import NetworkProfileListResult
from ._models_py3 import NetworkRuleCondition
from ._models_py3 import NetworkSecurityGroup
from ._models_py3 import NetworkSecurityGroupListResult
from ._models_py3 import NetworkSecurityGroupResult
from ._models_py3 import NetworkSecurityRulesEvaluationResult
from ._models_py3 import NetworkWatcher
from ._models_py3 import NetworkWatcherListResult
from ._models_py3 import NextHopParameters
from ._models_py3 import NextHopResult
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OperationPropertiesFormatServiceSpecification
from ._models_py3 import OutboundRule
from ._models_py3 import OwaspCrsExclusionEntry
from ._models_py3 import P2SConnectionConfiguration
from ._models_py3 import P2SVpnConnectionHealth
from ._models_py3 import P2SVpnConnectionHealthRequest
from ._models_py3 import P2SVpnGateway
from ._models_py3 import P2SVpnProfileParameters
from ._models_py3 import PacketCapture
from ._models_py3 import PacketCaptureFilter
from ._models_py3 import PacketCaptureListResult
from ._models_py3 import PacketCaptureParameters
from ._models_py3 import PacketCaptureQueryStatusResult
from ._models_py3 import PacketCaptureResult
from ._models_py3 import PacketCaptureResultProperties
from ._models_py3 import PacketCaptureStorageLocation
from ._models_py3 import PatchRouteFilter
from ._models_py3 import PatchRouteFilterRule
from ._models_py3 import PeerExpressRouteCircuitConnection
from ._models_py3 import PeerExpressRouteCircuitConnectionListResult
from ._models_py3 import PolicySettings
from ._models_py3 import PrepareNetworkPoliciesRequest
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointListResult
from ._models_py3 import PrivateLinkService
from ._models_py3 import PrivateLinkServiceConnection
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import PrivateLinkServiceIpConfiguration
from ._models_py3 import PrivateLinkServiceListResult
from ._models_py3 import PrivateLinkServicePropertiesAutoApproval
from ._models_py3 import PrivateLinkServicePropertiesVisibility
from ._models_py3 import PrivateLinkServiceVisibility
from ._models_py3 import Probe
from ._models_py3 import ProtocolConfiguration
from ._models_py3 import ProtocolCustomSettingsFormat
from ._models_py3 import PublicIPAddress
from ._models_py3 import PublicIPAddressDnsSettings
from ._models_py3 import PublicIPAddressListResult
from ._models_py3 import PublicIPAddressSku
from ._models_py3 import PublicIPPrefix
from ._models_py3 import PublicIPPrefixListResult
from ._models_py3 import PublicIPPrefixSku
from ._models_py3 import QueryTroubleshootingParameters
from ._models_py3 import ReferencedPublicIpAddress
from ._models_py3 import Resource
from ._models_py3 import ResourceNavigationLink
from ._models_py3 import ResourceNavigationLinksListResult
from ._models_py3 import ResourceSet
from ._models_py3 import RetentionPolicyParameters
from ._models_py3 import Route
from ._models_py3 import RouteFilter
from ._models_py3 import RouteFilterListResult
from ._models_py3 import RouteFilterRule
from ._models_py3 import RouteFilterRuleListResult
from ._models_py3 import RouteListResult
from ._models_py3 import RouteTable
from ._models_py3 import RouteTableListResult
from ._models_py3 import SecurityGroupNetworkInterface
from ._models_py3 import SecurityGroupViewParameters
from ._models_py3 import SecurityGroupViewResult
from ._models_py3 import SecurityRule
from ._models_py3 import SecurityRuleAssociations
from ._models_py3 import SecurityRuleListResult
from ._models_py3 import ServiceAssociationLink
from ._models_py3 import ServiceAssociationLinksListResult
from ._models_py3 import ServiceEndpointPolicy
from ._models_py3 import ServiceEndpointPolicyDefinition
from ._models_py3 import ServiceEndpointPolicyDefinitionListResult
from ._models_py3 import ServiceEndpointPolicyListResult
from ._models_py3 import ServiceEndpointPropertiesFormat
from ._models_py3 import ServiceTagInformation
from ._models_py3 import ServiceTagInformationPropertiesFormat
from ._models_py3 import ServiceTagsListResult
from ._models_py3 import SubResource
from ._models_py3 import Subnet
from ._models_py3 import SubnetAssociation
from ._models_py3 import SubnetListResult
from ._models_py3 import TagsObject
from ._models_py3 import Topology
from ._models_py3 import TopologyAssociation
from ._models_py3 import TopologyParameters
from ._models_py3 import TopologyResource
from ._models_py3 import TrafficAnalyticsConfigurationProperties
from ._models_py3 import TrafficAnalyticsProperties
from ._models_py3 import TrafficSelectorPolicy
from ._models_py3 import TroubleshootingDetails
from ._models_py3 import TroubleshootingParameters
from ._models_py3 import TroubleshootingRecommendedActions
from ._models_py3 import TroubleshootingResult
from ._models_py3 import TunnelConnectionHealth
from ._models_py3 import UnprepareNetworkPoliciesRequest
from ._models_py3 import Usage
from ._models_py3 import UsageName
from ._models_py3 import UsagesListResult
from ._models_py3 import VerificationIPFlowParameters
from ._models_py3 import VerificationIPFlowResult
from ._models_py3 import VirtualHub
from ._models_py3 import VirtualHubId
from ._models_py3 import VirtualHubRoute
from ._models_py3 import VirtualHubRouteTable
from ._models_py3 import VirtualNetwork
from ._models_py3 import VirtualNetworkBgpCommunities
from ._models_py3 import VirtualNetworkConnectionGatewayReference
from ._models_py3 import VirtualNetworkGateway
from ._models_py3 import VirtualNetworkGatewayConnection
from ._models_py3 import VirtualNetworkGatewayConnectionListEntity
from ._models_py3 import VirtualNetworkGatewayConnectionListResult
from ._models_py3 import VirtualNetworkGatewayIPConfiguration
from ._models_py3 import VirtualNetworkGatewayListConnectionsResult
from ._models_py3 import VirtualNetworkGatewayListResult
from ._models_py3 import VirtualNetworkGatewaySku
from ._models_py3 import VirtualNetworkListResult
from ._models_py3 import VirtualNetworkListUsageResult
from ._models_py3 import VirtualNetworkPeering
from ._models_py3 import VirtualNetworkPeeringListResult
from ._models_py3 import VirtualNetworkTap
from ._models_py3 import VirtualNetworkTapListResult
from ._models_py3 import VirtualNetworkUsage
from ._models_py3 import VirtualNetworkUsageName
from ._models_py3 import VirtualRouter
from ._models_py3 import VirtualRouterListResult
from ._models_py3 import VirtualRouterPeering
from ._models_py3 import VirtualRouterPeeringListResult
from ._models_py3 import VirtualWAN
from ._models_py3 import VirtualWanSecurityProvider
from ._models_py3 import VirtualWanSecurityProviders
from ._models_py3 import VirtualWanVpnProfileParameters
from ._models_py3 import VpnClientConfiguration
from ._models_py3 import VpnClientConnectionHealth
from ._models_py3 import VpnClientConnectionHealthDetail
from ._models_py3 import VpnClientConnectionHealthDetailListResult
from ._models_py3 import VpnClientIPsecParameters
from ._models_py3 import VpnClientParameters
from ._models_py3 import VpnClientRevokedCertificate
from ._models_py3 import VpnClientRootCertificate
from ._models_py3 import VpnConnection
from ._models_py3 import VpnDeviceScriptParameters
from ._models_py3 import VpnGateway
from ._models_py3 import VpnLinkBgpSettings
from ._models_py3 import VpnLinkProviderProperties
from ._models_py3 import VpnPacketCaptureStartParameters
from ._models_py3 import VpnPacketCaptureStopParameters
from ._models_py3 import VpnProfileResponse
from ._models_py3 import VpnServerConfigRadiusClientRootCertificate
from ._models_py3 import VpnServerConfigRadiusServerRootCertificate
from ._models_py3 import VpnServerConfigVpnClientRevokedCertificate
from ._models_py3 import VpnServerConfigVpnClientRootCertificate
from ._models_py3 import VpnServerConfiguration
from ._models_py3 import VpnServerConfigurationsResponse
from ._models_py3 import VpnSite
from ._models_py3 import VpnSiteId
from ._models_py3 import VpnSiteLink
from ._models_py3 import VpnSiteLinkConnection
from ._models_py3 import WebApplicationFirewallCustomRule
from ._models_py3 import WebApplicationFirewallPolicy
from ._models_py3 import WebApplicationFirewallPolicyListResult
from ._network_management_client_enums import Access
from ._network_management_client_enums import ApplicationGatewayBackendHealthServerHealth
from ._network_management_client_enums import ApplicationGatewayCookieBasedAffinity
from ._network_management_client_enums import ApplicationGatewayCustomErrorStatusCode
from ._network_management_client_enums import ApplicationGatewayFirewallMode
from ._network_management_client_enums import ApplicationGatewayOperationalState
from ._network_management_client_enums import ApplicationGatewayProtocol
from ._network_management_client_enums import ApplicationGatewayRedirectType
from ._network_management_client_enums import ApplicationGatewayRequestRoutingRuleType
from ._network_management_client_enums import ApplicationGatewaySkuName
from ._network_management_client_enums import ApplicationGatewaySslCipherSuite
from ._network_management_client_enums import ApplicationGatewaySslPolicyName
from ._network_management_client_enums import ApplicationGatewaySslPolicyType
from ._network_management_client_enums import ApplicationGatewaySslProtocol
from ._network_management_client_enums import ApplicationGatewayTier
from ._network_management_client_enums import AssociationType
from ._network_management_client_enums import AuthenticationMethod
from ._network_management_client_enums import AuthorizationUseStatus
from ._network_management_client_enums import AzureFirewallApplicationRuleProtocolType
from ._network_management_client_enums import AzureFirewallNatRCActionType
from ._network_management_client_enums import AzureFirewallNetworkRuleProtocol
from ._network_management_client_enums import AzureFirewallRCActionType
from ._network_management_client_enums import AzureFirewallSkuName
from ._network_management_client_enums import AzureFirewallSkuTier
from ._network_management_client_enums import AzureFirewallThreatIntelMode
from ._network_management_client_enums import BgpPeerState
from ._network_management_client_enums import CircuitConnectionStatus
from ._network_management_client_enums import ConnectionMonitorSourceStatus
from ._network_management_client_enums import ConnectionState
from ._network_management_client_enums import ConnectionStatus
from ._network_management_client_enums import DdosCustomPolicyProtocol
from ._network_management_client_enums import DdosCustomPolicyTriggerSensitivityOverride
from ._network_management_client_enums import DdosSettingsProtectionCoverage
from ._network_management_client_enums import DhGroup
from ._network_management_client_enums import Direction
from ._network_management_client_enums import EffectiveRouteSource
from ._network_management_client_enums import EffectiveRouteState
from ._network_management_client_enums import EffectiveSecurityRuleProtocol
from ._network_management_client_enums import EvaluationState
from ._network_management_client_enums import ExpressRouteCircuitPeeringAdvertisedPublicPrefixState
from ._network_management_client_enums import ExpressRouteCircuitPeeringState
from ._network_management_client_enums import ExpressRouteCircuitSkuFamily
from ._network_management_client_enums import ExpressRouteCircuitSkuTier
from ._network_management_client_enums import ExpressRouteLinkAdminState
from ._network_management_client_enums import ExpressRouteLinkConnectorType
from ._network_management_client_enums import ExpressRouteLinkMacSecCipher
from ._network_management_client_enums import ExpressRoutePeeringState
from ._network_management_client_enums import ExpressRoutePeeringType
from ._network_management_client_enums import ExpressRoutePortsEncapsulation
from ._network_management_client_enums import FirewallPolicyFilterRuleActionType
from ._network_management_client_enums import FirewallPolicyNatRuleActionType
from ._network_management_client_enums import FirewallPolicyRuleConditionApplicationProtocolType
from ._network_management_client_enums import FirewallPolicyRuleConditionNetworkProtocol
from ._network_management_client_enums import FirewallPolicyRuleConditionType
from ._network_management_client_enums import FirewallPolicyRuleType
from ._network_management_client_enums import FlowLogFormatType
from ._network_management_client_enums import HTTPMethod
from ._network_management_client_enums import HubVirtualNetworkConnectionStatus
from ._network_management_client_enums import IPAllocationMethod
from ._network_management_client_enums import IPVersion
from ._network_management_client_enums import IkeEncryption
from ._network_management_client_enums import IkeIntegrity
from ._network_management_client_enums import IpFlowProtocol
from ._network_management_client_enums import IpsecEncryption
from ._network_management_client_enums import IpsecIntegrity
from ._network_management_client_enums import IssueType
from ._network_management_client_enums import LoadBalancerOutboundRuleProtocol
from ._network_management_client_enums import LoadBalancerSkuName
from ._network_management_client_enums import LoadDistribution
from ._network_management_client_enums import ManagedRuleEnabledState
from ._network_management_client_enums import NatGatewaySkuName
from ._network_management_client_enums import NetworkOperationStatus
from ._network_management_client_enums import NextHopType
from ._network_management_client_enums import OfficeTrafficCategory
from ._network_management_client_enums import Origin
from ._network_management_client_enums import OwaspCrsExclusionEntryMatchVariable
from ._network_management_client_enums import OwaspCrsExclusionEntrySelectorMatchOperator
from ._network_management_client_enums import PcError
from ._network_management_client_enums import PcProtocol
from ._network_management_client_enums import PcStatus
from ._network_management_client_enums import PfsGroup
from ._network_management_client_enums import ProbeProtocol
from ._network_management_client_enums import ProcessorArchitecture
from ._network_management_client_enums import Protocol
from ._network_management_client_enums import ProvisioningState
from ._network_management_client_enums import PublicIPAddressSkuName
from ._network_management_client_enums import PublicIPPrefixSkuName
from ._network_management_client_enums import ResourceIdentityType
from ._network_management_client_enums import RouteFilterRuleType
from ._network_management_client_enums import RouteNextHopType
from ._network_management_client_enums import SecurityRuleAccess
from ._network_management_client_enums import SecurityRuleDirection
from ._network_management_client_enums import SecurityRuleProtocol
from ._network_management_client_enums import ServiceProviderProvisioningState
from ._network_management_client_enums import Severity
from ._network_management_client_enums import TransportProtocol
from ._network_management_client_enums import TunnelConnectionStatus
from ._network_management_client_enums import UsageUnit
from ._network_management_client_enums import VerbosityLevel
from ._network_management_client_enums import VirtualNetworkGatewayConnectionProtocol
from ._network_management_client_enums import VirtualNetworkGatewayConnectionStatus
from ._network_management_client_enums import VirtualNetworkGatewayConnectionType
from ._network_management_client_enums import VirtualNetworkGatewaySkuName
from ._network_management_client_enums import VirtualNetworkGatewaySkuTier
from ._network_management_client_enums import VirtualNetworkGatewayType
from ._network_management_client_enums import VirtualNetworkPeeringState
from ._network_management_client_enums import VirtualWanSecurityProviderType
from ._network_management_client_enums import VpnAuthenticationType
from ._network_management_client_enums import VpnClientProtocol
from ._network_management_client_enums import VpnConnectionStatus
from ._network_management_client_enums import VpnGatewayGeneration
from ._network_management_client_enums import VpnGatewayTunnelingProtocol
from ._network_management_client_enums import VpnType
from ._network_management_client_enums import WebApplicationFirewallAction
from ._network_management_client_enums import WebApplicationFirewallEnabledState
from ._network_management_client_enums import WebApplicationFirewallMatchVariable
from ._network_management_client_enums import WebApplicationFirewallMode
from ._network_management_client_enums import WebApplicationFirewallOperator
from ._network_management_client_enums import WebApplicationFirewallPolicyResourceState
from ._network_management_client_enums import WebApplicationFirewallRuleType
from ._network_management_client_enums import WebApplicationFirewallTransform
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AadAuthenticationParameters",
"AddressSpace",
"ApplicationGateway",
"ApplicationGatewayAuthenticationCertificate",
"ApplicationGatewayAutoscaleConfiguration",
"ApplicationGatewayAvailableSslOptions",
"ApplicationGatewayAvailableSslPredefinedPolicies",
"ApplicationGatewayAvailableWafRuleSetsResult",
"ApplicationGatewayBackendAddress",
"ApplicationGatewayBackendAddressPool",
"ApplicationGatewayBackendHealth",
"ApplicationGatewayBackendHealthHttpSettings",
"ApplicationGatewayBackendHealthOnDemand",
"ApplicationGatewayBackendHealthPool",
"ApplicationGatewayBackendHealthServer",
"ApplicationGatewayBackendHttpSettings",
"ApplicationGatewayConnectionDraining",
"ApplicationGatewayCustomError",
"ApplicationGatewayFirewallDisabledRuleGroup",
"ApplicationGatewayFirewallExclusion",
"ApplicationGatewayFirewallRule",
"ApplicationGatewayFirewallRuleGroup",
"ApplicationGatewayFirewallRuleSet",
"ApplicationGatewayFrontendIPConfiguration",
"ApplicationGatewayFrontendPort",
"ApplicationGatewayHeaderConfiguration",
"ApplicationGatewayHttpListener",
"ApplicationGatewayIPConfiguration",
"ApplicationGatewayListResult",
"ApplicationGatewayOnDemandProbe",
"ApplicationGatewayPathRule",
"ApplicationGatewayProbe",
"ApplicationGatewayProbeHealthResponseMatch",
"ApplicationGatewayRedirectConfiguration",
"ApplicationGatewayRequestRoutingRule",
"ApplicationGatewayRewriteRule",
"ApplicationGatewayRewriteRuleActionSet",
"ApplicationGatewayRewriteRuleCondition",
"ApplicationGatewayRewriteRuleSet",
"ApplicationGatewaySku",
"ApplicationGatewaySslCertificate",
"ApplicationGatewaySslPolicy",
"ApplicationGatewaySslPredefinedPolicy",
"ApplicationGatewayTrustedRootCertificate",
"ApplicationGatewayUrlPathMap",
"ApplicationGatewayWebApplicationFirewallConfiguration",
"ApplicationRuleCondition",
"ApplicationSecurityGroup",
"ApplicationSecurityGroupListResult",
"AuthorizationListResult",
"AutoApprovedPrivateLinkService",
"AutoApprovedPrivateLinkServicesResult",
"Availability",
"AvailableDelegation",
"AvailableDelegationsResult",
"AvailablePrivateEndpointType",
"AvailablePrivateEndpointTypesResult",
"AvailableProvidersList",
"AvailableProvidersListCity",
"AvailableProvidersListCountry",
"AvailableProvidersListParameters",
"AvailableProvidersListState",
"AvailableServiceAlias",
"AvailableServiceAliasesResult",
"AzureAsyncOperationResult",
"AzureFirewall",
"AzureFirewallApplicationRule",
"AzureFirewallApplicationRuleCollection",
"AzureFirewallApplicationRuleProtocol",
"AzureFirewallFqdnTag",
"AzureFirewallFqdnTagListResult",
"AzureFirewallIPConfiguration",
"AzureFirewallListResult",
"AzureFirewallNatRCAction",
"AzureFirewallNatRule",
"AzureFirewallNatRuleCollection",
"AzureFirewallNetworkRule",
"AzureFirewallNetworkRuleCollection",
"AzureFirewallPublicIPAddress",
"AzureFirewallRCAction",
"AzureFirewallSku",
"AzureReachabilityReport",
"AzureReachabilityReportItem",
"AzureReachabilityReportLatencyInfo",
"AzureReachabilityReportLocation",
"AzureReachabilityReportParameters",
"BGPCommunity",
"BackendAddressPool",
"BastionHost",
"BastionHostIPConfiguration",
"BastionHostListResult",
"BgpPeerStatus",
"BgpPeerStatusListResult",
"BgpServiceCommunity",
"BgpServiceCommunityListResult",
"BgpSettings",
"CheckPrivateLinkServiceVisibilityRequest",
"CloudErrorBody",
"Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties",
"ConnectionMonitor",
"ConnectionMonitorDestination",
"ConnectionMonitorListResult",
"ConnectionMonitorParameters",
"ConnectionMonitorQueryResult",
"ConnectionMonitorResult",
"ConnectionMonitorResultProperties",
"ConnectionMonitorSource",
"ConnectionResetSharedKey",
"ConnectionSharedKey",
"ConnectionStateSnapshot",
"ConnectivityDestination",
"ConnectivityHop",
"ConnectivityInformation",
"ConnectivityIssue",
"ConnectivityParameters",
"ConnectivitySource",
"Container",
"ContainerNetworkInterface",
"ContainerNetworkInterfaceConfiguration",
"ContainerNetworkInterfaceIpConfiguration",
"DdosCustomPolicy",
"DdosProtectionPlan",
"DdosProtectionPlanListResult",
"DdosSettings",
"Delegation",
"DeviceProperties",
"DhcpOptions",
"Dimension",
"DnsNameAvailabilityResult",
"EffectiveNetworkSecurityGroup",
"EffectiveNetworkSecurityGroupAssociation",
"EffectiveNetworkSecurityGroupListResult",
"EffectiveNetworkSecurityRule",
"EffectiveRoute",
"EffectiveRouteListResult",
"EndpointServiceResult",
"EndpointServicesListResult",
"Error",
"ErrorDetails",
"ErrorResponse",
"EvaluatedNetworkSecurityGroup",
"ExpressRouteCircuit",
"ExpressRouteCircuitArpTable",
"ExpressRouteCircuitAuthorization",
"ExpressRouteCircuitConnection",
"ExpressRouteCircuitConnectionListResult",
"ExpressRouteCircuitListResult",
"ExpressRouteCircuitPeering",
"ExpressRouteCircuitPeeringConfig",
"ExpressRouteCircuitPeeringId",
"ExpressRouteCircuitPeeringListResult",
"ExpressRouteCircuitReference",
"ExpressRouteCircuitRoutesTable",
"ExpressRouteCircuitRoutesTableSummary",
"ExpressRouteCircuitServiceProviderProperties",
"ExpressRouteCircuitSku",
"ExpressRouteCircuitStats",
"ExpressRouteCircuitsArpTableListResult",
"ExpressRouteCircuitsRoutesTableListResult",
"ExpressRouteCircuitsRoutesTableSummaryListResult",
"ExpressRouteConnection",
"ExpressRouteConnectionId",
"ExpressRouteConnectionList",
"ExpressRouteCrossConnection",
"ExpressRouteCrossConnectionListResult",
"ExpressRouteCrossConnectionPeering",
"ExpressRouteCrossConnectionPeeringList",
"ExpressRouteCrossConnectionRoutesTableSummary",
"ExpressRouteCrossConnectionsRoutesTableSummaryListResult",
"ExpressRouteGateway",
"ExpressRouteGatewayList",
"ExpressRouteGatewayPropertiesAutoScaleConfiguration",
"ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds",
"ExpressRouteLink",
"ExpressRouteLinkListResult",
"ExpressRouteLinkMacSecConfig",
"ExpressRoutePort",
"ExpressRoutePortListResult",
"ExpressRoutePortsLocation",
"ExpressRoutePortsLocationBandwidths",
"ExpressRoutePortsLocationListResult",
"ExpressRouteServiceProvider",
"ExpressRouteServiceProviderBandwidthsOffered",
"ExpressRouteServiceProviderListResult",
"FirewallPolicy",
"FirewallPolicyFilterRule",
"FirewallPolicyFilterRuleAction",
"FirewallPolicyListResult",
"FirewallPolicyNatRule",
"FirewallPolicyNatRuleAction",
"FirewallPolicyRule",
"FirewallPolicyRuleCondition",
"FirewallPolicyRuleConditionApplicationProtocol",
"FirewallPolicyRuleGroup",
"FirewallPolicyRuleGroupListResult",
"FlowLogFormatParameters",
"FlowLogInformation",
"FlowLogStatusParameters",
"FrontendIPConfiguration",
"GatewayRoute",
"GatewayRouteListResult",
"GetVpnSitesConfigurationRequest",
"HTTPConfiguration",
"HTTPHeader",
"HubIPAddresses",
"HubVirtualNetworkConnection",
"IPAddressAvailabilityResult",
"IPConfiguration",
"IPConfigurationProfile",
"InboundNatPool",
"InboundNatRule",
"InboundNatRuleListResult",
"IpTag",
"IpsecPolicy",
"Ipv6ExpressRouteCircuitPeeringConfig",
"ListHubVirtualNetworkConnectionsResult",
"ListP2SVpnGatewaysResult",
"ListVirtualHubsResult",
"ListVirtualWANsResult",
"ListVpnConnectionsResult",
"ListVpnGatewaysResult",
"ListVpnServerConfigurationsResult",
"ListVpnSiteLinkConnectionsResult",
"ListVpnSiteLinksResult",
"ListVpnSitesResult",
"LoadBalancer",
"LoadBalancerBackendAddressPoolListResult",
"LoadBalancerFrontendIPConfigurationListResult",
"LoadBalancerListResult",
"LoadBalancerLoadBalancingRuleListResult",
"LoadBalancerOutboundRuleListResult",
"LoadBalancerProbeListResult",
"LoadBalancerSku",
"LoadBalancingRule",
"LocalNetworkGateway",
"LocalNetworkGatewayListResult",
"LogSpecification",
"ManagedRuleGroupOverride",
"ManagedRuleOverride",
"ManagedRuleSet",
"ManagedRulesDefinition",
"ManagedServiceIdentity",
"MatchCondition",
"MatchVariable",
"MatchedRule",
"MetricSpecification",
"NatGateway",
"NatGatewayListResult",
"NatGatewaySku",
"NetworkConfigurationDiagnosticParameters",
"NetworkConfigurationDiagnosticProfile",
"NetworkConfigurationDiagnosticResponse",
"NetworkConfigurationDiagnosticResult",
"NetworkIntentPolicy",
"NetworkIntentPolicyConfiguration",
"NetworkInterface",
"NetworkInterfaceAssociation",
"NetworkInterfaceDnsSettings",
"NetworkInterfaceIPConfiguration",
"NetworkInterfaceIPConfigurationListResult",
"NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties",
"NetworkInterfaceListResult",
"NetworkInterfaceLoadBalancerListResult",
"NetworkInterfaceTapConfiguration",
"NetworkInterfaceTapConfigurationListResult",
"NetworkProfile",
"NetworkProfileListResult",
"NetworkRuleCondition",
"NetworkSecurityGroup",
"NetworkSecurityGroupListResult",
"NetworkSecurityGroupResult",
"NetworkSecurityRulesEvaluationResult",
"NetworkWatcher",
"NetworkWatcherListResult",
"NextHopParameters",
"NextHopResult",
"Operation",
"OperationDisplay",
"OperationListResult",
"OperationPropertiesFormatServiceSpecification",
"OutboundRule",
"OwaspCrsExclusionEntry",
"P2SConnectionConfiguration",
"P2SVpnConnectionHealth",
"P2SVpnConnectionHealthRequest",
"P2SVpnGateway",
"P2SVpnProfileParameters",
"PacketCapture",
"PacketCaptureFilter",
"PacketCaptureListResult",
"PacketCaptureParameters",
"PacketCaptureQueryStatusResult",
"PacketCaptureResult",
"PacketCaptureResultProperties",
"PacketCaptureStorageLocation",
"PatchRouteFilter",
"PatchRouteFilterRule",
"PeerExpressRouteCircuitConnection",
"PeerExpressRouteCircuitConnectionListResult",
"PolicySettings",
"PrepareNetworkPoliciesRequest",
"PrivateEndpoint",
"PrivateEndpointConnection",
"PrivateEndpointListResult",
"PrivateLinkService",
"PrivateLinkServiceConnection",
"PrivateLinkServiceConnectionState",
"PrivateLinkServiceIpConfiguration",
"PrivateLinkServiceListResult",
"PrivateLinkServicePropertiesAutoApproval",
"PrivateLinkServicePropertiesVisibility",
"PrivateLinkServiceVisibility",
"Probe",
"ProtocolConfiguration",
"ProtocolCustomSettingsFormat",
"PublicIPAddress",
"PublicIPAddressDnsSettings",
"PublicIPAddressListResult",
"PublicIPAddressSku",
"PublicIPPrefix",
"PublicIPPrefixListResult",
"PublicIPPrefixSku",
"QueryTroubleshootingParameters",
"ReferencedPublicIpAddress",
"Resource",
"ResourceNavigationLink",
"ResourceNavigationLinksListResult",
"ResourceSet",
"RetentionPolicyParameters",
"Route",
"RouteFilter",
"RouteFilterListResult",
"RouteFilterRule",
"RouteFilterRuleListResult",
"RouteListResult",
"RouteTable",
"RouteTableListResult",
"SecurityGroupNetworkInterface",
"SecurityGroupViewParameters",
"SecurityGroupViewResult",
"SecurityRule",
"SecurityRuleAssociations",
"SecurityRuleListResult",
"ServiceAssociationLink",
"ServiceAssociationLinksListResult",
"ServiceEndpointPolicy",
"ServiceEndpointPolicyDefinition",
"ServiceEndpointPolicyDefinitionListResult",
"ServiceEndpointPolicyListResult",
"ServiceEndpointPropertiesFormat",
"ServiceTagInformation",
"ServiceTagInformationPropertiesFormat",
"ServiceTagsListResult",
"SubResource",
"Subnet",
"SubnetAssociation",
"SubnetListResult",
"TagsObject",
"Topology",
"TopologyAssociation",
"TopologyParameters",
"TopologyResource",
"TrafficAnalyticsConfigurationProperties",
"TrafficAnalyticsProperties",
"TrafficSelectorPolicy",
"TroubleshootingDetails",
"TroubleshootingParameters",
"TroubleshootingRecommendedActions",
"TroubleshootingResult",
"TunnelConnectionHealth",
"UnprepareNetworkPoliciesRequest",
"Usage",
"UsageName",
"UsagesListResult",
"VerificationIPFlowParameters",
"VerificationIPFlowResult",
"VirtualHub",
"VirtualHubId",
"VirtualHubRoute",
"VirtualHubRouteTable",
"VirtualNetwork",
"VirtualNetworkBgpCommunities",
"VirtualNetworkConnectionGatewayReference",
"VirtualNetworkGateway",
"VirtualNetworkGatewayConnection",
"VirtualNetworkGatewayConnectionListEntity",
"VirtualNetworkGatewayConnectionListResult",
"VirtualNetworkGatewayIPConfiguration",
"VirtualNetworkGatewayListConnectionsResult",
"VirtualNetworkGatewayListResult",
"VirtualNetworkGatewaySku",
"VirtualNetworkListResult",
"VirtualNetworkListUsageResult",
"VirtualNetworkPeering",
"VirtualNetworkPeeringListResult",
"VirtualNetworkTap",
"VirtualNetworkTapListResult",
"VirtualNetworkUsage",
"VirtualNetworkUsageName",
"VirtualRouter",
"VirtualRouterListResult",
"VirtualRouterPeering",
"VirtualRouterPeeringListResult",
"VirtualWAN",
"VirtualWanSecurityProvider",
"VirtualWanSecurityProviders",
"VirtualWanVpnProfileParameters",
"VpnClientConfiguration",
"VpnClientConnectionHealth",
"VpnClientConnectionHealthDetail",
"VpnClientConnectionHealthDetailListResult",
"VpnClientIPsecParameters",
"VpnClientParameters",
"VpnClientRevokedCertificate",
"VpnClientRootCertificate",
"VpnConnection",
"VpnDeviceScriptParameters",
"VpnGateway",
"VpnLinkBgpSettings",
"VpnLinkProviderProperties",
"VpnPacketCaptureStartParameters",
"VpnPacketCaptureStopParameters",
"VpnProfileResponse",
"VpnServerConfigRadiusClientRootCertificate",
"VpnServerConfigRadiusServerRootCertificate",
"VpnServerConfigVpnClientRevokedCertificate",
"VpnServerConfigVpnClientRootCertificate",
"VpnServerConfiguration",
"VpnServerConfigurationsResponse",
"VpnSite",
"VpnSiteId",
"VpnSiteLink",
"VpnSiteLinkConnection",
"WebApplicationFirewallCustomRule",
"WebApplicationFirewallPolicy",
"WebApplicationFirewallPolicyListResult",
"Access",
"ApplicationGatewayBackendHealthServerHealth",
"ApplicationGatewayCookieBasedAffinity",
"ApplicationGatewayCustomErrorStatusCode",
"ApplicationGatewayFirewallMode",
"ApplicationGatewayOperationalState",
"ApplicationGatewayProtocol",
"ApplicationGatewayRedirectType",
"ApplicationGatewayRequestRoutingRuleType",
"ApplicationGatewaySkuName",
"ApplicationGatewaySslCipherSuite",
"ApplicationGatewaySslPolicyName",
"ApplicationGatewaySslPolicyType",
"ApplicationGatewaySslProtocol",
"ApplicationGatewayTier",
"AssociationType",
"AuthenticationMethod",
"AuthorizationUseStatus",
"AzureFirewallApplicationRuleProtocolType",
"AzureFirewallNatRCActionType",
"AzureFirewallNetworkRuleProtocol",
"AzureFirewallRCActionType",
"AzureFirewallSkuName",
"AzureFirewallSkuTier",
"AzureFirewallThreatIntelMode",
"BgpPeerState",
"CircuitConnectionStatus",
"ConnectionMonitorSourceStatus",
"ConnectionState",
"ConnectionStatus",
"DdosCustomPolicyProtocol",
"DdosCustomPolicyTriggerSensitivityOverride",
"DdosSettingsProtectionCoverage",
"DhGroup",
"Direction",
"EffectiveRouteSource",
"EffectiveRouteState",
"EffectiveSecurityRuleProtocol",
"EvaluationState",
"ExpressRouteCircuitPeeringAdvertisedPublicPrefixState",
"ExpressRouteCircuitPeeringState",
"ExpressRouteCircuitSkuFamily",
"ExpressRouteCircuitSkuTier",
"ExpressRouteLinkAdminState",
"ExpressRouteLinkConnectorType",
"ExpressRouteLinkMacSecCipher",
"ExpressRoutePeeringState",
"ExpressRoutePeeringType",
"ExpressRoutePortsEncapsulation",
"FirewallPolicyFilterRuleActionType",
"FirewallPolicyNatRuleActionType",
"FirewallPolicyRuleConditionApplicationProtocolType",
"FirewallPolicyRuleConditionNetworkProtocol",
"FirewallPolicyRuleConditionType",
"FirewallPolicyRuleType",
"FlowLogFormatType",
"HTTPMethod",
"HubVirtualNetworkConnectionStatus",
"IPAllocationMethod",
"IPVersion",
"IkeEncryption",
"IkeIntegrity",
"IpFlowProtocol",
"IpsecEncryption",
"IpsecIntegrity",
"IssueType",
"LoadBalancerOutboundRuleProtocol",
"LoadBalancerSkuName",
"LoadDistribution",
"ManagedRuleEnabledState",
"NatGatewaySkuName",
"NetworkOperationStatus",
"NextHopType",
"OfficeTrafficCategory",
"Origin",
"OwaspCrsExclusionEntryMatchVariable",
"OwaspCrsExclusionEntrySelectorMatchOperator",
"PcError",
"PcProtocol",
"PcStatus",
"PfsGroup",
"ProbeProtocol",
"ProcessorArchitecture",
"Protocol",
"ProvisioningState",
"PublicIPAddressSkuName",
"PublicIPPrefixSkuName",
"ResourceIdentityType",
"RouteFilterRuleType",
"RouteNextHopType",
"SecurityRuleAccess",
"SecurityRuleDirection",
"SecurityRuleProtocol",
"ServiceProviderProvisioningState",
"Severity",
"TransportProtocol",
"TunnelConnectionStatus",
"UsageUnit",
"VerbosityLevel",
"VirtualNetworkGatewayConnectionProtocol",
"VirtualNetworkGatewayConnectionStatus",
"VirtualNetworkGatewayConnectionType",
"VirtualNetworkGatewaySkuName",
"VirtualNetworkGatewaySkuTier",
"VirtualNetworkGatewayType",
"VirtualNetworkPeeringState",
"VirtualWanSecurityProviderType",
"VpnAuthenticationType",
"VpnClientProtocol",
"VpnConnectionStatus",
"VpnGatewayGeneration",
"VpnGatewayTunnelingProtocol",
"VpnType",
"WebApplicationFirewallAction",
"WebApplicationFirewallEnabledState",
"WebApplicationFirewallMatchVariable",
"WebApplicationFirewallMode",
"WebApplicationFirewallOperator",
"WebApplicationFirewallPolicyResourceState",
"WebApplicationFirewallRuleType",
"WebApplicationFirewallTransform",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
{
"content_hash": "f70a5bb7d1f8fb27a360583deeb4cf35",
"timestamp": "",
"source": "github",
"line_count": 1125,
"max_line_length": 105,
"avg_line_length": 44.448,
"alnum_prop": 0.826113910887129,
"repo_name": "Azure/azure-sdk-for-python",
"id": "dc066145698c5cdccaf27f356a525828fb47686f",
"size": "50472",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('iiits', '0059_auto_20160717_0609'),
]
operations = [
migrations.AlterField(
model_name='notice',
name='valid_until',
field=models.DateTimeField(default=datetime.datetime(2016, 7, 24, 6, 14, 48, 161315, tzinfo=utc)),
),
migrations.AlterField(
model_name='topstory',
name='title',
field=models.CharField(max_length=255),
),
]
|
{
"content_hash": "a78458ab3954aba32faa816b8578db15",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 110,
"avg_line_length": 26,
"alnum_prop": 0.6030769230769231,
"repo_name": "IIITS/iiits.ac.in",
"id": "79094171d897f57a2a4f0df5c1978a00d2070601",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iiits/migrations/0060_auto_20160717_0614.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "570565"
},
{
"name": "HTML",
"bytes": "161340"
},
{
"name": "JavaScript",
"bytes": "1819248"
},
{
"name": "Python",
"bytes": "234433"
}
],
"symlink_target": ""
}
|
import clr
clr.AddReferenceByPartialName("PresentationCore")
clr.AddReferenceByPartialName("PresentationFramework")
clr.AddReferenceByPartialName("WindowsBase")
clr.AddReferenceByPartialName("IronPython")
clr.AddReferenceByPartialName("Microsoft.Scripting")
from math import *
from System import *
from System.Windows import *
from System.Windows.Media import *
from System.Windows.Media.Animation import *
from System.Windows.Controls import *
from System.Windows.Shapes import *
from System.Threading import *
from System.Windows.Threading import *
import IronPython
are = AutoResetEvent(False)
def CallBack(f, p = DispatcherPriority.Normal):
Application.Current.Dispatcher.BeginInvoke(p, IronPython.Runtime.CallTarget0(f))
def CallBack1(f, p0, p = DispatcherPriority.Normal):
Application.Current.Dispatcher.BeginInvoke(p, IronPython.Runtime.CallTarget1(f), p0)
def on_startup(*args):
global dispatcher
dispatcher = Dispatcher.FromThread(t)
are.Set()
def start():
try:
global app
app = Application()
app.Startup += on_startup
app.Run()
finally:
clr.SetCommandDispatcher(None)
t = Thread(ThreadStart(start))
t.IsBackground = True
t.ApartmentState = ApartmentState.STA
t.Start()
are.WaitOne()
def DispatchConsoleCommand(consoleCommand):
if consoleCommand:
dispatcher.Invoke(DispatcherPriority.Normal, consoleCommand)
clr.SetCommandDispatcher(DispatchConsoleCommand)
def LoadXaml(filename):
from System.IO import *
from System.Windows.Markup import XamlReader
f = FileStream(filename, FileMode.Open, FileAccess.Read)
try:
element = XamlReader.Load(f)
finally:
f.Close()
return element
def SetScript(e,s):
from Pythalon import PythonScript
e.SetValue(PythonScript.ScriptProperty, s)
def SaveXaml(filename, element):
from System.Windows.Markup import XamlWriter
s = XamlWriter.Save(element)
try:
f = open(filename, "w")
f.write(s)
finally:
f.close()
def Walk(tree):
yield tree
if hasattr(tree, 'Children'):
for child in tree.Children:
for x in Walk(child):
yield x
elif hasattr(tree, 'Child'):
for x in Walk(tree.Child):
yield x
elif hasattr(tree, 'Content'):
for x in Walk(tree.Content):
yield x
def LoadNames(tree, namespace):
for node in Walk(tree):
if hasattr(node, 'Name'):
namespace[node.Name] = node
|
{
"content_hash": "49a3f4d1825860f8ccf5d3df48d48621",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 26.177083333333332,
"alnum_prop": 0.6975726223637088,
"repo_name": "jstammers/EDMSuite",
"id": "12b6d9165e1405e19568186f934dacd0bfea4f06",
"size": "3243",
"binary": false,
"copies": "1",
"ref": "refs/heads/atom-mega-mix",
"path": "NavPython/IronPython/Tutorial/avalon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3461"
},
{
"name": "C#",
"bytes": "6585392"
},
{
"name": "CSS",
"bytes": "5394"
},
{
"name": "F#",
"bytes": "1632"
},
{
"name": "Forth",
"bytes": "790"
},
{
"name": "HTML",
"bytes": "163836"
},
{
"name": "JavaScript",
"bytes": "1060"
},
{
"name": "PowerShell",
"bytes": "70539"
},
{
"name": "Python",
"bytes": "7661598"
},
{
"name": "Ruby",
"bytes": "1067"
},
{
"name": "Visual Basic",
"bytes": "2135"
}
],
"symlink_target": ""
}
|
from ....testing import assert_equal
from ..preprocess import Tensor2ApparentDiffusion
def test_Tensor2ApparentDiffusion_inputs():
input_map = dict(args=dict(argstr='%s',
),
debug=dict(argstr='-debug',
position=1,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
out_filename=dict(argstr='%s',
genfile=True,
position=-1,
),
quiet=dict(argstr='-quiet',
position=1,
),
terminal_output=dict(nohash=True,
),
)
inputs = Tensor2ApparentDiffusion.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Tensor2ApparentDiffusion_outputs():
output_map = dict(ADC=dict(),
)
outputs = Tensor2ApparentDiffusion.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
{
"content_hash": "be39235b2df156b05f738d6ad8e7102b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.6398305084745762,
"repo_name": "iglpdc/nipype",
"id": "c7bd91a6103486080f09c1a527c1721f25a6e316",
"size": "1234",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4458175"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import click
from nctrader import settings
from nctrader.compat import queue
from nctrader.price_parser import PriceParser
from nctrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from nctrader.strategy import DisplayStrategy
from nctrader.position_sizer.fixed import FixedPositionSizer
from nctrader.risk_manager.example import ExampleRiskManager
from nctrader.portfolio_handler import PortfolioHandler
from nctrader.compliance.example import ExampleCompliance
from nctrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler
from nctrader.statistics.simple import SimpleStatistics
from nctrader.trading_session.backtest import Backtest
def run(config, testing, tickers, filename, n, n_window):
# Set up variables needed for backtest
events_queue = queue.Queue()
csv_dir = config.CSV_DATA_DIR
initial_equity = PriceParser.parse(500000.00)
# Use Yahoo Daily Price Handler
price_handler = YahooDailyCsvBarPriceHandler(
csv_dir, events_queue, tickers
)
# Use the Display Strategy
strategy = DisplayStrategy(n=n, n_window=n_window)
# Use an example Position Sizer
position_sizer = FixedPositionSizer()
# Use an example Risk Manager
risk_manager = ExampleRiskManager()
# Use the default Portfolio Handler
portfolio_handler = PortfolioHandler(
initial_equity, events_queue, price_handler,
position_sizer, risk_manager
)
# Use the ExampleCompliance component
compliance = ExampleCompliance(config)
# Use a simulated IB Execution Handler
execution_handler = IBSimulatedExecutionHandler(
events_queue, price_handler, compliance
)
# Use the default Statistics
statistics = SimpleStatistics(config, portfolio_handler)
# Set up the backtest
backtest = Backtest(
price_handler, strategy,
portfolio_handler, execution_handler,
position_sizer, risk_manager,
statistics, initial_equity
)
results = backtest.simulate_trading(testing=testing)
statistics.save(filename)
return results
@click.command()
@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
@click.option('--testing/--no-testing', default=False, help='Enable testing mode')
@click.option('--tickers', default='SP500TR', help='Tickers (use comma)')
@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')
@click.option('--n', default=10000, help='Display prices every n price events')
@click.option('--n_window', default=5, help='Display n_window prices')
def main(config, testing, tickers, filename, n, n_window):
tickers = tickers.split(",")
config = settings.from_file(config, testing)
run(config, testing, tickers, filename, n, n_window)
if __name__ == "__main__":
main()
|
{
"content_hash": "38be145e6394c82c19358d20b2f5f488",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 91,
"avg_line_length": 35.074074074074076,
"alnum_prop": 0.737064413938754,
"repo_name": "nwillemse/nctrader",
"id": "7f224c154b2e9624c8a98b4fbc71b5b768a7fe36",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/display_prices_backtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223113"
}
],
"symlink_target": ""
}
|
import os
import optparse
import sys
import m5
from m5.objects import *
m5.util.addToPath('../common')
# --------------------
# Define Command Line Options
# ====================
parser = optparse.OptionParser()
parser.add_option("-d", "--detailed", action="store_true")
parser.add_option("-t", "--timing", action="store_true")
parser.add_option("-m", "--maxtick", type="int")
parser.add_option("-n", "--numcpus",
help="Number of cpus in total", type="int")
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
parser.add_option("--l1size",
default = "32kB")
parser.add_option("--l1latency",
default = "1ns")
parser.add_option("--l2size",
default = "256kB")
parser.add_option("--l2latency",
default = "10ns")
parser.add_option("--rootdir",
help="Root directory of Splash2",
default="/dist/splash2/codes")
parser.add_option("-b", "--benchmark",
help="Splash 2 benchmark to run")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if not options.numcpus:
print "Specify the number of cpus with -n"
sys.exit(1)
# --------------------
# Define Splash2 Benchmarks
# ====================
class Cholesky(LiveProcess):
cwd = options.rootdir + '/kernels/cholesky'
executable = options.rootdir + '/kernels/cholesky/CHOLESKY'
cmd = ['CHOLESKY', '-p' + str(options.numcpus),
options.rootdir + '/kernels/cholesky/inputs/tk23.O']
class FFT(LiveProcess):
cwd = options.rootdir + '/kernels/fft'
executable = options.rootdir + '/kernels/fft/FFT'
cmd = ['FFT', '-p', str(options.numcpus), '-m18']
class LU_contig(LiveProcess):
executable = options.rootdir + '/kernels/lu/contiguous_blocks/LU'
cmd = ['LU', '-p', str(options.numcpus)]
cwd = options.rootdir + '/kernels/lu/contiguous_blocks'
class LU_noncontig(LiveProcess):
executable = options.rootdir + '/kernels/lu/non_contiguous_blocks/LU'
cmd = ['LU', '-p', str(options.numcpus)]
cwd = options.rootdir + '/kernels/lu/non_contiguous_blocks'
class Radix(LiveProcess):
executable = options.rootdir + '/kernels/radix/RADIX'
cmd = ['RADIX', '-n524288', '-p', str(options.numcpus)]
cwd = options.rootdir + '/kernels/radix'
class Barnes(LiveProcess):
executable = options.rootdir + '/apps/barnes/BARNES'
cmd = ['BARNES']
input = options.rootdir + '/apps/barnes/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/barnes'
class FMM(LiveProcess):
executable = options.rootdir + '/apps/fmm/FMM'
cmd = ['FMM']
if str(options.numcpus) == '1':
input = options.rootdir + '/apps/fmm/inputs/input.2048'
else:
input = options.rootdir + '/apps/fmm/inputs/input.2048.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/fmm'
class Ocean_contig(LiveProcess):
executable = options.rootdir + '/apps/ocean/contiguous_partitions/OCEAN'
cmd = ['OCEAN', '-p', str(options.numcpus)]
cwd = options.rootdir + '/apps/ocean/contiguous_partitions'
class Ocean_noncontig(LiveProcess):
executable = options.rootdir + '/apps/ocean/non_contiguous_partitions/OCEAN'
cmd = ['OCEAN', '-p', str(options.numcpus)]
cwd = options.rootdir + '/apps/ocean/non_contiguous_partitions'
class Raytrace(LiveProcess):
executable = options.rootdir + '/apps/raytrace/RAYTRACE'
cmd = ['RAYTRACE', '-p' + str(options.numcpus),
options.rootdir + '/apps/raytrace/inputs/teapot.env']
cwd = options.rootdir + '/apps/raytrace'
class Water_nsquared(LiveProcess):
executable = options.rootdir + '/apps/water-nsquared/WATER-NSQUARED'
cmd = ['WATER-NSQUARED']
if options.numcpus==1:
input = options.rootdir + '/apps/water-nsquared/input'
else:
input = options.rootdir + '/apps/water-nsquared/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/water-nsquared'
class Water_spatial(LiveProcess):
executable = options.rootdir + '/apps/water-spatial/WATER-SPATIAL'
cmd = ['WATER-SPATIAL']
if options.numcpus==1:
input = options.rootdir + '/apps/water-spatial/input'
else:
input = options.rootdir + '/apps/water-spatial/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/water-spatial'
# --------------------
# Base L1 Cache Definition
# ====================
class L1(BaseCache):
latency = options.l1latency
block_size = 64
mshrs = 12
tgts_per_mshr = 8
# ----------------------
# Base L2 Cache Definition
# ----------------------
class L2(BaseCache):
block_size = 64
latency = options.l2latency
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
# ----------------------
# Define the cpus
# ----------------------
busFrequency = Frequency(options.frequency)
if options.timing:
cpus = [TimingSimpleCPU(cpu_id = i,
clock=options.frequency)
for i in xrange(options.numcpus)]
elif options.detailed:
cpus = [DerivO3CPU(cpu_id = i,
clock=options.frequency)
for i in xrange(options.numcpus)]
else:
cpus = [AtomicSimpleCPU(cpu_id = i,
clock=options.frequency)
for i in xrange(options.numcpus)]
# ----------------------
# Create a system, and add system wide objects
# ----------------------
system = System(cpu = cpus, physmem = SimpleMemory(),
membus = CoherentBus(clock = busFrequency))
system.clock = '1GHz'
system.toL2bus = CoherentBus(clock = busFrequency)
system.l2 = L2(size = options.l2size, assoc = 8)
# ----------------------
# Connect the L2 cache and memory together
# ----------------------
system.physmem.port = system.membus.master
system.l2.cpu_side = system.toL2bus.master
system.l2.mem_side = system.membus.slave
system.system_port = system.membus.slave
# ----------------------
# Connect the L2 cache and clusters together
# ----------------------
for cpu in cpus:
cpu.addPrivateSplitL1Caches(L1(size = options.l1size, assoc = 1),
L1(size = options.l1size, assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
cpu.connectAllPorts(system.toL2bus, system.membus)
# ----------------------
# Define the root
# ----------------------
root = Root(full_system = False, system = system)
# --------------------
# Pick the correct Splash2 Benchmarks
# ====================
if options.benchmark == 'Cholesky':
root.workload = Cholesky()
elif options.benchmark == 'FFT':
root.workload = FFT()
elif options.benchmark == 'LUContig':
root.workload = LU_contig()
elif options.benchmark == 'LUNoncontig':
root.workload = LU_noncontig()
elif options.benchmark == 'Radix':
root.workload = Radix()
elif options.benchmark == 'Barnes':
root.workload = Barnes()
elif options.benchmark == 'FMM':
root.workload = FMM()
elif options.benchmark == 'OceanContig':
root.workload = Ocean_contig()
elif options.benchmark == 'OceanNoncontig':
root.workload = Ocean_noncontig()
elif options.benchmark == 'Raytrace':
root.workload = Raytrace()
elif options.benchmark == 'WaterNSquared':
root.workload = Water_nsquared()
elif options.benchmark == 'WaterSpatial':
root.workload = Water_spatial()
else:
print >> sys.stderr, """The --benchmark environment variable was set to something improper.
Use Cholesky, FFT, LUContig, LUNoncontig, Radix, Barnes, FMM, OceanContig,
OceanNoncontig, Raytrace, WaterNSquared, or WaterSpatial"""
sys.exit(1)
# --------------------
# Assign the workload to the cpus
# ====================
for cpu in cpus:
cpu.workload = root.workload
# ----------------------
# Run the simulation
# ----------------------
if options.timing or options.detailed:
root.system.mem_mode = 'timing'
# instantiate configuration
m5.instantiate()
# simulate until program terminates
if options.maxtick:
exit_event = m5.simulate(options.maxtick)
else:
exit_event = m5.simulate(m5.MaxTick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
|
{
"content_hash": "654e8be767099df34cd8b3d266b3353c",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 95,
"avg_line_length": 31.914728682170544,
"alnum_prop": 0.6148894826329852,
"repo_name": "lixt/lily2-gem5",
"id": "6880f8db54ff49656a23404adfc4a47ebd4aa47c",
"size": "9839",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "configs/splash2/run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "230559"
},
{
"name": "C",
"bytes": "916884"
},
{
"name": "C++",
"bytes": "9708710"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Perl",
"bytes": "1598428"
},
{
"name": "Python",
"bytes": "3374662"
},
{
"name": "Ruby",
"bytes": "19410"
},
{
"name": "Shell",
"bytes": "2193"
},
{
"name": "TeX",
"bytes": "19361"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#N Period Edge Ratio Computation for single issue with graphical displays
#Imports
from YahooGrabber import YahooGrabber
import numpy as np
import time as t
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
from pandas.parser import CParserError
#Empty data structure assignment
tempdf = pd.DataFrame()
edgelist = []
MFEpoints = None
MFElist = []
MAEpoints = None
MAElist = []
nDay = None
#Variable assignment
#Ticker for testing
ticker = 'NUGT'
#For ATR + MFE/MFA calculation
atrwindow = 20
#For directional signal generation
donchianwindow = 20
#How many days to calculate e-ratio for
LengthOfTest = range(2, 50) #(2,3) = 2 day Eratio // assuming fill at "Entry Price"
#Request data
while True:
try:
#Get data
Asset = YahooGrabber(ticker)
except CParserError:
continue
break
#In sample Trimmer
Asset = Asset[-1000:]
#Make column that represents X axis
Asset['Index'] = Asset.index
#Format for mpl
Asset['IndexToNumber'] = Asset['Index'].apply(mdates.date2num)
#Format Dataframe to feed candlestick_ohlc()
AssetCopy = Asset[['IndexToNumber', 'Open', 'High', 'Low', 'Close', 'Adj Close']].copy()
#Timer begin
start = t.time()
#log returns
Asset['LogRet'] = np.log(Asset['Adj Close']/Asset['Adj Close'].shift(1))
Asset['LogRet'] = Asset['LogRet'].fillna(0)
#Donchian Channel calculation from params
Asset['RollingMax'] = Asset['High'].rolling(window=donchianwindow, center=False).max()
Asset['RollingMin'] = Asset['Low'].rolling(window=donchianwindow, center=False).min()
#Index copies
Asset['Index'] = Asset.index
Asset['RangeIndex'] = range(1, len(Asset.index) + 1)
#ATR calculation
Asset['Method1'] = Asset['High'] - Asset['Low']
Asset['Method2'] = abs((Asset['High'] - Asset['Close'].shift(1)))
Asset['Method3'] = abs((Asset['Low'] - Asset['Close'].shift(1)))
Asset['Method1'] = Asset['Method1'].fillna(0)
Asset['Method2'] = Asset['Method2'].fillna(0)
Asset['Method3'] = Asset['Method3'].fillna(0)
Asset['TrueRange'] = Asset[['Method1','Method2','Method3']].max(axis = 1)
#ATR in points not %
Asset['AverageTrueRangePoints'] = Asset['TrueRange'].rolling(window = atrwindow,
center=False).mean()
#ATR in percent
Asset['AverageTrueRangePercent'] = Asset['AverageTrueRangePoints'] / Asset['Close']
#Signal generation; long if new high and no new high on previous period, short
#if new low and no new low on previous period; if Donchian window or ATR is not calculated stay out of market
Asset['Regime'] = np.where(Asset['High'] > Asset['RollingMax'].shift(1), 1, 0)
Asset['Regime'] = np.where(Asset['Low'] < Asset['RollingMin'].shift(1), -1, Asset['Regime'])
#Stay flat if ATR has not been established // nan > 0 == false
Asset['Regime'] = np.where(Asset['AverageTrueRangePercent'] > 0, Asset['Regime'], 0)
#Find trade date when regime changes
Asset['OriginalTrade'] = 0
Asset['OriginalTrade'].loc[(Asset['Regime'].shift(1) != Asset['Regime']) & (Asset['Regime'] == -1)] = -1
Asset['OriginalTrade'].loc[(Asset['Regime'].shift(1) != Asset['Regime']) & (Asset['Regime'] == 1)] = 1
#Organize entry price & check for gap
Asset['EntryPrice'] = np.nan
#For all original trade days
for a in Asset['OriginalTrade'][Asset['OriginalTrade'] != 0].index:
#If a long trade
if Asset['OriginalTrade'][a] == 1:
#Record previous high as entry
Asset['EntryPrice'].loc[a] = Asset['RollingMax'].shift(1).loc[a]
#Check for open higher than entry gap and reassign
if Asset['Open'][a] > Asset['EntryPrice'][a]:
Asset['EntryPrice'].loc[a] = Asset['Open'][a]
#If a short trade
if Asset['OriginalTrade'][a] == -1:
#Record previous high as entry
Asset['EntryPrice'].loc[a] = Asset['RollingMin'].shift(1).loc[a]
#Check for open lower than entry gap and reassign
if Asset['Open'][a] < Asset['EntryPrice'][a]:
Asset['EntryPrice'].loc[a] = Asset['Open'][a]
Asset['EntryPrice'] = Asset['EntryPrice'].ffill()
#Make list of Original Trade DATES
tradedates = Asset[['OriginalTrade', 'Index', 'RangeIndex', 'Adj Close', 'AverageTrueRangePoints', 'EntryPrice']].loc[(
Asset['OriginalTrade'] != 0)]
#Number of signals generated
numsignals = len(tradedates)
#For number of e-ratio days to compute
for z in LengthOfTest:
print('Calculating ' + str(z) + ' Day e-ratio')
#For each value of RangeIndex on Tradedate
for i in tradedates.RangeIndex:
print('Calculating ' + str(z) + ' Day e-ratio for trade on ' + str(Asset['Index'].loc[Asset['RangeIndex'] == i][0]))
#Assign computation space
tempdf = pd.DataFrame()
#Assign entry price
entryprice = Asset['EntryPrice'].loc[Asset['RangeIndex'] == i][0]
#Take H, L, C, sample data for number of days under study post trade
tempdf['Close'] = Asset['Close'].loc[Asset.index[i:i+z]]
tempdf['High'] = Asset['High'].loc[Asset.index[i:i+z]]
tempdf['Low'] = Asset['Low'].loc[Asset.index[i:i+z]]
#For long trades
if tradedates['OriginalTrade'].loc[tradedates['RangeIndex'] == i][0] == 1:
if len(tempdf) < z-1 :
MFElist.append(np.nan)
MAElist.append(np.nan)
print('Not enough data for this calculation')
continue
print('Long entry at ', entryprice)
#MFE
maxup = max(tempdf['High'] - entryprice)
#MAE
maxdown = max(entryprice - tempdf['Low'])
print('MFE in points = ', maxup)
print('MAE in points = ', maxdown)
#MFE assignment
MFEpoints = maxup
MFElist.append(MFEpoints)
#MAE assignment
MAEpoints = maxdown
MAElist.append(MAEpoints)
#For short trades
if tradedates['OriginalTrade'].loc[tradedates['RangeIndex'] == i][0] == -1:
if len(tempdf) < z-1 :
MFElist.append(np.nan)
MAElist.append(np.nan)
print('Not enough data for this calculation')
continue
print('Short entry at ', entryprice)
#MAE
maxup = max(tempdf['High'] - entryprice)
#MFE
maxdown = max(entryprice - tempdf['Low'])
print('MFE in points = ', maxdown)
print('MAE in points = ', maxup)
#MFE assignment
MFEpoints = maxdown
MFElist.append(MFEpoints)
#MAE assignment
MAEpoints = maxup
MAElist.append(MAEpoints)
#Rotating column name
nDay = str(z)
#Display results
print(MFElist)
print(MAElist)
#To Series
MFESeries = pd.Series(MFElist, index = tradedates.index)
MAESeries = pd.Series(MAElist, index = tradedates.index)
#Display
print(MFESeries)
print(MAESeries)
#Assign MFE/MAE to tradedates
tradedates[nDay + 'DayMFE'] = MFESeries
tradedates[nDay + 'DayMAE'] = MAESeries
#Clear lists
MFElist = []
MAElist = []
#Adjust MFE and MAE for volatility - normalization
tradedates[nDay + 'DayVolAdjMFE'] = tradedates[nDay + 'DayMFE']/tradedates['AverageTrueRangePoints']
tradedates[nDay + 'DayVolAdjMAE'] = tradedates[nDay + 'DayMAE']/tradedates['AverageTrueRangePoints']
#Add MFE and MAE values
sumMFE = sum(tradedates[nDay + 'DayVolAdjMFE'].fillna(0))
sumMAE = sum(tradedates[nDay + 'DayVolAdjMAE'].fillna(0))
#Divide by number of signals
AvgVolAdjMFE = sumMFE/numsignals
AvgVolAdjMAE = sumMAE/numsignals
#Calculate edge ratio
edgeratio = AvgVolAdjMFE/AvgVolAdjMAE
#Display results
print('The ', z, ' day edge ratio is', edgeratio)
#Add results to list
edgelist.append(edgeratio)
#Get calculations ready for graphing
edgeratioframe = pd.DataFrame(index = range(2, len(edgelist) + 2))
edgeratioframe['EdgeRatio'] = edgelist
#Plot edge ratio
edgeratioframe['EdgeRatio'].plot(grid=True, figsize=(8,5))
#End timer
end = t.time()
#Timer stats
print((end - start), ' seconds later.')
#Display results
print('Max eRatio is', max(edgeratioframe['EdgeRatio']))
#Graphics
#X and Y axis scale figure
figure, axe = plt.subplots(figsize = (10,5))
#Assign axis labels
plt.ylabel(ticker + ' Price')
plt.xlabel('Date')
#Overlay
axe.plot(AssetCopy['IndexToNumber'], Asset['RollingMax'], color = 'green', label = 'RollingMax')
axe.plot(AssetCopy['IndexToNumber'], Asset['RollingMin'], color = 'red', label = 'RollingMin')
#axe.plot(Asset['IndexToNumber'], Asset['SMA'], color = 'black', label = 'SMA')
#Signal triangles..
axe.scatter(Asset.loc[Asset['OriginalTrade'] == 1, 'IndexToNumber'].values,
Asset.loc[Asset['OriginalTrade'] == 1, 'EntryPrice'].values, label='skitscat', color='green', s=75, marker="^")
axe.scatter(Asset.loc[Asset['OriginalTrade'] == -1, 'IndexToNumber'].values,
Asset.loc[Asset['OriginalTrade'] == -1, 'EntryPrice'].values, label='skitscat', color='red', s=75, marker="v")
#Plot the DF candlestick values with the figure, object
candlestick_ohlc(axe, AssetCopy.values, width=.6, colorup='green', colordown='red')
#Date formatting
axe.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
#For ATR
figure2, axe2 = plt.subplots(figsize = (10,2))
#Add labels
plt.ylabel(ticker + ' ATR Percent')
plt.xlabel('Date')
#ATR line graph
axe2.plot(AssetCopy['IndexToNumber'], Asset['AverageTrueRangePercent'], color = 'black', label = '4wkATRPercent')
#Date formatting
axe2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
|
{
"content_hash": "662a528fe3d0862d922782d261d4977c",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 124,
"avg_line_length": 37.481617647058826,
"alnum_prop": 0.620892594409024,
"repo_name": "adamrvfisher/TechnicalAnalysisLibrary",
"id": "86336447f4d581e6e06f7d7e81d25f0e6aade778",
"size": "10220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ERatioSingleIssueDonchianTrendIII.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15514"
}
],
"symlink_target": ""
}
|
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class HostAutoStartManager(BaseEntity):
'''The AutoStartManager allows clients to invoke and set up the auto-start/auto-
stop order of virtual machines on a single host. Virtual machines configured to
use auto-start are automatically started or stopped when the host is started or
shut down. The AutoStartManager is available when clients connect directly to a
host, such as an ESX Server machine or through VirtualCenter.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.HostAutoStartManager):
super(HostAutoStartManager, self).__init__(core, name=name, ref=ref, type=type)
@property
def config(self):
''''''
return self.update('config')
def AutoStartPowerOff(self):
'''Powers-off virtual machines according to the current AutoStart configuration
.Powers-off virtual machines according to the current AutoStart configuration.
'''
return self.delegate("AutoStartPowerOff")()
def AutoStartPowerOn(self):
'''Powers-on virtual machines according to the current AutoStart configuration
.Powers-on virtual machines according to the current AutoStart configuration.
'''
return self.delegate("AutoStartPowerOn")()
def ReconfigureAutostart(self, spec):
'''Changes the power-on or power-off sequence and system defaults. The
specification is an incremental change to the current configuration.Changes the
power-on or power-off sequence and system defaults. The specification is an
incremental change to the current configuration.Changes the power-on or power-
off sequence and system defaults. The specification is an incremental change to
the current configuration.Changes the power-on or power-off sequence and system
defaults. The specification is an incremental change to the current
configuration.
:param spec: List of changes to defaults and auto-start/auto-stop order.
'''
return self.delegate("ReconfigureAutostart")(spec)
|
{
"content_hash": "cd57bc11e880b9f6aeec36585febaeed",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 96,
"avg_line_length": 41.689655172413794,
"alnum_prop": 0.684449958643507,
"repo_name": "xuru/pyvisdk",
"id": "4d5cefdf07a703b1f4e7600d3e80756528d08abd",
"size": "2419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/mo/host_auto_start_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
"""Non-terminal symbols of Python grammar (from "graminit.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/symbol.py
#--start constants--
single_input = 256
file_input = 257
eval_input = 258
funcdef = 259
parameters = 260
varargslist = 261
fpdef = 262
fplist = 263
stmt = 264
simple_stmt = 265
small_stmt = 266
expr_stmt = 267
augassign = 268
print_stmt = 269
del_stmt = 270
pass_stmt = 271
flow_stmt = 272
break_stmt = 273
continue_stmt = 274
return_stmt = 275
raise_stmt = 276
import_stmt = 277
import_as_name = 278
dotted_as_name = 279
dotted_name = 280
global_stmt = 281
exec_stmt = 282
assert_stmt = 283
compound_stmt = 284
if_stmt = 285
while_stmt = 286
for_stmt = 287
try_stmt = 288
except_clause = 289
suite = 290
test = 291
and_test = 292
not_test = 293
comparison = 294
comp_op = 295
expr = 296
xor_expr = 297
and_expr = 298
shift_expr = 299
arith_expr = 300
term = 301
factor = 302
power = 303
atom = 304
listmaker = 305
lambdef = 306
trailer = 307
subscriptlist = 308
subscript = 309
sliceop = 310
exprlist = 311
testlist = 312
dictmaker = 313
classdef = 314
arglist = 315
argument = 316
list_iter = 317
list_for = 318
list_if = 319
#--end constants--
sym_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
sym_name[_value] = _name
def main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
token.main()
if __name__ == "__main__":
main()
|
{
"content_hash": "9c6cdafea59b7879518556f467977243",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 69,
"avg_line_length": 18.285714285714285,
"alnum_prop": 0.6748798076923077,
"repo_name": "Integral-Technology-Solutions/ConfigNOW-4.3",
"id": "daf86ab27e9f4323fe72483edc562295368e8c00",
"size": "1688",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Lib/symbol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "HTML",
"bytes": "2243"
},
{
"name": "Java",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2973691"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
from participantCollection import ParticipantCollection
import re
import pyperclip
# Edit me!
nextYearURL = ""
year = 2018
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean _YEAR_ year-long challenge is now over. Join us for **[the NEXT_YEAR challenge](NEXT_YEAR_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('NEXT_YEAR_URL', nextYearURL, answer)
answer = re.sub('_YEAR_', str(year), answer)
answer = re.sub('NEXT_YEAR', str(year + 1), answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
{
"content_hash": "60ec21e1ffdebf6e7f5b63575d08e1ed",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 145,
"avg_line_length": 33.450980392156865,
"alnum_prop": 0.6512309495896834,
"repo_name": "foobarbazblarg/stayclean",
"id": "ad4e4d05764f5e4a9fd3bfac928efbcff8dbe1dc",
"size": "1831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2018/display-final-after-year-is-over.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from blog.models import Tag, BlogPost
admin.site.register(Tag)
admin.site.register(BlogPost)
|
{
"content_hash": "3f2b4e829ecaf79ccafb769a6198ce29",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 37,
"avg_line_length": 13.2,
"alnum_prop": 0.7878787878787878,
"repo_name": "azul-cloud/cityinfo",
"id": "d57d50c9cf0f19d50d60c4527536aa42698e0255",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "166"
},
{
"name": "Python",
"bytes": "66771"
}
],
"symlink_target": ""
}
|
"""Classes for solving optimization models for transmission expansion planning"""
from .OpfModels import *
from .OptModels import *
from .TepScenariosModels import *
#from .TepRobustnessAnalysis import *
#from .TepScenariosNSGA import *
__all__ = ['OpfModels', 'OptModels', 'TepScenariosModels']
|
{
"content_hash": "8cfef32b2129f18ae4b4291882a3898a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 81,
"avg_line_length": 37,
"alnum_prop": 0.7736486486486487,
"repo_name": "csvelasq/TepUnderScenarios",
"id": "01aab30c25bc86aea3a7418e3bf57805b8678bb1",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tepmodel/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135392"
}
],
"symlink_target": ""
}
|
"""
>>> from pybrain.tools.shortcuts import buildNetwork
>>> from test_recurrent_network import buildRecurrentNetwork
>>> from test_peephole_lstm import buildMinimalLSTMNetwork
>>> from test_peephole_mdlstm import buildMinimalMDLSTMNetwork
>>> from test_nested_network import buildNestedNetwork
>>> from test_simple_lstm_network import buildSimpleLSTMNetwork
>>> from test_simple_mdlstm import buildSimpleMDLSTMNetwork
>>> from test_swiping_network import buildSwipingNetwork
>>> from test_shared_connections import buildSharedCrossedNetwork
>>> from test_sliced_connections import buildSlicedNetwork
>>> from test_borderswipingnetwork import buildSimpleBorderSwipingNet
Test a number of network architectures, and compare if they produce the same output,
whether the Python implementation is used, or CTYPES.
Use the network construction scripts in other test files to build a number of networks,
and then test the equivalence of each.
Simple net
>>> testEquivalence(buildNetwork(2,2))
True
A lot of layers
>>> net = buildNetwork(2,3,4,3,2,3,4,3,2)
>>> testEquivalence(net)
True
Nonstandard components
>>> from pybrain.structure import TanhLayer
>>> net = buildNetwork(2,3,2, bias = True, outclass = TanhLayer)
>>> testEquivalence(net)
True
Shared connections
>>> net = buildSharedCrossedNetwork()
>>> testEquivalence(net)
True
Sliced connections
>>> net = buildSlicedNetwork()
>>> testEquivalence(net)
True
Nested networks (not supposed to work yet!)
>>> net = buildNestedNetwork()
>>> testEquivalence(net)
Network cannot be converted.
Recurrent networks
>>> net = buildRecurrentNetwork()
>>> net.name = '22'
>>> net.params[:] = [1,1,0.5]
>>> testEquivalence(net)
True
Swiping networks
>>> net = buildSwipingNetwork()
>>> testEquivalence(net)
True
Border-swiping networks
>>> net = buildSimpleBorderSwipingNet()
>>> testEquivalence(net)
True
Lstm
>>> net = buildSimpleLSTMNetwork()
>>> testEquivalence(net)
True
Mdlstm
>>> net = buildSimpleMDLSTMNetwork()
>>> testEquivalence(net)
True
Lstm with peepholes
>>> net = buildMinimalLSTMNetwork(True)
>>> testEquivalence(net)
True
Mdlstm with peepholes
>>> net = buildMinimalMDLSTMNetwork(True)
>>> testEquivalence(net)
True
TODO:
- heavily nested
- exotic module use
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
_dependencies = ['arac']
from pybrain.tests.helpers import buildAppropriateDataset, epsilonCheck
from pybrain.tests import runModuleTestSuite
def testEquivalence(net):
cnet = net.convertToFastNetwork()
if cnet == None:
return None
ds = buildAppropriateDataset(net)
if net.sequential:
for seq in ds:
net.reset()
cnet.reset()
for input, _ in seq:
res = net.activate(input)
cres = cnet.activate(input)
if net.name == '22':
h = net['hidden0']
ch = cnet['hidden0']
print 'ni', input, net.inputbuffer.T
print 'ci', input, cnet.inputbuffer.T
print 'hni',h.inputbuffer.T[0]
print 'hci', ch.inputbuffer.T[0]
print 'hnout',h.outputbuffer.T[0]
print 'hcout', ch.outputbuffer.T[0]
print
else:
for input, _ in ds:
res = net.activate(input)
cres = cnet.activate(input)
if epsilonCheck(sum(res-cres), 0.001):
return True
else:
print 'in-net', net.inputbuffer.T
print 'in-arac', cnet.inputbuffer.T
print 'out-net', net.outputbuffer.T
print 'out-arac', cnet.outputbuffer.T
return (res, cres)
if __name__ == "__main__":
runModuleTestSuite(__import__('__main__'))
|
{
"content_hash": "36f29a07ced95d4303befcf297465530",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 87,
"avg_line_length": 28.48936170212766,
"alnum_prop": 0.6221060492905153,
"repo_name": "daanwierstra/pybrain",
"id": "ae90db644f67b6587e11d19b789a337b96d1dccb",
"size": "4017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybrain/tests/unittests/_test_equivalence_to_ctypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "380415"
},
{
"name": "Python",
"bytes": "1279804"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
import xpcom
from xpcom import components, nsError, _xpcom, logger
class Factory:
_com_interfaces_ = components.interfaces.nsIFactory
# This will only ever be constructed via other Python code,
# so we can have ctor args.
def __init__(self, klass):
self.klass = klass
def createInstance(self, outer, iid):
if outer is not None:
raise xpcom.ServerException(nsError.NS_ERROR_NO_AGGREGATION)
logger.debug("Python Factory creating %s", self.klass.__name__)
try:
return self.klass()
except:
# An exception here may not be obvious to the user - none
# of their code has been called yet. It can be handy on
# failure to tell the user what class failed!
logger.error("Creation of class '%r' failed!\nException details follow\n",
self.klass)
# The framework itself will also report the error.
raise
def lockServer(self, lock):
logger.debug("Python Factory LockServer called '%s'", lock)
|
{
"content_hash": "bb47e70d182e1e65c296c00b408dbbb7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 38.5,
"alnum_prop": 0.6196660482374768,
"repo_name": "dgomez10/xanon",
"id": "b65483dd2db94907b55825d7a5fdf2c4d285f757",
"size": "2868",
"binary": false,
"copies": "31",
"ref": "refs/heads/master",
"path": "SDK/bindings/xpcom/python/xpcom/server/factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5767"
},
{
"name": "Assembly",
"bytes": "602"
},
{
"name": "C",
"bytes": "1834867"
},
{
"name": "C++",
"bytes": "6956615"
},
{
"name": "CSS",
"bytes": "704096"
},
{
"name": "Go",
"bytes": "228"
},
{
"name": "HTML",
"bytes": "5861682"
},
{
"name": "Haskell",
"bytes": "8060"
},
{
"name": "IDL",
"bytes": "84486"
},
{
"name": "Java",
"bytes": "113453"
},
{
"name": "JavaScript",
"bytes": "328606"
},
{
"name": "Makefile",
"bytes": "9270"
},
{
"name": "PHP",
"bytes": "2262806"
},
{
"name": "Perl",
"bytes": "422783"
},
{
"name": "Python",
"bytes": "5266626"
},
{
"name": "R",
"bytes": "3737"
},
{
"name": "SQLPL",
"bytes": "1764"
},
{
"name": "Shell",
"bytes": "45175"
},
{
"name": "Visual Basic",
"bytes": "3078"
}
],
"symlink_target": ""
}
|
"""Utility functions used across the application."""
import os
def list_files(root_dir, ext):
"""Return the list of files in a directory tree with a given extension.
Args:
root_dir (str): the path to the root directory.
ext (str): the file extension
Returns:
list(str): a list of paths to the files in the directory tree with
the matching file extension.
"""
paths = []
for path, dirs, files in os.walk(root_dir):
for filename in files:
if filename.endswith(ext):
paths.append(os.path.join(path, filename))
return paths
|
{
"content_hash": "718fcde7a742a2c0fc02eb18838169f8",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 28.227272727272727,
"alnum_prop": 0.6231884057971014,
"repo_name": "StuartMacKay/checklists_scrapers",
"id": "7fe5272ec4b7ef353de872b7bab88f9ef2e1d0c4",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checklists_scrapers/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "173637"
},
{
"name": "Shell",
"bytes": "6728"
}
],
"symlink_target": ""
}
|
__author__ = 'Shamal Faily'
class GoalAssociation:
def __init__(self,associationId,envName,goalName,goalDimName,aType,subGoalName,subGoalDimName,alternativeId,rationale):
self.theId = associationId
self.theEnvironmentName = envName
self.theGoal = goalName
self.theGoalDimension = goalDimName
self.theAssociationType = aType
self.theSubGoal = subGoalName
self.theSubGoalDimension = subGoalDimName
self.theAlternativeId = alternativeId
self.theRationale = rationale
def id(self): return self.theId
def environment(self): return self.theEnvironmentName
def goal(self): return self.theGoal
def goalDimension(self): return self.theGoalDimension
def type(self): return self.theAssociationType
def subGoal(self): return self.theSubGoal
def subGoalDimension(self): return self.theSubGoalDimension
def alternative(self): return self.theAlternativeId
def rationale(self): return self.theRationale
def name(self): return self.theEnvironmentName + ' / ' + self.theGoal + ' / ' + self.theSubGoal
def __str__(self): return self.theEnvironmentName + ' / ' + self.theGoal + ' / ' + self.theGoalDimension + ' / ' + self.theAssociationType + ' / ' + self.theSubGoal + ' / ' + self.theSubGoalDimension + ' / ' + str(self.theAlternativeId)
|
{
"content_hash": "0b630799540ab48ea4ef23367668bf07",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 239,
"avg_line_length": 51.32,
"alnum_prop": 0.7427903351519876,
"repo_name": "failys/CAIRIS",
"id": "493a11978d3ff6ac76cb9849942d547f019a684c",
"size": "2081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cairis/core/GoalAssociation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Makefile",
"bytes": "1017"
},
{
"name": "Python",
"bytes": "2813021"
},
{
"name": "Shell",
"bytes": "6461"
},
{
"name": "XSLT",
"bytes": "35533"
}
],
"symlink_target": ""
}
|
"""run_bot.py run a Clusterfuzz bot locally."""
import os
import signal
from local.butler import appengine
from local.butler import common
from local.butler import constants
def _setup_bot_directory(args):
"""Set up the bot directory."""
appengine.symlink_config_dir()
src_root_dir = os.path.abspath('.')
if os.path.exists(args.directory):
print('Bot directory already exists. Re-using...')
else:
print('Creating new CF bot directory...')
os.makedirs(args.directory)
clusterfuzz_dir = os.path.join(args.directory, 'clusterfuzz')
bot_src_dir = os.path.join(clusterfuzz_dir, 'src')
if not os.path.exists(clusterfuzz_dir):
os.makedirs(clusterfuzz_dir)
os.mkdir(bot_src_dir)
common.update_dir(
os.path.join(src_root_dir, 'src', 'appengine'),
os.path.join(bot_src_dir, 'appengine'))
common.update_dir(
os.path.join(src_root_dir, 'src', 'python'),
os.path.join(bot_src_dir, 'python'))
common.update_dir(
os.path.join(src_root_dir, 'src', 'clusterfuzz'),
os.path.join(bot_src_dir, 'clusterfuzz'))
common.update_dir(
os.path.join(src_root_dir, 'src', 'third_party'),
os.path.join(bot_src_dir, 'third_party'))
common.update_dir(
os.path.join(src_root_dir, 'resources'),
os.path.join(clusterfuzz_dir, 'resources'))
common.update_dir(
os.path.join(src_root_dir, 'bot'), os.path.join(clusterfuzz_dir, 'bot'))
def _setup_environment_and_configs(args, appengine_path):
"""Set up environment variables and configuration files."""
clusterfuzz_dir = os.path.abspath(os.path.join(args.directory, 'clusterfuzz'))
# Matches startup scripts.
os.environ['PYTHONPATH'] = ':'.join([
os.getenv('PYTHONPATH', ''),
appengine_path,
os.path.join(clusterfuzz_dir, 'src'),
])
os.environ['ROOT_DIR'] = clusterfuzz_dir
if not os.getenv('BOT_NAME'):
os.environ['BOT_NAME'] = args.name
os.environ['LD_LIBRARY_PATH'] = '{0}:{1}'.format(
os.path.join(clusterfuzz_dir, 'src', 'clusterfuzz', '_internal',
'scripts'), os.getenv('LD_LIBRARY_PATH', ''))
tmpdir = os.path.join(clusterfuzz_dir, 'bot_tmpdir')
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
os.environ['TMPDIR'] = tmpdir
os.environ['BOT_TMPDIR'] = tmpdir
os.environ['KILL_STALE_INSTANCES'] = 'False'
os.environ['LOCAL_DEVELOPMENT'] = 'True'
os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST
os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST
os.environ['APPLICATION_ID'] = constants.TEST_APP_ID
if not os.getenv('UNTRUSTED_WORKER'):
local_gcs_buckets_path = os.path.abspath(
os.path.join(args.server_storage_path, 'local_gcs'))
assert os.path.exists(local_gcs_buckets_path), (
'Server storage path not found, make sure to start run_server with '
'the same storage path.')
os.environ['LOCAL_GCS_BUCKETS_PATH'] = local_gcs_buckets_path
if args.android_serial:
if not os.getenv('OS_OVERRIDE'):
os.environ['OS_OVERRIDE'] = 'ANDROID'
os.environ['ANDROID_SERIAL'] = args.android_serial
def execute(args):
"""Run the bot."""
appengine_path = appengine.find_sdk_path()
_setup_bot_directory(args)
_setup_environment_and_configs(args, appengine_path)
try:
os.chdir(os.path.join(args.directory, 'clusterfuzz'))
proc = common.execute_async('python src/python/bot/startup/run_bot.py')
def _stop_handler(*_):
print('Bot has been stopped. Exit.')
proc.kill()
signal.signal(signal.SIGTERM, _stop_handler)
common.process_proc_output(proc)
proc.wait()
except KeyboardInterrupt:
_stop_handler()
|
{
"content_hash": "052a2132447167b4e34d178eaabf59ef",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 80,
"avg_line_length": 32.46017699115044,
"alnum_prop": 0.6682115594329335,
"repo_name": "google/clusterfuzz",
"id": "19807b8df8c1d4eceaaec8ff886d9f5a74fbcee6",
"size": "4243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/local/butler/run_bot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
}
|
"""Parser for Molpro output files"""
import itertools
import numpy
from . import logfileparser
from . import utils
def create_atomic_orbital_names(orbitals):
"""Generate all atomic orbital names that could be used by Molpro.
The names are returned in a dictionary, organized by subshell (S, P, D and so on).
"""
# We can write out the first two manually, since there are not that many.
atomic_orbital_names = {
'S': ['s', '1s'],
'P': ['x', 'y', 'z', '2px', '2py', '2pz'],
}
# Although we could write out all names for the other subshells, it is better
# to generate them if we need to expand further, since the number of functions quickly
# grows and there are both Cartesian and spherical variants to consider.
# For D orbitals, the Cartesian functions are xx, yy, zz, xy, xz and yz, and the
# spherical ones are called 3d0, 3d1-, 3d1+, 3d2- and 3d2+. For F orbitals, the Cartesians
# are xxx, xxy, xxz, xyy, ... and the sphericals are 4f0, 4f1-, 4f+ and so on.
for i, orb in enumerate(orbitals):
# Cartesian can be generated directly by combinations.
cartesian = list(map(''.join, list(itertools.combinations_with_replacement(['x', 'y', 'z'], i+2))))
# For spherical functions, we need to construct the names.
pre = str(i+3) + orb.lower()
spherical = [pre + '0'] + [pre + str(j) + s for j in range(1, i+3) for s in ['-', '+']]
atomic_orbital_names[orb] = cartesian + spherical
return atomic_orbital_names
class Molpro(logfileparser.Logfile):
"""Molpro file parser"""
atomic_orbital_names = create_atomic_orbital_names(['D', 'F', 'G'])
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(Molpro, self).__init__(logname="Molpro", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "Molpro log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Molpro("%s")' % (self.filename)
def normalisesym(self, label):
"""Normalise the symmetries used by Molpro."""
ans = label.replace("`", "'").replace("``", "''")
return ans
def before_parsing(self):
self.electronorbitals = ""
self.insidescf = False
def after_parsing(self):
# If optimization thresholds are default, they are normally not printed and we need
# to set them to the default after parsing. Make sure to set them in the same order that
# they appear in the in the geometry optimization progress printed in the output,
# namely: energy difference, maximum gradient, maximum step.
if not hasattr(self, "geotargets"):
self.geotargets = []
# Default THRENERG (required accuracy of the optimized energy).
self.geotargets.append(1E-6)
# Default THRGRAD (required accuracy of the optimized gradient).
self.geotargets.append(3E-4)
# Default THRSTEP (convergence threshold for the geometry optimization step).
self.geotargets.append(3E-4)
def _parse_orbitals(self, inputfile, line):
# From this block aonames, atombasis, moenergies and mocoeffs can be parsed. The data is
# flipped compared to most programs (GAMESS, Gaussian), since the MOs are in rows. Also, Molpro
# does not cut the table into parts, rather each MO row has as many lines as it takes ro print
# all of the MO coefficients. Each row normally has 10 coefficients, although this can be less
# for the last row and when symmetry is used (each irrep has its own block).
#
# ELECTRON ORBITALS
# =================
#
#
# Orb Occ Energy Couls-En Coefficients
#
# 1 1s 1 1s 1 2px 1 2py 1 2pz 2 1s (...)
# 3 1s 3 1s 3 2px 3 2py 3 2pz 4 1s (...)
# (...)
#
# 1.1 2 -11.0351 -43.4915 0.701460 0.025696 -0.000365 -0.000006 0.000000 0.006922 (...)
# -0.006450 0.004742 -0.001028 -0.002955 0.000000 -0.701460 (...)
# (...)
#
# If an MCSCF calculation was performed, the natural orbitals
# (coefficients and occupation numbers) are printed in a
# format nearly identical to the ELECTRON ORBITALS section.
#
# NATURAL ORBITALS (state averaged)
# =================================
#
# Orb Occ Energy Coefficients
#
# 1 s 1 s 1 s 1 z 1 z 1 xx 1 yy 1 zz 2 s 2 s
# 2 s 2 z 2 z 2 xx 2 yy 2 zz 3 s 3 s 3 z 3 y
#
# 1.1 2.00000 -20.678730 0.000141 -0.000057 0.001631 -0.001377 0.001117 0.000029 0.000293 -0.000852 1.000748 0.001746
# -0.002552 -0.002005 0.001658 -0.001266 -0.001274 -0.001001 0.000215 -0.000131 -0.000242 -0.000126
#
# 2.1 2.00000 -11.322823 1.000682 0.004626 -0.000485 0.006634 -0.002096 -0.003072 -0.003282 -0.001724 -0.000181 0.006734
# -0.002398 -0.000527 0.001335 0.000091 0.000058 0.000396 -0.003219 0.000981 0.000250 -0.000191
# (...)
# The assigment of final cclib attributes is different for
# canonical/natural orbitals.
self.naturalorbitals = (line[1:17] == "NATURAL ORBITALS")
# Make sure we didn't get here by mistake.
assert line[1:18] == "ELECTRON ORBITALS" or self.electronorbitals or self.naturalorbitals
# For unrestricted calculations, ELECTRON ORBITALS is followed on the same line
# by FOR POSITIVE SPIN or FOR NEGATIVE SPIN as appropriate.
spin = (line[19:36] == "FOR NEGATIVE SPIN") or (self.electronorbitals[19:36] == "FOR NEGATIVE SPIN")
if self.naturalorbitals:
self.skip_lines(inputfile, ['equals', 'b', 'headers', 'b'])
else:
if not self.electronorbitals:
self.skip_line(inputfile, 'equals')
self.skip_lines(inputfile, ['b', 'b', 'headers', 'b'])
aonames = []
atombasis = [[] for i in range(self.natom)]
moenergies = []
# Use for both canonical and natural orbital coefficients.
mocoeffs = []
occnos = []
line = next(inputfile)
# Besides a double blank line, stop when the next orbitals are encountered for unrestricted jobs
# or if there are stars on the line which always signifies the end of the block.
while line.strip() and (not "ORBITALS" in line) and (not set(line.strip()) == {'*'}):
# The function names are normally printed just once, but if symmetry is used then each irrep
# has its own mocoeff block with a preceding list of names.
is_aonames = line[:25].strip() == ""
if is_aonames:
# We need to save this offset for parsing the coefficients later.
offset = len(aonames)
aonum = len(aonames)
while line.strip():
for s in line.split():
if s.isdigit():
atomno = int(s)
atombasis[atomno-1].append(aonum)
aonum += 1
else:
functype = s
element = self.table.element[self.atomnos[atomno-1]]
aoname = "%s%i_%s" % (element, atomno, functype)
aonames.append(aoname)
line = next(inputfile)
# Now there can be one or two blank lines.
while not line.strip():
line = next(inputfile)
# Newer versions of Molpro (for example, 2012 test files) will print some
# more things here, such as HOMO and LUMO, but these have less than 10 columns.
if "HOMO" in line or "LUMO" in line:
break
# End of the NATURAL ORBITALS section.
if "Natural orbital dump" in line:
break
# Now parse the MO coefficients, padding the list with an appropriate amount of zeros.
coeffs = [0.0 for i in range(offset)]
while line.strip() != "":
if line[:31].rstrip():
tokens = line.split()
moenergy = float(tokens[2])
moenergy = utils.convertor(moenergy, "hartree", "eV")
moenergies.append(moenergy)
if self.naturalorbitals:
occno = float(tokens[1])
occnos.append(occno)
# Coefficients are in 10.6f format and splitting does not work since there are not
# always spaces between them. If the numbers are very large, there will be stars.
str_coeffs = line[31:]
ncoeffs = len(str_coeffs) // 10
coeff = []
for ic in range(ncoeffs):
p = str_coeffs[ic*10:(ic+1)*10]
try:
c = float(p)
except ValueError as detail:
self.logger.warn("setting coeff element to zero: %s" % detail)
c = 0.0
coeff.append(c)
coeffs.extend(coeff)
line = next(inputfile)
mocoeffs.append(coeffs)
# The loop should keep going until there is a double blank line, and there is
# a single line between each coefficient block.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
# If symmetry was used (offset was needed) then we will need to pad all MO vectors
# up to nbasis for all irreps before the last one.
if offset > 0:
for im, m in enumerate(mocoeffs):
if len(m) < self.nbasis:
mocoeffs[im] = m + [0.0 for i in range(self.nbasis - len(m))]
self.set_attribute('atombasis', atombasis)
self.set_attribute('aonames', aonames)
if self.naturalorbitals:
# Consistent with current cclib conventions, keep only the
# last possible set of natural orbital coefficients and
# occupation numbers.
self.nocoeffs = mocoeffs
self.nooccnos = occnos
else:
# Consistent with current cclib conventions, reset moenergies/mocoeffs if they have been
# previously parsed, since we want to produce only the final values.
if not hasattr(self, "moenergies") or spin == 0:
self.mocoeffs = []
self.moenergies = []
self.moenergies.append(moenergies)
self.mocoeffs.append(mocoeffs)
# Check if last line begins the next ELECTRON ORBITALS section, because we already used
# this line and need to know when this method is called next time.
if line[1:18] == "ELECTRON ORBITALS":
self.electronorbitals = line
else:
self.electronorbitals = ""
return
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# extract the version number first
if "Version" in line:
self.metadata["package_version"] = line.split()[1]
if line[1:19] == "ATOMIC COORDINATES":
if not hasattr(self, "atomcoords"):
self.atomcoords = []
atomcoords = []
atomnos = []
self.skip_lines(inputfile, ['line', 'line', 'line'])
line = next(inputfile)
while line.strip():
temp = line.strip().split()
atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[3:6]]) # bohrs to angs
atomnos.append(int(round(float(temp[2]))))
line = next(inputfile)
self.atomcoords.append(atomcoords)
self.set_attribute('atomnos', atomnos)
self.set_attribute('natom', len(self.atomnos))
# Use BASIS DATA to parse input for gbasis, aonames and atombasis. If symmetry is used,
# the function number starts from 1 for each irrep (the irrep index comes after the dot).
#
# BASIS DATA
#
# Nr Sym Nuc Type Exponents Contraction coefficients
#
# 1.1 A 1 1s 71.616837 0.154329
# 13.045096 0.535328
# 3.530512 0.444635
# 2.1 A 1 1s 2.941249 -0.099967
# 0.683483 0.399513
# ...
#
if line[1:11] == "BASIS DATA":
# We can do a sanity check with the header.
self.skip_line(inputfile, 'blank')
header = next(inputfile)
assert header.split() == ["Nr", "Sym", "Nuc", "Type", "Exponents", "Contraction", "coefficients"]
self.skip_line(inputfile, 'blank')
aonames = []
atombasis = [[] for i in range(self.natom)]
gbasis = [[] for i in range(self.natom)]
while line.strip():
# We need to read the line at the start of the loop here, because the last function
# will be added when a blank line signalling the end of the block is encountered.
line = next(inputfile)
# The formatting here can exhibit subtle differences, including the number of spaces
# or indentation size. However, we will rely on explicit slices since not all components
# are always available. In fact, components not being there has some meaning (see below).
line_nr = line[1:6].strip()
line_sym = line[7:9].strip()
line_nuc = line[11:15].strip()
line_type = line[16:22].strip()
line_exp = line[25:38].strip()
line_coeffs = line[38:].strip()
# If a new function type is printed or the BASIS DATA block ends with a blank line,
# then add the previous function to gbasis, except for the first function since
# there was no preceeding one. When translating the Molpro function name to gbasis,
# note that Molpro prints all components, but we want it only once, with the proper
# shell type (S,P,D,F,G). Molpro names also differ between Cartesian/spherical representations.
if (line_type and aonames) or line.strip() == "":
# All the possible AO names are created with the class. The function should always
# find a match in that dictionary, so we can check for that here and will need to
# update the dict if something unexpected comes up.
funcbasis = None
for fb, names in self.atomic_orbital_names.items():
if functype in names:
funcbasis = fb
assert funcbasis
# There is a separate basis function for each column of contraction coefficients. Since all
# atomic orbitals for a subshell will have the same parameters, we can simply check if
# the function tuple is already in gbasis[i] before adding it.
for i in range(len(coefficients[0])):
func = (funcbasis, [])
for j in range(len(exponents)):
func[1].append((exponents[j], coefficients[j][i]))
if func not in gbasis[funcatom-1]:
gbasis[funcatom-1].append(func)
# If it is a new type, set up the variables for the next shell(s). An exception is symmetry functions,
# which we want to copy from the previous function and don't have a new number on the line. For them,
# we just want to update the nuclear index.
if line_type:
if line_nr:
exponents = []
coefficients = []
functype = line_type
funcatom = int(line_nuc)
# Add any exponents and coefficients to lists.
if line_exp and line_coeffs:
funcexp = float(line_exp)
funccoeffs = [float(s) for s in line_coeffs.split()]
exponents.append(funcexp)
coefficients.append(funccoeffs)
# If the function number is present then add to atombasis and aonames, which is different from
# adding to gbasis since it enumerates AOs rather than basis functions. The number counts functions
# in each irrep from 1 and we could add up the functions for each irrep to get the global count,
# but it is simpler to just see how many aonames we have already parsed. Any symmetry functions
# are also printed, but they don't get numbers so they are nor parsed.
if line_nr:
element = self.table.element[self.atomnos[funcatom-1]]
aoname = "%s%i_%s" % (element, funcatom, functype)
aonames.append(aoname)
funcnr = len(aonames)
atombasis[funcatom-1].append(funcnr-1)
self.set_attribute('aonames', aonames)
self.set_attribute('atombasis', atombasis)
self.set_attribute('gbasis', gbasis)
if line[1:23] == "NUMBER OF CONTRACTIONS":
nbasis = int(line.split()[3])
self.set_attribute('nbasis', nbasis)
# Basis set name
if line[1:8] == "Library":
self.metadata["basis_set"] = line.split()[4]
# This is used to signalize whether we are inside an SCF calculation.
if line[1:8] == "PROGRAM" and line[14:18] == "-SCF":
self.insidescf = True
self.metadata["methods"].append("HF")
# Use this information instead of 'SETTING ...', in case the defaults are standard.
# Note that this is sometimes printed in each geometry optimization step.
if line[1:20] == "NUMBER OF ELECTRONS":
spinup = int(line.split()[3][:-1])
spindown = int(line.split()[4][:-1])
# Nuclear charges (atomnos) should be parsed by now.
nuclear = numpy.sum(self.atomnos)
charge = nuclear - spinup - spindown
self.set_attribute('charge', charge)
mult = spinup - spindown + 1
self.set_attribute('mult', mult)
# Convergenve thresholds for SCF cycle, should be contained in a line such as:
# CONVERGENCE THRESHOLDS: 1.00E-05 (Density) 1.40E-07 (Energy)
if self.insidescf and line[1:24] == "CONVERGENCE THRESHOLDS:":
if not hasattr(self, "scftargets"):
self.scftargets = []
scftargets = list(map(float, line.split()[2::2]))
self.scftargets.append(scftargets)
# Usually two criteria, but save the names this just in case.
self.scftargetnames = line.split()[3::2]
# Read in the print out of the SCF cycle - for scfvalues. For RHF looks like:
# ITERATION DDIFF GRAD ENERGY 2-EL.EN. DIPOLE MOMENTS DIIS
# 1 0.000D+00 0.000D+00 -379.71523700 1159.621171 0.000000 0.000000 0.000000 0
# 2 0.000D+00 0.898D-02 -379.74469736 1162.389787 0.000000 0.000000 0.000000 1
# 3 0.817D-02 0.144D-02 -379.74635529 1162.041033 0.000000 0.000000 0.000000 2
# 4 0.213D-02 0.571D-03 -379.74658063 1162.159929 0.000000 0.000000 0.000000 3
# 5 0.799D-03 0.166D-03 -379.74660889 1162.144256 0.000000 0.000000 0.000000 4
if self.insidescf and line[1:10] == "ITERATION":
if not hasattr(self, "scfvalues"):
self.scfvalues = []
line = next(inputfile)
energy = 0.0
scfvalues = []
while line.strip() != "":
chomp = line.split()
if chomp[0].isdigit():
ddiff = float(chomp[1].replace('D', 'E'))
grad = float(chomp[2].replace('D', 'E'))
newenergy = float(chomp[3])
ediff = newenergy - energy
energy = newenergy
# The convergence thresholds must have been read above.
# Presently, we recognize MAX DENSITY and MAX ENERGY thresholds.
numtargets = len(self.scftargetnames)
values = [numpy.nan]*numtargets
for n, name in zip(list(range(numtargets)), self.scftargetnames):
if "ENERGY" in name.upper():
values[n] = ediff
elif "DENSITY" in name.upper():
values[n] = ddiff
scfvalues.append(values)
try:
line = next(inputfile)
except StopIteration:
self.logger.warning('File terminated before end of last SCF! Last gradient: {}'.format(grad))
break
self.scfvalues.append(numpy.array(scfvalues))
# SCF result - RHF/UHF and DFT (RKS) energies.
if (line[1:5] in ["!RHF", "!UHF", "!RKS"] and line[16:22].lower() == "energy"):
if not hasattr(self, "scfenergies"):
self.scfenergies = []
scfenergy = float(line.split()[4])
self.scfenergies.append(utils.convertor(scfenergy, "hartree", "eV"))
# We are now done with SCF cycle (after a few lines).
self.insidescf = False
# MP2 energies.
if line[1:5] == "!MP2":
self.metadata["methods"].append("MP2")
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
mp2energy = float(line.split()[-1])
mp2energy = utils.convertor(mp2energy, "hartree", "eV")
self.mpenergies.append([mp2energy])
# MP2 energies if MP3 or MP4 is also calculated.
if line[1:5] == "MP2:":
self.metadata["methods"].append("MP2")
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
mp2energy = float(line.split()[2])
mp2energy = utils.convertor(mp2energy, "hartree", "eV")
self.mpenergies.append([mp2energy])
# MP3 (D) and MP4 (DQ or SDQ) energies.
if line[1:8] == "MP3(D):":
self.metadata["methods"].append("MP3")
mp3energy = float(line.split()[2])
mp2energy = utils.convertor(mp3energy, "hartree", "eV")
line = next(inputfile)
self.mpenergies[-1].append(mp2energy)
if line[1:9] == "MP4(DQ):":
self.metadata["methods"].append("MP4")
mp4energy = float(line.split()[2])
line = next(inputfile)
if line[1:10] == "MP4(SDQ):":
self.metadata["methods"].append("MP4")
mp4energy = float(line.split()[2])
mp4energy = utils.convertor(mp4energy, "hartree", "eV")
self.mpenergies[-1].append(mp4energy)
# The CCSD program operates all closed-shel coupled cluster runs.
if line[1:15] == "PROGRAM * CCSD":
self.metadata["methods"].append("CCSD")
if not hasattr(self, "ccenergies"):
self.ccenergies = []
while line[1:20] != "Program statistics:":
# The last energy (most exact) will be read last and thus saved.
if line[1:5] == "!CCD" or line[1:6] == "!CCSD" or line[1:9] == "!CCSD(T)":
ccenergy = float(line.split()[-1])
ccenergy = utils.convertor(ccenergy, "hartree", "eV")
line = next(inputfile)
self.ccenergies.append(ccenergy)
# Read the occupancy (index of HOMO s).
# For restricted calculations, there is one line here. For unrestricted, two:
# Final alpha occupancy: ...
# Final beta occupancy: ...
if line[1:17] == "Final occupancy:":
self.homos = [int(line.split()[-1])-1]
if line[1:23] == "Final alpha occupancy:":
self.homos = [int(line.split()[-1])-1]
line = next(inputfile)
self.homos.append(int(line.split()[-1])-1)
# Dipole is always printed on one line after the final RHF energy, and by default
# it seems Molpro uses the origin as the reference point.
if line.strip()[:13] == "Dipole moment":
assert line.split()[2] == "/Debye"
reference = [0.0, 0.0, 0.0]
dipole = [float(d) for d in line.split()[-3:]]
if not hasattr(self, 'moments'):
self.moments = [reference, dipole]
else:
self.moments[1] == dipole
# Static dipole polarizability.
if line.strip() == "SCF dipole polarizabilities":
if not hasattr(self, "polarizabilities"):
self.polarizabilities = []
polarizability = []
self.skip_lines(inputfile, ['b', 'directions'])
for _ in range(3):
line = next(inputfile)
polarizability.append(line.split()[1:])
self.polarizabilities.append(numpy.array(polarizability))
# Check for ELECTRON ORBITALS (canonical molecular orbitals).
if line[1:18] == "ELECTRON ORBITALS" or self.electronorbitals:
self._parse_orbitals(inputfile, line)
# If the MATROP program was called appropriately,
# the atomic obital overlap matrix S is printed.
# The matrix is printed straight-out, ten elements in each row, both halves.
# Note that is the entire matrix is not printed, then aooverlaps
# will not have dimensions nbasis x nbasis.
if line[1:9] == "MATRIX S":
if not hasattr(self, "aooverlaps"):
self.aooverlaps = [[]]
self.skip_lines(inputfile, ['b', 'symblocklabel'])
line = next(inputfile)
while line.strip() != "":
elements = [float(s) for s in line.split()]
if len(self.aooverlaps[-1]) + len(elements) <= self.nbasis:
self.aooverlaps[-1] += elements
else:
n = len(self.aooverlaps[-1]) + len(elements) - self.nbasis
self.aooverlaps[-1] += elements[:-n]
self.aooverlaps.append([])
self.aooverlaps[-1] += elements[-n:]
line = next(inputfile)
# Check for MCSCF natural orbitals.
if line[1:17] == "NATURAL ORBITALS":
self._parse_orbitals(inputfile, line)
# Thresholds are printed only if the defaults are changed with GTHRESH.
# In that case, we can fill geotargets with non-default values.
# The block should look like this as of Molpro 2006.1:
# THRESHOLDS:
# ZERO = 1.00D-12 ONEINT = 1.00D-12 TWOINT = 1.00D-11 PREFAC = 1.00D-14 LOCALI = 1.00D-09 EORDER = 1.00D-04
# ENERGY = 0.00D+00 ETEST = 0.00D+00 EDENS = 0.00D+00 THRDEDEF= 1.00D-06 GRADIENT= 1.00D-02 STEP = 1.00D-03
# ORBITAL = 1.00D-05 CIVEC = 1.00D-05 COEFF = 1.00D-04 PRINTCI = 5.00D-02 PUNCHCI = 9.90D+01 OPTGRAD = 3.00D-04
# OPTENERG= 1.00D-06 OPTSTEP = 3.00D-04 THRGRAD = 2.00D-04 COMPRESS= 1.00D-11 VARMIN = 1.00D-07 VARMAX = 1.00D-03
# THRDOUB = 0.00D+00 THRDIV = 1.00D-05 THRRED = 1.00D-07 THRPSP = 1.00D+00 THRDC = 1.00D-10 THRCS = 1.00D-10
# THRNRM = 1.00D-08 THREQ = 0.00D+00 THRDE = 1.00D+00 THRREF = 1.00D-05 SPARFAC = 1.00D+00 THRDLP = 1.00D-07
# THRDIA = 1.00D-10 THRDLS = 1.00D-07 THRGPS = 0.00D+00 THRKEX = 0.00D+00 THRDIS = 2.00D-01 THRVAR = 1.00D-10
# THRLOC = 1.00D-06 THRGAP = 1.00D-06 THRLOCT = -1.00D+00 THRGAPT = -1.00D+00 THRORB = 1.00D-06 THRMLTP = 0.00D+00
# THRCPQCI= 1.00D-10 KEXTA = 0.00D+00 THRCOARS= 0.00D+00 SYMTOL = 1.00D-06 GRADTOL = 1.00D-06 THROVL = 1.00D-08
# THRORTH = 1.00D-08 GRID = 1.00D-06 GRIDMAX = 1.00D-03 DTMAX = 0.00D+00
if line[1:12] == "THRESHOLDS":
self.skip_line(input, 'blank')
line = next(inputfile)
while line.strip():
if "OPTENERG" in line:
start = line.find("OPTENERG")
optenerg = line[start+10:start+20]
if "OPTGRAD" in line:
start = line.find("OPTGRAD")
optgrad = line[start+10:start+20]
if "OPTSTEP" in line:
start = line.find("OPTSTEP")
optstep = line[start+10:start+20]
line = next(inputfile)
self.geotargets = [optenerg, optgrad, optstep]
# The optimization history is the source for geovlues:
#
# END OF GEOMETRY OPTIMIZATION. TOTAL CPU: 246.9 SEC
#
# ITER. ENERGY(OLD) ENERGY(NEW) DE GRADMAX GRADNORM GRADRMS STEPMAX STEPLEN STEPRMS
# 1 -382.02936898 -382.04914450 -0.01977552 0.11354875 0.20127947 0.01183997 0.12972761 0.20171740 0.01186573
# 2 -382.04914450 -382.05059234 -0.00144784 0.03299860 0.03963339 0.00233138 0.05577169 0.06687650 0.00393391
# 3 -382.05059234 -382.05069136 -0.00009902 0.00694359 0.01069889 0.00062935 0.01654549 0.02016307 0.00118606
# ...
#
# The above is an exerpt from Molpro 2006, but it is a little bit different
# for Molpro 2012, namely the 'END OF GEOMETRY OPTIMIZATION occurs after the
# actual history list. It seems there is a another consistent line before the
# history, but this might not be always true -- so this is a potential weak link.
if line[1:30] == "END OF GEOMETRY OPTIMIZATION." or line.strip() == "Quadratic Steepest Descent - Minimum Search":
# I think this is the trigger for convergence, and it shows up at the top in Molpro 2006.
geometry_converged = line[1:30] == "END OF GEOMETRY OPTIMIZATION."
self.skip_line(inputfile, 'blank')
# Newer version of Molpro (at least for 2012) print and additional column
# with the timing information for each step. Otherwise, the history looks the same.
headers = next(inputfile).split()
if not len(headers) in (10, 11):
return
# Although criteria can be changed, the printed format should not change.
# In case it does, retrieve the columns for each parameter.
index_ITER = headers.index('ITER.')
index_THRENERG = headers.index('DE')
index_THRGRAD = headers.index('GRADMAX')
index_THRSTEP = headers.index('STEPMAX')
line = next(inputfile)
self.geovalues = []
while line.strip():
line = line.split()
istep = int(line[index_ITER])
geovalues = []
geovalues.append(float(line[index_THRENERG]))
geovalues.append(float(line[index_THRGRAD]))
geovalues.append(float(line[index_THRSTEP]))
self.geovalues.append(geovalues)
line = next(inputfile)
if line.strip() == "Freezing grid":
line = next(inputfile)
# The convergence trigger shows up somewhere at the bottom in Molpro 2012,
# before the final stars. If convergence is not reached, there is an additional
# line that can be checked for. This is a little tricky, though, since it is
# not the last line... so bail out of the loop if convergence failure is detected.
while "*****" not in line:
line = next(inputfile)
if line.strip() == "END OF GEOMETRY OPTIMIZATION.":
geometry_converged = True
if "No convergence" in line:
geometry_converged = False
break
# Finally, deal with optdone, append the last step to it only if we had convergence.
if not hasattr(self, 'optdone'):
self.optdone = []
if geometry_converged:
self.optdone.append(istep-1)
# This block should look like this:
# Normal Modes
#
# 1 Au 2 Bu 3 Ag 4 Bg 5 Ag
# Wavenumbers [cm-1] 151.81 190.88 271.17 299.59 407.86
# Intensities [km/mol] 0.33 0.28 0.00 0.00 0.00
# Intensities [relative] 0.34 0.28 0.00 0.00 0.00
# CX1 0.00000 -0.01009 0.02577 0.00000 0.06008
# CY1 0.00000 -0.05723 -0.06696 0.00000 0.06349
# CZ1 -0.02021 0.00000 0.00000 0.11848 0.00000
# CX2 0.00000 -0.01344 0.05582 0.00000 -0.02513
# CY2 0.00000 -0.06288 -0.03618 0.00000 0.00349
# CZ2 -0.05565 0.00000 0.00000 0.07815 0.00000
# ...
# Molpro prints low frequency modes in a subsequent section with the same format,
# which also contains zero frequency modes, with the title:
# Normal Modes of low/zero frequencies
if line[1:13] == "Normal Modes":
islow = (line[1:37] == "Normal Modes of low/zero frequencies")
self.skip_line(inputfile, 'blank')
# Each portion of five modes is followed by a single blank line.
# The whole block is followed by an additional blank line.
line = next(inputfile)
while line.strip():
if line[1:25].isspace():
if not islow: # vibsyms not printed for low freq modes
numbers = list(map(int, line.split()[::2]))
vibsyms = line.split()[1::2]
else:
# give low freq modes an empty str as vibsym
# note there could be other possibilities..
numbers = list(map(int, line.split()))
vibsyms = ['']*len(numbers)
if line[1:12] == "Wavenumbers":
vibfreqs = list(map(float, line.strip().split()[2:]))
if line[1:21] == "Intensities [km/mol]":
vibirs = list(map(float, line.strip().split()[2:]))
# There should always by 3xnatom displacement rows.
if line[1:11].isspace() and line[13:25].strip().isdigit():
# There are a maximum of 5 modes per line.
nmodes = len(line.split())-1
vibdisps = []
for i in range(nmodes):
vibdisps.append([])
for n in range(self.natom):
vibdisps[i].append([])
for i in range(nmodes):
disp = float(line.split()[i+1])
vibdisps[i][0].append(disp)
for i in range(self.natom*3 - 1):
line = next(inputfile)
iatom = (i+1)//3
for i in range(nmodes):
disp = float(line.split()[i+1])
vibdisps[i][iatom].append(disp)
line = next(inputfile)
if not line.strip():
if not hasattr(self, "vibfreqs"):
self.vibfreqs = []
if not hasattr(self, "vibsyms"):
self.vibsyms = []
if not hasattr(self, "vibirs") and "vibirs" in dir():
self.vibirs = []
if not hasattr(self, "vibdisps") and "vibdisps" in dir():
self.vibdisps = []
if not islow:
self.vibfreqs.extend(vibfreqs)
self.vibsyms.extend(vibsyms)
if "vibirs" in dir():
self.vibirs.extend(vibirs)
if "vibdisps" in dir():
self.vibdisps.extend(vibdisps)
else:
nonzero = [f > 0 for f in vibfreqs]
vibfreqs = [f for f in vibfreqs if f > 0]
self.vibfreqs = vibfreqs + self.vibfreqs
vibsyms = [vibsyms[i] for i in range(len(vibsyms)) if nonzero[i]]
self.vibsyms = vibsyms + self.vibsyms
if "vibirs" in dir():
vibirs = [vibirs[i] for i in range(len(vibirs)) if nonzero[i]]
self.vibirs = vibirs + self.vibirs
if "vibdisps" in dir():
vibdisps = [vibdisps[i] for i in range(len(vibdisps)) if nonzero[i]]
self.vibdisps = vibdisps + self.vibdisps
line = next(inputfile)
if line[1:16] == "Force Constants":
self.logger.info("Creating attribute hessian")
self.hessian = []
line = next(inputfile)
hess = []
tmp = []
while line.strip():
try:
list(map(float, line.strip().split()[2:]))
except:
line = next(inputfile)
line.strip().split()[1:]
hess.extend([list(map(float, line.strip().split()[1:]))])
line = next(inputfile)
lig = 0
while (lig == 0) or (len(hess[0]) > 1):
tmp.append(hess.pop(0))
lig += 1
k = 5
while len(hess) != 0:
tmp[k] += hess.pop(0)
k += 1
if (len(tmp[k-1]) == lig):
break
if k >= lig:
k = len(tmp[-1])
for l in tmp:
self.hessian += l
if line[1:14] == "Atomic Masses" and hasattr(self, "hessian"):
line = next(inputfile)
self.amass = list(map(float, line.strip().split()[2:]))
while line.strip():
line = next(inputfile)
self.amass += list(map(float, line.strip().split()[2:]))
#1PROGRAM * POP (Mulliken population analysis)
#
#
# Density matrix read from record 2100.2 Type=RHF/CHARGE (state 1.1)
#
# Population analysis by basis function type
#
# Unique atom s p d f g Total Charge
# 2 C 3.11797 2.88497 0.00000 0.00000 0.00000 6.00294 - 0.00294
# 3 C 3.14091 2.91892 0.00000 0.00000 0.00000 6.05984 - 0.05984
# ...
if line.strip() == "1PROGRAM * POP (Mulliken population analysis)":
self.skip_lines(inputfile, ['b', 'b', 'density_source', 'b', 'func_type', 'b'])
header = next(inputfile)
icharge = header.split().index('Charge')
charges = []
line = next(inputfile)
while line.strip():
cols = line.split()
charges.append(float(cols[icharge]+cols[icharge+1]))
line = next(inputfile)
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
self.atomcharges['mulliken'] = charges
if __name__ == "__main__":
import doctest, molproparser
doctest.testmod(molproparser, verbose=False)
|
{
"content_hash": "45d02dac1ed149b239a5e34c3f3e6570",
"timestamp": "",
"source": "github",
"line_count": 881,
"max_line_length": 140,
"avg_line_length": 46.82406356413167,
"alnum_prop": 0.5191263453893145,
"repo_name": "Schamnad/cclib",
"id": "f69236ac92352068f5d2d54b5ffa06afee626e79",
"size": "41450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cclib/parser/molproparser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arc",
"bytes": "18395"
},
{
"name": "DIGITAL Command Language",
"bytes": "21581"
},
{
"name": "Python",
"bytes": "836753"
},
{
"name": "Shell",
"bytes": "867"
},
{
"name": "TeX",
"bytes": "29388"
}
],
"symlink_target": ""
}
|
from django.utils.encoding import smart_unicode
from django.utils.xmlutils import SimplerXMLGenerator
from rest_framework.compat import StringIO
import re
import xml.etree.ElementTree as ET
# From xml2dict
class XML2Dict(object):
def __init__(self):
pass
def _parse_node(self, node):
node_tree = {}
# Save attrs and text, hope there will not be a child with same name
if node.text:
node_tree = node.text
for (k, v) in node.attrib.items():
k, v = self._namespace_split(k, v)
node_tree[k] = v
#Save childrens
for child in node.getchildren():
tag, tree = self._namespace_split(child.tag, self._parse_node(child))
if tag not in node_tree: # the first time, so store it in dict
node_tree[tag] = tree
continue
old = node_tree[tag]
if not isinstance(old, list):
node_tree.pop(tag)
node_tree[tag] = [old] # multi times, so change old dict to a list
node_tree[tag].append(tree) # add the new one
return node_tree
def _namespace_split(self, tag, value):
"""
Split the tag '{http://cs.sfsu.edu/csc867/myscheduler}patients'
ns = http://cs.sfsu.edu/csc867/myscheduler
name = patients
"""
result = re.compile("\{(.*)\}(.*)").search(tag)
if result:
value.namespace, tag = result.groups()
return (tag, value)
def parse(self, file):
"""parse a xml file to a dict"""
f = open(file, 'r')
return self.fromstring(f.read())
def fromstring(self, s):
"""parse a string"""
t = ET.fromstring(s)
unused_root_tag, root_tree = self._namespace_split(t.tag, self._parse_node(t))
return root_tree
def xml2dict(input):
return XML2Dict().fromstring(input)
# Piston:
class XMLRenderer():
def _to_xml(self, xml, data):
if isinstance(data, (list, tuple)):
for item in data:
xml.startElement("list-item", {})
self._to_xml(xml, item)
xml.endElement("list-item")
elif isinstance(data, dict):
for key, value in data.iteritems():
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key)
elif data is None:
# Don't output any value
pass
else:
xml.characters(smart_unicode(data))
def dict2xml(self, data):
stream = StringIO.StringIO()
xml = SimplerXMLGenerator(stream, "utf-8")
xml.startDocument()
xml.startElement("root", {})
self._to_xml(xml, data)
xml.endElement("root")
xml.endDocument()
return stream.getvalue()
def dict2xml(input):
return XMLRenderer().dict2xml(input)
|
{
"content_hash": "a77cad024cb13d11ebf1418d22ff3ef7",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 86,
"avg_line_length": 29.29,
"alnum_prop": 0.5547968589962444,
"repo_name": "Chilledheart/seahub",
"id": "84fcb5dbbe086c9f133fa05b4e4d5754687c2ad8",
"size": "2929",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "thirdpart/rest_framework/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "319935"
},
{
"name": "HTML",
"bytes": "816154"
},
{
"name": "Java",
"bytes": "2137623"
},
{
"name": "JavaScript",
"bytes": "2884153"
},
{
"name": "Makefile",
"bytes": "1004"
},
{
"name": "PLSQL",
"bytes": "17176"
},
{
"name": "Python",
"bytes": "1625951"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
"""Astroid hooks for the ssl library."""
from astroid import MANAGER, parse, register_module_extender
def ssl_transform():
return parse(
"""
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext, MemoryBIO
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes
try:
from _ssl import RAND_egd
except ImportError:
# LibreSSL does not provide RAND_egd
pass
from _ssl import (OP_ALL, OP_CIPHER_SERVER_PREFERENCE,
OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3,
OP_NO_TLSv1, OP_NO_TLSv1_1, OP_NO_TLSv1_2,
OP_SINGLE_DH_USE, OP_SINGLE_ECDH_USE)
from _ssl import (ALERT_DESCRIPTION_ACCESS_DENIED, ALERT_DESCRIPTION_BAD_CERTIFICATE,
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE,
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE,
ALERT_DESCRIPTION_BAD_RECORD_MAC,
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED,
ALERT_DESCRIPTION_CERTIFICATE_REVOKED,
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN,
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE,
ALERT_DESCRIPTION_CLOSE_NOTIFY, ALERT_DESCRIPTION_DECODE_ERROR,
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE,
ALERT_DESCRIPTION_DECRYPT_ERROR,
ALERT_DESCRIPTION_HANDSHAKE_FAILURE,
ALERT_DESCRIPTION_ILLEGAL_PARAMETER,
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY,
ALERT_DESCRIPTION_INTERNAL_ERROR,
ALERT_DESCRIPTION_NO_RENEGOTIATION,
ALERT_DESCRIPTION_PROTOCOL_VERSION,
ALERT_DESCRIPTION_RECORD_OVERFLOW,
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE,
ALERT_DESCRIPTION_UNKNOWN_CA,
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY,
ALERT_DESCRIPTION_UNRECOGNIZED_NAME,
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE,
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION,
ALERT_DESCRIPTION_USER_CANCELLED)
from _ssl import (SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE, SSL_ERROR_SSL,
SSL_ERROR_SYSCALL, SSL_ERROR_WANT_CONNECT, SSL_ERROR_WANT_READ,
SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_ZERO_RETURN)
from _ssl import VERIFY_CRL_CHECK_CHAIN, VERIFY_CRL_CHECK_LEAF, VERIFY_DEFAULT, VERIFY_X509_STRICT
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN
from _ssl import _OPENSSL_API_VERSION
from _ssl import PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
from _ssl import PROTOCOL_TLS, PROTOCOL_TLS_CLIENT, PROTOCOL_TLS_SERVER
"""
)
register_module_extender(MANAGER, "ssl", ssl_transform)
|
{
"content_hash": "76b426663e4789902fe019a5257a5a31",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 102,
"avg_line_length": 50.353846153846156,
"alnum_prop": 0.617781851512374,
"repo_name": "ruchee/vimrc",
"id": "ddccca94dfc31e97d4fe5bcf60e97c38b46a0a5a",
"size": "3764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/astroid/astroid/brain/brain_ssl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
}
|
"""Utility functions for handling times, dates, etc."""
import datetime
import logging
import re
import warnings
import cftime
import numpy as np
import pandas as pd
import xarray as xr
from ..internal_names import (
BOUNDS_STR, RAW_END_DATE_STR, RAW_START_DATE_STR,
SUBSET_END_DATE_STR, SUBSET_START_DATE_STR, TIME_BOUNDS_STR, TIME_STR,
TIME_WEIGHTS_STR
)
def apply_time_offset(time, years=0, months=0, days=0, hours=0):
"""Apply a specified offset to the given time array.
This is useful for GFDL model output of instantaneous values. For example,
3 hourly data postprocessed to netCDF files spanning 1 year each will
actually have time values that are offset by 3 hours, such that the first
value is for 1 Jan 03:00 and the last value is 1 Jan 00:00 of the
subsequent year. This causes problems in xarray, e.g. when trying to group
by month. It is resolved by manually subtracting off those three hours,
such that the dates span from 1 Jan 00:00 to 31 Dec 21:00 as desired.
Parameters
----------
time : xarray.DataArray representing a timeseries
years, months, days, hours : int, optional
The number of years, months, days, and hours, respectively, to offset
the time array by. Positive values move the times later.
Returns
-------
pandas.DatetimeIndex
Examples
--------
Case of a length-1 input time array:
>>> times = xr.DataArray(datetime.datetime(1899, 12, 31, 21))
>>> apply_time_offset(times)
Timestamp('1900-01-01 00:00:00')
Case of input time array with length greater than one:
>>> times = xr.DataArray([datetime.datetime(1899, 12, 31, 21),
... datetime.datetime(1899, 1, 31, 21)])
>>> apply_time_offset(times) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['1900-01-01', '1899-02-01'], dtype='datetime64[ns]',
freq=None)
"""
return (pd.to_datetime(time.values) +
pd.tseries.offsets.DateOffset(years=years, months=months,
days=days, hours=hours))
def average_time_bounds(ds):
"""Return the average of each set of time bounds in the Dataset.
Useful for creating a new time array to replace the Dataset's native time
array, in the case that the latter matches either the start or end bounds.
This can cause errors in grouping (akin to an off-by-one error) if the
timesteps span e.g. one full month each. Note that the Dataset's times
must not have already undergone "CF decoding", wherein they are converted
from floats using the 'units' attribute into datetime objects.
Parameters
----------
ds : xarray.Dataset
A Dataset containing a time bounds array with name matching
internal_names.TIME_BOUNDS_STR. This time bounds array must have two
dimensions, one of which's coordinates is the Dataset's time array, and
the other is length-2.
Returns
-------
xarray.DataArray
The mean of the start and end times of each timestep in the original
Dataset.
Raises
------
ValueError
If the time bounds array doesn't match the shape specified above.
"""
bounds = ds[TIME_BOUNDS_STR]
new_times = bounds.mean(dim=BOUNDS_STR, keep_attrs=True)
new_times = new_times.drop(TIME_STR).rename(TIME_STR)
new_times[TIME_STR] = new_times
return new_times
def monthly_mean_ts(arr):
"""Convert a sub-monthly time-series into one of monthly means.
Also drops any months with no data in the original DataArray.
Parameters
----------
arr : xarray.DataArray
Timeseries of sub-monthly temporal resolution data
Returns
-------
xarray.DataArray
Array resampled to comprise monthly means
See Also
--------
monthly_mean_at_each_ind : Copy monthly means to each submonthly time
"""
return arr.resample(**{TIME_STR: '1M'}).mean(TIME_STR).dropna(TIME_STR)
def monthly_mean_at_each_ind(monthly_means, sub_monthly_timeseries):
"""Copy monthly mean over each time index in that month.
Parameters
----------
monthly_means : xarray.DataArray
array of monthly means
sub_monthly_timeseries : xarray.DataArray
array of a timeseries at sub-monthly time resolution
Returns
-------
xarray.DataArray with eath monthly mean value from `monthly_means` repeated
at each time within that month from `sub_monthly_timeseries`
See Also
--------
monthly_mean_ts : Create timeseries of monthly mean values
"""
time = monthly_means[TIME_STR]
start = time.indexes[TIME_STR][0].replace(day=1, hour=0)
end = time.indexes[TIME_STR][-1]
new_indices = pd.DatetimeIndex(start=start, end=end, freq='MS')
arr_new = monthly_means.reindex(time=new_indices, method='backfill')
return arr_new.reindex_like(sub_monthly_timeseries, method='pad')
def yearly_average(arr, dt):
"""Average a sub-yearly time-series over each year.
Resulting timeseries comprises one value for each year in which the
original array had valid data. Accounts for (i.e. ignores) masked values
in original data when computing the annual averages.
Parameters
----------
arr : xarray.DataArray
The array to be averaged
dt : xarray.DataArray
Array of the duration of each timestep
Returns
-------
xarray.DataArray
Has the same shape and mask as the original ``arr``, except for the
time dimension, which is truncated to one value for each year that
``arr`` spanned
"""
assert_matching_time_coord(arr, dt)
yr_str = TIME_STR + '.year'
# Retain original data's mask.
dt = dt.where(np.isfinite(arr))
return ((arr*dt).groupby(yr_str).sum(TIME_STR) /
dt.groupby(yr_str).sum(TIME_STR))
def ensure_datetime(obj):
"""Return the object if it is a datetime-like object
Parameters
----------
obj : Object to be tested.
Returns
-------
The original object if it is a datetime-like object
Raises
------
TypeError if `obj` is not datetime-like
"""
_VALID_TYPES = (str, datetime.datetime, cftime.datetime,
np.datetime64)
if isinstance(obj, _VALID_TYPES):
return obj
raise TypeError("datetime-like object required. "
"Type given: {}".format(type(obj)))
def datetime_or_default(date, default):
"""Return a datetime-like object or a default.
Parameters
----------
date : `None` or datetime-like object or str
default : The value to return if `date` is `None`
Returns
-------
`default` if `date` is `None`, otherwise returns the result of
`utils.times.ensure_datetime(date)`
"""
if date is None:
return default
else:
return ensure_datetime(date)
def month_indices(months):
"""Convert string labels for months to integer indices.
Parameters
----------
months : str, int
If int, number of the desired month, where January=1, February=2,
etc. If str, must match either 'ann' or some subset of
'jfmamjjasond'. If 'ann', use all months. Otherwise, use the
specified months.
Returns
-------
np.ndarray of integers corresponding to desired month indices
Raises
------
TypeError : If `months` is not an int or str
See also
--------
_month_conditional
"""
if not isinstance(months, (int, str)):
raise TypeError("`months` must be of type int or str: "
"type(months) == {}".format(type(months)))
if isinstance(months, int):
return [months]
if months.lower() == 'ann':
return np.arange(1, 13)
first_letter = 'jfmamjjasond' * 2
# Python indexing starts at 0; month indices start at 1 for January.
count = first_letter.count(months)
if (count == 0) or (count > 2):
message = ("The user must provide a unique pattern of consecutive "
"first letters of months within '{}'. The provided "
"string '{}' does not comply."
" For individual months use integers."
"".format(first_letter, months))
raise ValueError(message)
st_ind = first_letter.find(months.lower())
return np.arange(st_ind, st_ind + len(months)) % 12 + 1
def _month_conditional(time, months):
"""Create a conditional statement for selecting data in a DataArray.
Parameters
----------
time : xarray.DataArray
Array of times for which to subsample for specific months.
months : int, str, or xarray.DataArray of times
If int or str, passed to `month_indices`
Returns
-------
Array of bools specifying which months to keep
See Also
--------
month_indices
"""
if isinstance(months, (int, str)):
months_array = month_indices(months)
else:
months_array = months
cond = False
for month in months_array:
cond |= (time['{}.month'.format(TIME_STR)] == month)
return cond
def extract_months(time, months):
"""Extract times within specified months of the year.
Parameters
----------
time : xarray.DataArray
Array of times that can be represented by numpy.datetime64 objects
(i.e. the year is between 1678 and 2262).
months : Desired months of the year to include
Returns
-------
xarray.DataArray of the desired times
"""
inds = _month_conditional(time, months)
return time.sel(time=inds)
def ensure_time_avg_has_cf_metadata(ds):
"""Add time interval length and bounds coordinates for time avg data.
If the Dataset or DataArray contains time average data, enforce
that there are coordinates that track the lower and upper bounds of
the time intervals, and that there is a coordinate that tracks the
amount of time per time average interval.
CF conventions require that a quantity stored as time averages
over time intervals must have time and time_bounds coordinates [1]_.
aospy further requires AVERAGE_DT for time average data, for accurate
time-weighted averages, which can be inferred from the CF-required
time_bounds coordinate if needed. This step should be done
prior to decoding CF metadata with xarray to ensure proper
computed timedeltas for different calendar types.
.. [1] http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_data_representative_of_cells
Parameters
----------
ds : Dataset or DataArray
Input data
Returns
-------
Dataset or DataArray
Time average metadata attributes added if needed.
""" # noqa: E501
if TIME_WEIGHTS_STR not in ds:
time_weights = ds[TIME_BOUNDS_STR].diff(BOUNDS_STR)
time_weights = time_weights.rename(TIME_WEIGHTS_STR).squeeze()
if BOUNDS_STR in time_weights.coords:
time_weights = time_weights.drop(BOUNDS_STR)
ds[TIME_WEIGHTS_STR] = time_weights
raw_start_date = ds[TIME_BOUNDS_STR].isel(**{TIME_STR: 0, BOUNDS_STR: 0})
ds[RAW_START_DATE_STR] = raw_start_date.reset_coords(drop=True)
raw_end_date = ds[TIME_BOUNDS_STR].isel(**{TIME_STR: -1, BOUNDS_STR: 1})
ds[RAW_END_DATE_STR] = raw_end_date.reset_coords(drop=True)
for coord in [TIME_BOUNDS_STR, RAW_START_DATE_STR, RAW_END_DATE_STR]:
ds[coord].attrs['units'] = ds[TIME_STR].attrs['units']
if 'calendar' in ds[TIME_STR].attrs:
ds[coord].attrs['calendar'] = ds[TIME_STR].attrs['calendar']
unit_interval = ds[TIME_STR].attrs['units'].split('since')[0].strip()
ds[TIME_WEIGHTS_STR].attrs['units'] = unit_interval
return ds
def add_uniform_time_weights(ds):
"""Append uniform time weights to a Dataset.
All DataArrays with a time coordinate require a time weights coordinate.
For Datasets read in without a time bounds coordinate or explicit
time weights built in, aospy adds uniform time weights at each point
in the time coordinate.
Parameters
----------
ds : Dataset
Input data
Returns
-------
Dataset
"""
time = ds[TIME_STR]
unit_interval = time.attrs['units'].split('since')[0].strip()
time_weights = xr.ones_like(time)
time_weights.attrs['units'] = unit_interval
del time_weights.attrs['calendar']
ds[TIME_WEIGHTS_STR] = time_weights
return ds
def _assert_has_data_for_time(da, start_date, end_date):
"""Check to make sure data is in Dataset for the given time range.
Parameters
----------
da : DataArray
DataArray with a time variable
start_date : datetime-like object or str
start date
end_date : datetime-like object or str
end date
Raises
------
AssertionError
If the time range is not within the time range of the DataArray
"""
if isinstance(start_date, str) and isinstance(end_date, str):
logging.warning(
'When using strings to specify start and end dates, the check '
'to determine if data exists for the full extent of the desired '
'interval is not implemented. Therefore it is possible that '
'you are doing a calculation for a lesser interval than you '
'specified. If you would like this check to occur, use explicit '
'datetime-like objects for bounds instead.')
return
if RAW_START_DATE_STR in da.coords:
with warnings.catch_warnings(record=True):
da_start = da[RAW_START_DATE_STR].values
da_end = da[RAW_END_DATE_STR].values
else:
times = da.time.isel(**{TIME_STR: [0, -1]})
da_start, da_end = times.values
message = ('Data does not exist for requested time range: {0} to {1};'
' found data from time range: {2} to {3}.')
# Add tolerance of one second, due to precision of cftime.datetimes
tol = datetime.timedelta(seconds=1)
if isinstance(da_start, np.datetime64):
tol = np.timedelta64(tol, 'ns')
range_exists = ((da_start - tol) <= start_date and
(da_end + tol) >= end_date)
assert (range_exists), message.format(start_date, end_date,
da_start, da_end)
def sel_time(da, start_date, end_date):
"""Subset a DataArray or Dataset for a given date range.
Ensures that data are present for full extent of requested range.
Appends start and end date of the subset to the DataArray.
Parameters
----------
da : DataArray or Dataset
data to subset
start_date : np.datetime64
start of date interval
end_date : np.datetime64
end of date interval
Returns
----------
da : DataArray or Dataset
subsetted data
Raises
------
AssertionError
if data for requested range do not exist for part or all of
requested range
"""
_assert_has_data_for_time(da, start_date, end_date)
da[SUBSET_START_DATE_STR] = xr.DataArray(start_date)
da[SUBSET_END_DATE_STR] = xr.DataArray(end_date)
return da.sel(**{TIME_STR: slice(start_date, end_date)})
def assert_matching_time_coord(arr1, arr2):
"""Check to see if two DataArrays have the same time coordinate.
Parameters
----------
arr1 : DataArray or Dataset
First DataArray or Dataset
arr2 : DataArray or Dataset
Second DataArray or Dataset
Raises
------
ValueError
If the time coordinates are not identical between the two Datasets
"""
message = ('Time weights not indexed by the same time coordinate as'
' computed data. This will lead to an improperly computed'
' time weighted average. Exiting.\n'
'arr1: {}\narr2: {}')
if not (arr1[TIME_STR].identical(arr2[TIME_STR])):
raise ValueError(message.format(arr1[TIME_STR], arr2[TIME_STR]))
def ensure_time_as_index(ds):
"""Ensures that time is an indexed coordinate on relevant quantites.
Sometimes when the data we load from disk has only one timestep, the
indexing of time-defined quantities in the resulting xarray.Dataset gets
messed up, in that the time bounds array and data variables don't get
indexed by time, even though they should. Therefore, we need this helper
function to (possibly) correct this.
Note that this must be applied before CF-conventions are decoded; otherwise
it casts ``np.datetime64[ns]`` as ``int`` values.
Parameters
----------
ds : Dataset
Dataset with a time coordinate
Returns
-------
Dataset
"""
time_indexed_coords = {TIME_WEIGHTS_STR, TIME_BOUNDS_STR}
time_indexed_vars = set(ds.data_vars).union(time_indexed_coords)
time_indexed_vars = time_indexed_vars.intersection(ds.variables)
for name in time_indexed_vars:
if TIME_STR not in ds[name].indexes:
da = ds[name].expand_dims(TIME_STR)
da[TIME_STR] = ds[TIME_STR]
ds[name] = da
return ds
def infer_year(date):
"""Given a datetime-like object or string infer the year.
Parameters
----------
date : datetime-like object or str
Input date
Returns
-------
int
Examples
--------
>>> infer_year('2000')
2000
>>> infer_year('2000-01')
2000
>>> infer_year('2000-01-31')
2000
>>> infer_year(datetime.datetime(2000, 1, 1))
2000
>>> infer_year(np.datetime64('2000-01-01'))
2000
>>> infer_year(DatetimeNoLeap(2000, 1, 1))
2000
>>>
"""
if isinstance(date, str):
# Look for a string that begins with four numbers; the first four
# numbers found are the year.
pattern = r'(?P<year>\d{4})'
result = re.match(pattern, date)
if result:
return int(result.groupdict()['year'])
else:
raise ValueError('Invalid date string provided: {}'.format(date))
elif isinstance(date, np.datetime64):
return date.item().year
else:
return date.year
def maybe_convert_to_index_date_type(index, date):
"""Convert a datetime-like object to the index's date type.
Datetime indexing in xarray can be done using either a pandas
DatetimeIndex or a CFTimeIndex. Both support partial-datetime string
indexing regardless of the calendar type of the underlying data;
therefore if a string is passed as a date, we return it unchanged. If a
datetime-like object is provided, it will be converted to the underlying
date type of the index. For a DatetimeIndex that is np.datetime64; for a
CFTimeIndex that is an object of type cftime.datetime specific to the
calendar used.
Parameters
----------
index : pd.Index
Input time index
date : datetime-like object or str
Input datetime
Returns
-------
date of the type appropriate for the time index of the Dataset
"""
if isinstance(date, str):
return date
if isinstance(index, pd.DatetimeIndex):
if isinstance(date, np.datetime64):
return date
else:
return np.datetime64(str(date))
else:
date_type = index.date_type
if isinstance(date, date_type):
return date
else:
if isinstance(date, np.datetime64):
# Convert to datetime.date or datetime.datetime object
date = date.item()
if isinstance(date, datetime.date):
# Convert to a datetime.datetime object
date = datetime.datetime.combine(
date, datetime.datetime.min.time())
return date_type(date.year, date.month, date.day, date.hour,
date.minute, date.second, date.microsecond)
|
{
"content_hash": "f891fe33f59addaa95e3713f53698c40",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 107,
"avg_line_length": 32.6078431372549,
"alnum_prop": 0.6340950090198436,
"repo_name": "spencerkclark/aospy",
"id": "beaea138d470a3eccbf67d993fa9f434d834c73f",
"size": "19956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aospy/utils/times.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "105431"
},
{
"name": "PowerShell",
"bytes": "3190"
},
{
"name": "Python",
"bytes": "362762"
}
],
"symlink_target": ""
}
|
"""Handle Python version/platform incompatibilities."""
import sys
try:
import threading
except ImportError:
import dummy_threading as threading
py32 = sys.version_info >= (3, 2)
py3k_warning = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
py3k = sys.version_info >= (3, 0)
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
win32 = sys.platform.startswith('win')
cpython = not pypy and not jython # TODO: something better for this ?
if py3k_warning:
set_types = set
elif sys.version_info < (2, 6):
import sets
set_types = set, sets.Set
else:
# 2.6 deprecates sets.Set, but we still need to be able to detect them
# in user code and as return values from DB-APIs
ignore = ('ignore', None, DeprecationWarning, None, 0)
import warnings
try:
warnings.filters.insert(0, ignore)
except Exception:
import sets
else:
import sets
warnings.filters.remove(ignore)
set_types = set, sets.Set
if sys.version_info < (2, 6):
def next(iter):
return iter.next()
else:
next = next
if py3k_warning:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
if sys.version_info < (2, 6):
# emits a nasty deprecation warning
# in newer pythons
from cgi import parse_qsl
else:
from urlparse import parse_qsl
# Py3K
#from inspect import getfullargspec as inspect_getfullargspec
# Py2K
from inspect import getargspec as inspect_getfullargspec
# end Py2K
if py3k_warning:
# they're bringing it back in 3.2. brilliant !
def callable(fn):
return hasattr(fn, '__call__')
def cmp(a, b):
return (a > b) - (a < b)
from functools import reduce
else:
callable = callable
cmp = cmp
reduce = reduce
try:
from collections import namedtuple
except ImportError:
def namedtuple(typename, fieldnames):
def __new__(cls, *values):
tup = tuple.__new__(cls, values)
for i, fname in enumerate(fieldnames):
setattr(tup, fname, tup[i])
return tup
tuptype = type(typename, (tuple, ), {'__new__': __new__})
return tuptype
try:
from weakref import WeakSet
except:
import weakref
class WeakSet(object):
"""Implement the small subset of set() which SQLAlchemy needs
here. """
def __init__(self, values=None):
self._storage = weakref.WeakKeyDictionary()
if values is not None:
self._storage.update((value, None) for value in values)
def __iter__(self):
return iter(self._storage)
def union(self, other):
return WeakSet(set(self).union(other))
def add(self, other):
self._storage[other] = True
import time
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
if sys.version_info >= (2, 6):
from operator import attrgetter as dottedgetter
else:
def dottedgetter(attr):
def g(obj):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
return g
|
{
"content_hash": "c6a32d1ddf1c28c0588b7c2e4be39006",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 24.9765625,
"alnum_prop": 0.6199562089458868,
"repo_name": "femmerling/DirMaker",
"id": "12d349a5b86e36d05afabada774fe2b34513ed5f",
"size": "3430",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "box/lib/python2.7/site-packages/sqlalchemy/util/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "JavaScript",
"bytes": "7702"
},
{
"name": "Python",
"bytes": "7333770"
},
{
"name": "Shell",
"bytes": "3518"
}
],
"symlink_target": ""
}
|
from .dataset import Dataset
class DocumentDbCollectionDataset(Dataset):
"""Microsoft Azure Document Database Collection dataset.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Constant filled by server.
:type type: str
:param collection_name: Document Database collection name. Type: string
(or Expression with resultType string).
:type collection_name: object
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
'collection_name': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'collection_name': {'key': 'typeProperties.collectionName', 'type': 'object'},
}
def __init__(self, linked_service_name, collection_name, additional_properties=None, description=None, structure=None, parameters=None, annotations=None):
super(DocumentDbCollectionDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, linked_service_name=linked_service_name, parameters=parameters, annotations=annotations)
self.collection_name = collection_name
self.type = 'DocumentDbCollection'
|
{
"content_hash": "03b46845d2e96dd3fcaa6d8d3f86b4e1",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 237,
"avg_line_length": 47.8235294117647,
"alnum_prop": 0.6867568675686757,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "60c3df76af60def4aa9d17d98dbad47625927bbb",
"size": "2913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/document_db_collection_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import changelog
import re
import subprocess
import sys
def checkExistingTag(version):
if (subprocess.call(('git show-ref --verify --quiet refs/heads/%s' % version).split()) == 0 or
subprocess.call(('git show-ref --verify --quiet refs/tags/%s' % version).split()) == 0):
print "Error: The tag '%s' already exists" % version
raise Exception()
def updateHomepage(version):
file_str = None
with open('build/includes/home/home.html') as f:
file_str = f.read()
file_str = re.sub(r'href="[^"]*"',
'href="https://github.com/lumapps/lumX/archive/%s.zip"' % version,
file_str)
file_str = re.sub(r'<span class="home-banner__version">[^"]*<\/span>',
'<span class="home-banner__version">%s</span>' % version,
file_str)
with open('demo/includes/home/home.html', "w") as f:
f.write(file_str)
def addAndCommitReleaseFiles(version):
subprocess.call(['git', 'add', '-f', 'demo/includes/home/home.html', 'CHANGELOG.md', 'dist'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
subprocess.call(['git', 'commit', '-m', 'chore release: new release %s' % version], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
def commit(version):
changelog.main(version)
print "Adding and committing files..."
addAndCommitReleaseFiles(version)
print "Publishing new commit to master..."
subprocess.call(('git push origin master').split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
print "Publishing to NPM registry..."
subprocess.call(('npm version %s' % version).split())
subprocess.call(('npm publish').split())
print "Push git repository..."
subprocess.call(('git push origin %s' % version).split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
print "Release %s created!" % version
if __name__ == "__main__":
try:
if len(sys.argv) == 1:
print "Error: The version name is required"
raise Exception()
version = sys.argv[1]
checkExistingTag(version)
updateHomepage(version)
commit(version)
except Exception as e:
exit(-1)
|
{
"content_hash": "790a1adcaab3e6c0f5a9d02c5b042985",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 147,
"avg_line_length": 30.75,
"alnum_prop": 0.6183378500451672,
"repo_name": "lumapps/lumX",
"id": "e0a639a7ef8f3dc827ef8e655c949c44899ce740",
"size": "2238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "release.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "161679"
},
{
"name": "HTML",
"bytes": "216875"
},
{
"name": "JavaScript",
"bytes": "268160"
},
{
"name": "Python",
"bytes": "4795"
},
{
"name": "Shell",
"bytes": "933"
}
],
"symlink_target": ""
}
|
"""
Created on Wed Jul 5 16:51:39 2017
@author: AnthonyN
https://www.quantstart.com/articles/Forecasting-Financial-Time-Series-Part-1
Predicting Price Returns
"""
import numpy as np
import pandas as pd
lags = 5
start_test = pd.to_datetime('2017-06-18')
from sklearn.linear_model import LogisticRegression
ts = pd.read_csv('data\XMA.csv', index_col='Date')
ts.index = pd.to_datetime(ts.index)
tslag = ts[['XMA']].copy()
for i in range(0,lags):
tslag["Lag_" + str(i+1)] = tslag["XMA"].shift(i+1)
tslag["returns"] = tslag["XMA"].pct_change()
# Create the lagged percentage returns columns
for i in range(0,lags):
tslag["Lag_" + str(i+1)] = tslag["Lag_" + str(i+1)].pct_change()
tslag.fillna(0, inplace=True)
tslag["Direction"] = np.sign(tslag["returns"])
# Use the prior two days of returns as predictor values, with direction as the response
X = tslag[["Lag_1", "Lag_2"]]
y = tslag["Direction"]
# Create training and test sets
X_train = X[X.index < start_test]
X_test = X[X.index >= start_test]
y_train = y[y.index < start_test]
y_test = y[y.index >= start_test]
# Create prediction DataFrame
pred = pd.DataFrame(index=y_test.index)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
# pred = (1.0 + y_pred * y_test)/2.0
pred = (1.0 + (y_pred == y_test))/2.0
hit_rate = np.mean(pred)
print('Logistic Regresstion {:.4f}'.format(hit_rate))
|
{
"content_hash": "d11c8845a99de2adbabdd4975bf76128",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 27.20754716981132,
"alnum_prop": 0.6525658807212206,
"repo_name": "anthonyng2/Machine-Learning-For-Finance",
"id": "bcf933d7ed5d072bd45256611324464da12be191",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Classification Based Machine Learning for Algorithmic Trading/Predict Next Day Return/spyder_LogisticRegression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12806877"
},
{
"name": "Python",
"bytes": "74616"
}
],
"symlink_target": ""
}
|
import os
import sys
import yaml
import random
import tempfile
import threading
from rackattack.physical.tests.integration import use_local_inaugurator
import rackattack.physical.config
from rackattack.physical.ipmi import IPMI
use_local_inaugurator.verify()
VAR_DIRPATH = os.path.join("/var", "lib", "rackattackphysical")
RACK_CONFIG_FILE_PATH = os.path.join(VAR_DIRPATH, "integration-test.rack.yaml")
FAKE_REBOOTS_PIPE_NAME = os.path.join(VAR_DIRPATH, "fake-reboots-pipe")
GENERAL_CONFIG_FILE_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
rackattack.physical.config.EXAMPLE_CONF_YAML))
def useFakeRackConf():
assert hasattr(rackattack.physical.config, "RACK_YAML")
rackattack.physical.config.RACK_YAML = RACK_CONFIG_FILE_PATH
def useFakeIPMITool():
assert hasattr(IPMI, "IPMITOOL_FILENAME")
IPMI.IPMITOOL_FILENAME = "sh/ipmitool_mock"
def useFakeGeneralConfiguration():
rackattack.physical.config.CONFIGURATION_FILE = GENERAL_CONFIG_FILE_PATH
if __name__ == "__main__":
if not os.path.exists(VAR_DIRPATH):
os.makedirs(VAR_DIRPATH)
useFakeRackConf()
useFakeIPMITool()
useFakeGeneralConfiguration()
nrRacks = 1
nrHostsInRack = 8
hosts = [dict(id="rack%02d-server%02d" % (rackIdx, hostIdx),
ipmiLogin=dict(username="root",
password="strato",
hostname="rack%02d-server%02d-fake-ipmi" % (rackIdx, hostIdx)),
primaryMAC="rack%02d-server%02d-primary-mac" % (rackIdx, hostIdx),
secondaryMAC="rack%02d-server%02d-secondary-mac" % (rackIdx, hostIdx),
topology=dict(rackID="rack%02d" % (rackIdx,)),
state="online") for hostIdx in xrange(1, nrHostsInRack + 1)
for rackIdx in xrange(1, nrRacks + 1)]
rackConf = dict(HOSTS=hosts)
with open(rackattack.physical.config.RACK_YAML, "w") as configFile:
yaml.dump(rackConf, configFile)
# Cannot import main since python does not support spwaning threads from an import context
mainPath = os.path.join(os.curdir, "rackattack", "physical", "main.py")
execfile(mainPath)
neverEnds = threading.Event()
neverEnds.wait()
|
{
"content_hash": "9f326cc5b0535fb3453083f8dff140ce",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 107,
"avg_line_length": 39.28813559322034,
"alnum_prop": 0.6553062985332183,
"repo_name": "Stratoscale/rackattack-physical",
"id": "dc5e5d8235d6256afa5191ec15683af123d47a08",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rackattack/physical/tests/integration/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1851"
},
{
"name": "M4",
"bytes": "688"
},
{
"name": "Makefile",
"bytes": "8407"
},
{
"name": "Python",
"bytes": "232666"
},
{
"name": "Shell",
"bytes": "6319"
}
],
"symlink_target": ""
}
|
import pandas as pd
import torch
import os
from time import time
from tqdm import tqdm
from bert_serving.client import BertClient
data_folder = os.path.dirname(os.getcwd()) + "/data"
train = pd.read_csv(data_folder + "/raw/train.csv")
bc = BertClient()
def gen_encodings(df, column):
t0 = time()
_list = list(df.loc[:, column])
for i, text in enumerate(_list):
if not isinstance(_list[i], str):
_list[i] = str(text)
if not _list[i].strip():
_list[i] = _list[i].strip()
if len(_list[i]) == 0:
_list[i] = "temp"
arr = bc.encode(_list)
temp = pd.DataFrame(arr)
temp.columns = [f"{column}_{c}" for c in range(len(arr[0]))]
temp = temp.join(df.id)
print(f"time: {time() - t0}")
return temp
encoded_train = gen_encodings(train, "title1_en")
encoded_train.to_csv("encoded_train1.csv")
encoded_train = gen_encodings(train, "title2_en")
encoded_train.to_csv("encoded_train2.csv")
|
{
"content_hash": "00c97f14157ca314bee71082c316eed6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 64,
"avg_line_length": 27.8,
"alnum_prop": 0.6145940390544707,
"repo_name": "Diyago/Machine-Learning-scripts",
"id": "f142ca156776f35811ee57a6be6eefc455b8d45b",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DEEP LEARNING/NLP/WSDM - Fake News Classification/Berd generate embeddings/0_bert_encode_en_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55701529"
},
{
"name": "Python",
"bytes": "390944"
},
{
"name": "Shell",
"bytes": "536"
}
],
"symlink_target": ""
}
|
import ctypes
import platform
import sys
libfilename = "libcstreamgeo.so"
if platform.uname()[0] == "Darwin":
libfilename = "libcstreamgeo.dylib"
try:
libcstreamgeo = ctypes.cdll.LoadLibrary(libfilename)
except OSError as e:
sys.stderr.write('ERROR: cannot open shared library %s\n' % libfilename)
sys.stderr.write(' Please make sure that the library can be found.\n')
sys.stderr.write(' For instance, on GNU/Linux, it could be in /usr/lib and /usr/include directories,\n')
sys.stderr.write(' or in some other directory with the environment variable LD_LIBRARY_PATH correctly set.\n')
raise e
int_type = ctypes.c_int
float_type = ctypes.c_float
intp_type = ctypes.POINTER(int_type)
floatp_type = ctypes.POINTER(float_type)
libcstreamgeo.align.argtypes = [int_type, floatp_type, int_type, floatp_type, int_type, intp_type, intp_type]
libcstreamgeo.align.restype = float_type
libcstreamgeo.similarity.argtypes = [int_type, floatp_type, int_type, floatp_type, int_type]
libcstreamgeo.similarity.restype = float_type
libcstreamgeo.reduce_by_rdp.argtypes = [int_type, floatp_type, float_type, intp_type, floatp_type]
libcstreamgeo.reduce_by_rdp.restype = None
libcstreamgeo.stream_distance.argtypes = [int_type, floatp_type]
libcstreamgeo.stream_distance.restype = float_type
libcstreamgeo.stream_sparsity.argtypes = [int_type, floatp_type, intp_type, floatp_type]
libcstreamgeo.stream_sparsity.restype = None
|
{
"content_hash": "cc907d25346239bba099a85e855eb7f9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 120,
"avg_line_length": 39.45945945945946,
"alnum_prop": 0.75,
"repo_name": "mrdmnd/streamgeo",
"id": "76e66eed50e2d664717ffcffd2ab5a896b889ac8",
"size": "1460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pystreamgeo/pystreamgeo/typemap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "86238"
},
{
"name": "CMake",
"bytes": "6487"
},
{
"name": "Java",
"bytes": "1482"
},
{
"name": "Python",
"bytes": "4898"
},
{
"name": "Shell",
"bytes": "1686"
}
],
"symlink_target": ""
}
|
import collections
import os
import sys
import abbreviations
import config
import extractor_util as util
import levenshtein
CACHE = dict() # Cache results of disk I/O
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('pheno_wordidxs', 'int[]'),
('entity', 'text')])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'short_wordidxs',
'long_wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'abbrev_word',
'definition_words',
'entity',
'is_correct'])
### CANDIDATE EXTRACTION ###
# HF = config.PHENO_ACRONYMS['HF']
SR = config.PHENO_ACRONYMS['SR']
def extract_candidate_mentions(row, pos_count, neg_count):
mentions = []
if max(row.pheno_wordidxs) + 2 < len(row.words) and len(row.words[max(row.pheno_wordidxs) + 2]) > 0:
for (is_correct, abbrev, definition, detector_message) in abbreviations.getabbreviations(row.words, abbrev_index=max(row.pheno_wordidxs) + 2):
m = create_supervised_mention(row, is_correct, abbrev, definition, detector_message, pos_count, neg_count)
if m:
mentions.append(m)
return mentions
### DISTANT SUPERVISION ###
VALS = config.PHENO_ACRONYMS['vals']
def create_supervised_mention(row, is_correct,
(start_abbrev, stop_abbrev, abbrev),
(start_definition, stop_definition,
definition), detector_message, pos_count,
neg_count):
assert stop_abbrev == start_abbrev + 1
mid = '%s_%s_%s_%s' % (row.doc_id, row.section_id, row.sent_id, start_abbrev)
include = None
if is_correct:
supertype = 'TRUE_DETECTOR'
subtype = None
elif is_correct is False:
supertype = 'FALSE_DETECTOR'
subtype = detector_message
else:
supertype = 'DETECTOR_OMITTED_SENTENCE'
subtype = None
include = False
if include is not False and is_correct and abbrev.islower():
is_correct = False
supertype = 'FALSE_ALL_LOWERCASE'
subtype = None
if include is not False and is_correct and abbrev in SR['short-words']:
is_correct = False
supertype = 'FALSE_SHORT_WORD'
subtype = None
if include is not False and is_correct and abbrev in SR['bad-pheno-names']:
is_correct = False
supertype = 'FALSE_BAD_PHENO_NAME'
subtype = None
if include is True or (include is not False and (is_correct is True or (is_correct is False and neg_count < pos_count))):
m = Mention(None, row.doc_id, row.section_id,
row.sent_id, [i for i in xrange(start_abbrev, stop_abbrev + 1)],
[i for i in xrange(start_definition, stop_definition + 1)],
mid, supertype, subtype, abbrev, definition, row.entity.strip(), is_correct);
else:
m = None
return m
if __name__ == '__main__':
# load static data
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
pos_count = 0
neg_count = 0
for line in sys.stdin:
row = parser.parse_tsv_row(line)
try:
if '-LRB-' not in row.words[row.pheno_wordidxs[len(row.pheno_wordidxs)-1] + 1]:
continue
except:
pass
#print >> sys.stderr, 'error in condition for extractor pheno_acronyms extract candidates'
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# Find candidate mentions & supervise
mentions = extract_candidate_mentions(row, pos_count, neg_count)
pos_count += len([m for m in mentions if m.is_correct])
neg_count += len([m for m in mentions if m.is_correct is False])
# print output
for mention in mentions:
util.print_tsv_output(mention)
|
{
"content_hash": "82615e448dc7e6909c0c3af179834c0e",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 146,
"avg_line_length": 34.34920634920635,
"alnum_prop": 0.6046672828096118,
"repo_name": "HazyResearch/dd-genomics",
"id": "d345724deea65db0ad621c4aea7787c504d1f3ab",
"size": "4350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/pheno_acronyms_extract_candidates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "22186"
},
{
"name": "Java",
"bytes": "25863"
},
{
"name": "JavaScript",
"bytes": "10928"
},
{
"name": "Jupyter Notebook",
"bytes": "19968"
},
{
"name": "Python",
"bytes": "510253"
},
{
"name": "Shell",
"bytes": "196808"
}
],
"symlink_target": ""
}
|
"""Support for Huawei LTE router notifications."""
import logging
import voluptuous as vol
import attr
from homeassistant.components.notify import (
BaseNotificationService, ATTR_TARGET, PLATFORM_SCHEMA)
from homeassistant.const import CONF_RECIPIENT, CONF_URL
import homeassistant.helpers.config_validation as cv
from . import DATA_KEY
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_URL): cv.url,
vol.Required(CONF_RECIPIENT): vol.All(cv.ensure_list, [cv.string]),
})
async def async_get_service(hass, config, discovery_info=None):
"""Get the notification service."""
return HuaweiLteSmsNotificationService(hass, config)
@attr.s
class HuaweiLteSmsNotificationService(BaseNotificationService):
"""Huawei LTE router SMS notification service."""
hass = attr.ib()
config = attr.ib()
def send_message(self, message="", **kwargs):
"""Send message to target numbers."""
from huawei_lte_api.exceptions import ResponseErrorException
targets = kwargs.get(ATTR_TARGET, self.config.get(CONF_RECIPIENT))
if not targets or not message:
return
data = self.hass.data[DATA_KEY].get_data(self.config)
if not data:
_LOGGER.error("Router not available")
return
try:
resp = data.client.sms.send_sms(
phone_numbers=targets, message=message)
_LOGGER.debug("Sent to %s: %s", targets, resp)
except ResponseErrorException as ex:
_LOGGER.error("Could not send to %s: %s", targets, ex)
|
{
"content_hash": "6ee593a7d138c5c4bc374d486f3a7ba3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 74,
"avg_line_length": 31.01923076923077,
"alnum_prop": 0.6769993800371977,
"repo_name": "aequitas/home-assistant",
"id": "2222c1333dd556b075bc76f3789bb697b11a596f",
"size": "1613",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/huawei_lte/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
'''
Testing functionality of function designed generating spreadsheet
for quintiles on another data set.
Created on 13 Nov 2014
@author: chris
'''
def main():
fname = '/home/chris/Projects/Cookit/family-food-datasets/ConsGORHH-12dec13.xls'
from quintiles import get_fruit_n_veg_data, plottity
regions_dat = get_fruit_n_veg_data(fname)
Countries = [ 'England', 'Wales', 'Scotland', 'Northern Ireland' ]
plottity(regions_dat, Countries, Countries, 'Countries in the UK', 'country', 1)
England_areas = [ 'North East', 'North West', 'Yorkshire and The Humber', 'South West', 'South East', 'East Midlands', 'West Midlands', 'London' ]
England_labels = [ 'North\n East', 'North\n West', 'Yorkshire', 'South\n West', 'South\n East', 'East\n Midlands', 'West\n Midlands', 'London' ]
plottity(regions_dat, England_areas, England_labels, 'Regions in England', 'region', 2)
import matplotlib.pyplot as pl
pl.show()
if __name__ == '__main__':
main()
|
{
"content_hash": "320472f48e6209075e6250a6a3cfe2d0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 150,
"avg_line_length": 34.03333333333333,
"alnum_prop": 0.6571988246816847,
"repo_name": "chrisjdavie/Cookit",
"id": "8b954e556892ee99c50864cd84ae91a771bebf1d",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "5-a-day-thing/Regions/Countries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44601"
}
],
"symlink_target": ""
}
|
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.paged_maintenance_window import PagedMaintenanceWindow # noqa: E501
from wavefront_api_client.rest import ApiException
class TestPagedMaintenanceWindow(unittest.TestCase):
"""PagedMaintenanceWindow unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPagedMaintenanceWindow(self):
"""Test PagedMaintenanceWindow"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.paged_maintenance_window.PagedMaintenanceWindow() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c0e1fb234f69ac03ba113585b7f07b81",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 409,
"avg_line_length": 35.026315789473685,
"alnum_prop": 0.7317806160781367,
"repo_name": "wavefrontHQ/python-client",
"id": "41368e5700c1537aa69642c6369a049cd7f15e48",
"size": "1348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_paged_maintenance_window.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
}
|
from ktane.model.modules.abstract_module import AbstractModule, ModuleState
class MemoryModule(AbstractModule):
def export_to_string(self):
pass
def import_from_string(self, string):
pass
def translate_to_commands(self):
pass
def __init__(self):
super().__init__()
self.name = "MemoryModule"
self.type_number = 6
self.state = ModuleState.ARMED
|
{
"content_hash": "ae7607e76cf65961c048f33da5c3e163",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.6294536817102138,
"repo_name": "hanzikl/ktane-controller",
"id": "d205172c5267331e21e50f5e3e9a3a313231ecb5",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ktane/model/modules/memory_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29426"
}
],
"symlink_target": ""
}
|
'''craigslist blob event module.
This should only be used internally by the client module.'''
import hashlib
import clblob
import clcommon.anybase
import clcommon.profile
class Event(object):
'''Base class for various events used in the client.'''
params = []
def __init__(self, client, method, name, timeout, http_method=None):
self._client = client
if method not in self._client.events:
self._client.events[method] = dict(current=0, max=0, total=0)
self._client.events[method]['total'] += 1
self._client.events[method]['current'] += 1
current = self._client.events[method]['current']
if current > self._client.events[method]['max']:
self._client.events[method]['max'] = current
self.method = method
self.name = name
self.timeout = timeout
self.http_method = http_method or method.upper()
self.parse_response = True
self.profile = clcommon.profile.Profile()
self.data = None
self.modified = None
self.deleted = None
self.modified_deleted = None
self.index_id = None
self.store_id = None
self.encoded = None
self._buckets = None
self._replicas = None
self._is_local = None
def __del__(self):
if hasattr(self, '_client'):
self._client.events[self.method]['current'] -= 1
if hasattr(self, 'profile') and len(self.profile.marks) > 0:
self._client.log.info('profile %s', self.profile)
@property
def url(self):
'''Make a URL for this event.'''
url = '/%s' % self.name
separator = '?'
for param in self.params:
value = getattr(self, param)
if value is not None:
url = '%s%s%s=%s' % (url, separator, param, value)
separator = '&'
return url
def buckets(self, buckets=None):
'''Get or set the buckets for this event.'''
if buckets is not None:
self._buckets = buckets
return
if self._buckets is not None:
return self._buckets
self._buckets = {}
if self.encoded is not False and self._client.config['encode_name']:
self._get_encoded_buckets()
else:
self._get_buckets()
return self._buckets
def _get_buckets(self):
'''Get buckets for a name.'''
name_hash = hashlib.md5(self.name).hexdigest() # pylint: disable=E1101
name_hash = int(name_hash[:8], 16)
for cluster in xrange(len(self._client.weighted_clusters)):
weighted_cluster = self._client.weighted_clusters[cluster]
bucket = weighted_cluster[name_hash % len(weighted_cluster)]
self._buckets[cluster] = bucket
def _get_encoded_buckets(self):
'''Get buckets for an encoded name.'''
if clcommon.anybase.decode(self.name[0], 62) != 0:
raise clblob.InvalidRequest(_('Name version not valid: %s') %
self.name)
buckets = self.name[1:].split('_', 1)[0]
if len(buckets) % 2 != 0:
raise clblob.InvalidRequest(_('Name bucket list corrupt: %s') %
self.name)
buckets = [buckets[offset:offset + 2]
for offset in xrange(0, len(buckets), 2)]
for cluster, bucket in enumerate(buckets):
self._buckets[cluster] = clcommon.anybase.decode(bucket, 62)
def replicas(self, replicas=None):
'''Get or set the replicas for this event.'''
if replicas is not None:
self._replicas = replicas
return
if self._replicas is None:
self._get_replicas()
return self._replicas
def _get_replicas(self):
'''Get a preferred list of replicas for the given buckets. This
will ignore replicas in other clusters if a cluster is configured,
as well as the local replica if the client is a replica.'''
self._replicas = {}
self._is_local = False
for cluster, bucket in self.buckets().iteritems():
if self._client.cluster is None or self._client.cluster == cluster:
if self._client.bucket == bucket:
self._is_local = True
bucket = self._client.config['clusters'][cluster][bucket]
for replica in bucket['replicas']:
if self._client.replica != replica:
self._replicas[replica] = True
@property
def is_local(self):
'''Check to see if the local replica can handle this event.'''
if self._is_local is None:
self._get_replicas()
return self._is_local
@property
def info(self):
'''Make an info dictionary for responses.'''
return dict(name=self.name, modified=self.modified,
deleted=self.deleted, modified_deleted=self.modified_deleted,
buckets=self.buckets())
class Get(Event):
'''Event for tracking getting a blob.'''
params = ['response']
def __init__(self, client, name, response):
super(Get, self).__init__(client, 'get', name,
client.config['request_timeout'])
self.response = response
if response == 'data':
self.parse_response = False
class Delete(Event):
'''Event for tracking deleting a blob.'''
params = ['deleted', 'modified_deleted', 'replicate']
def __init__(self, client, name, replicate):
super(Delete, self).__init__(client, 'delete', name,
client.config['request_timeout'])
self.replicate = replicate
class Put(Event):
'''Event for tracking putting a blob.'''
params = ['modified', 'deleted', 'modified_deleted', 'replicate',
'encoded']
def __init__(self, client, name, replicate, encoded):
super(Put, self).__init__(client, 'put', name,
client.config['request_timeout'])
self.replicate = replicate
self.encoded = encoded
if encoded is False and client.config['encode_name']:
self._encode_name()
def _encode_name(self, version=0):
'''Make a name encoded with clusters and buckets.'''
encoded = [clcommon.anybase.encode(version, 62)]
for _cluster, bucket in sorted(self.buckets().iteritems()):
encoded.append(clcommon.anybase.encode(bucket, 62).zfill(2))
self.name = '%s_%s' % (''.join(encoded), self.name)
self.encoded = True
class Admin(Event):
'''Event for tracking various admin tasks.'''
def __init__(self, client, method, replica=None):
replica = replica or client.replica
if replica is None:
raise clblob.RequestError(_('Must give replica'))
elif replica not in client.config['replicas']:
raise clblob.RequestError(_('Unknown replica: %s') % replica)
super(Admin, self).__init__(client, method,
'_%s/%s' % (method, replica), client.config['admin_timeout'],
'GET')
self.replica = replica
class ConfigCheck(Event):
'''Event for tracking configcheck requests.'''
params = ['brief', 'tolerance']
def __init__(self, client, replica=None):
replica = replica or client.replica
if replica is not None and replica not in client.config['replicas']:
raise clblob.RequestError(_('Unknown replica: %s') % replica)
super(ConfigCheck, self).__init__(client, 'configcheck',
'_configcheck/%s' % replica, client.config['request_timeout'],
'PUT')
self.replica = replica
self.brief = None
self.tolerance = None
class List(Admin):
'''Event for tracking list requests.'''
params = ['modified_start', 'modified_stop', 'checksum', 'checksum_modulo']
def __init__(self, client, replica=None):
super(List, self).__init__(client, 'list', replica)
self.modified_start = None
self.modified_stop = None
self.checksum = None
self.checksum_modulo = None
class Sync(Admin):
'''Event for tracking list requests.'''
params = ['source', 'modified_start', 'modified_stop']
def __init__(self, client, replica=None):
super(Sync, self).__init__(client, 'sync', replica)
self.source = None
self.modified_start = None
self.modified_stop = None
|
{
"content_hash": "0115f9292d7a2c49028423c4f38fbfa8",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 79,
"avg_line_length": 35.30672268907563,
"alnum_prop": 0.5846721409020588,
"repo_name": "pdfernhout/python-clblob",
"id": "101a2a2b5fb61ed1ee3c81bb5c28772094ca1bfe",
"size": "8978",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "clblob/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34277"
},
{
"name": "Python",
"bytes": "163879"
},
{
"name": "Shell",
"bytes": "5549"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingMedian'] , ['Seasonal_Minute'] , ['ARX'] );
|
{
"content_hash": "5c00f236e8aa03806c715747db6f2191",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 91,
"avg_line_length": 41,
"alnum_prop": 0.7195121951219512,
"repo_name": "antoinecarme/pyaf",
"id": "da8369d4e5d0a253558d31f9a3e40d2d5f388bde",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_Seasonal_Minute_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
sentry.conf.settings
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from hashlib import md5
from sentry.conf.defaults import *
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import warnings
# Some sane overrides to better mix with Django
DEBUG = getattr(settings, 'DEBUG', False) and not getattr(settings, 'SENTRY_TESTING', False)
KEY = getattr(settings, 'SENTRY_KEY', md5(settings.SECRET_KEY.encode('utf-8')).hexdigest())
EMAIL_SUBJECT_PREFIX = getattr(settings, 'EMAIL_SUBJECT_PREFIX', EMAIL_SUBJECT_PREFIX)
INTERNAL_IPS = getattr(settings, 'INTERNAL_IPS', INTERNAL_IPS)
SERVER_EMAIL = getattr(settings, 'SERVER_EMAIL', SERVER_EMAIL)
for k in dir(settings):
if k.startswith('SENTRY_'):
locals()[k.split('SENTRY_', 1)[1]] = getattr(settings, k)
LOG_LEVELS = [(k, _(v)) for k, v in LOG_LEVELS]
if locals().get('REMOTE_URL'):
if isinstance(REMOTE_URL, str):
SERVERS = [REMOTE_URL]
elif not isinstance(REMOTE_URL, (list, tuple)):
raise ValueError("Sentry setting 'REMOTE_URL' must be of type list.")
if locals().get('REMOTE_TIMEOUT'):
TIMEOUT = REMOTE_TIMEOUT
def configure(**kwargs):
for k, v in kwargs.items():
if k.upper() != k:
warnings.warn('Invalid setting, \'%s\' which is not defined by Sentry' % k)
else:
locals[k] = v
|
{
"content_hash": "58435e6f1c151ae89bcd489f88a38b22",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 92,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.6707734428473648,
"repo_name": "optimal-outsource/django-sentry",
"id": "eae9dbd0ae1962c239bd538ee5caa203c39c9015",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/conf/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21335"
},
{
"name": "HTML",
"bytes": "54027"
},
{
"name": "JavaScript",
"bytes": "10544"
},
{
"name": "Python",
"bytes": "160610"
}
],
"symlink_target": ""
}
|
"""
Generate data to be used with e.g. network_space.py
"""
import os
import sys
import copy
import pickle
import itertools
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
import numpy as np
import numpy.random as npr
from tqdm import tqdm, trange
from setup import generate_basic_system, generate_two_node_system, generate_motifs
from main import analyze_system
def add_node_to_system(syst):
""" Add additional node to given system in all possible ways
"""
tmp = copy.deepcopy(syst)
# adjust vectors
tmp.fluctuation_vector = np.append(tmp.fluctuation_vector, 0)
tmp.external_influence = np.append(tmp.external_influence, 0)
tmp.initial_state = np.append(tmp.initial_state, 1)
# generate jacobians
dim = tmp.jacobian.shape[0]
horz_stacks = list(itertools.product([0, 1], repeat=dim))
vert_stacks = list(itertools.product([0, 1], repeat=dim))
systems = []
for hs in horz_stacks:
for vs in vert_stacks:
cur = copy.deepcopy(tmp)
vs = np.append(vs, -1) # force self-inhibition
cur.jacobian = np.hstack(
(cur.jacobian, np.array(hs).reshape(-1, 1)))
cur.jacobian = np.vstack(
(cur.jacobian, vs))
systems.append(cur)
return systems
def handle_systems(raw, enhanced):
""" Simulate given systems
"""
# generate control data
raw_res_diff = analyze_system(raw, filter_mask=[3], use_ode_sde_diff=True, save_stdev='results/corr_stdev')
raw_res = analyze_system(raw, filter_mask=[3], use_ode_sde_diff=False)
if raw_res[1] is None or raw_res_diff[1] is None:
return None
# generate data from altered motifs
row = []
for enh in enhanced:
enh_res_diff = analyze_system(
enh, filter_mask=[3],
use_ode_sde_diff=True)
enh_res = analyze_system(
enh, filter_mask=[3],
use_ode_sde_diff=False)
row.append((enh_res, enh_res_diff))
return [(raw_res, raw_res_diff), row]
def generate_system_data(motifs_three):
""" Generate data for a given system
"""
res = []
for motif in motifs_three:
getter = lambda k_m, k_23: motif
cur = generate_data(None, gen_func=getter)
res.append(cur)
return (getter(1,1), res)
def generate_motif_data(prefix):
""" Generate data for all motifs
"""
motifs = generate_motifs()
with tqdm(total=len(motifs)) as pbar:
resolution = int(cpu_count() * 3/4)
with ThreadPool(resolution) as p:
for motif, rows_three in p.map(generate_system_data, motifs):
fname = '{}_{}'.format(prefix, pbar.n)
with open(fname, 'wb') as fd:
pickle.dump({
'data': rows_three,
'motif': motif
}, fd)
pbar.update()
def generate_data(fname, gen_func=generate_basic_system, paramter_shift=10):
""" Generate and cache data of the form
{
'data': [
[raw_res, [enh_res, ...]], # some parameter configuration
...
] # rows in output plot
}
"""
param_range = np.linspace(0.1, 5, paramter_shift)
# iterate over parameter configurations and simulate system accordingly
rows = []
configurations = []
for k_m in param_range:
for k_23 in param_range:
syst = gen_func(k_m=k_m, k_23=k_23)
more = add_node_to_system(syst)
configurations.append((syst, more))
# only one parameter to vary in case of two nodes
if gen_func == generate_two_node_system:
break
rows = []
with tqdm(total=len(configurations)) as pbar:
resolution = int(cpu_count() * 3/4)
with Pool(resolution) as p:
for res in p.starmap(handle_systems, configurations):
if not res is None:
rows.append(res)
pbar.update()
# store matrix
if not fname is None:
with open(fname, 'wb') as fd:
pickle.dump({
'data': rows
}, fd)
else:
return rows
def generate_random_data(fname, paramter_shift=10):
""" Generate random data for comparison with experimental one
"""
def gen_rand_mat(dim=3):
""" Generate random correlation matrix
"""
tmp = npr.uniform(-1, 1, (dim,dim))
# make matrix symmetric
for i in range(dim):
for j in range(i+1, dim):
tmp[i,j] = tmp[j,i]
return tmp
def handle_random_case(size):
random_raw = None, gen_rand_mat(), None
row = []
for _ in range(size):
random_enh = None, gen_rand_mat(4), None
row.append((None, random_enh))
return [(None, random_raw), row]
# generate random data
rows = []
for _ in trange(paramter_shift**2):
rows.append(handle_random_case(64))
# store matrix
with open(fname, 'wb') as fd:
pickle.dump({
'data': rows,
'corr_stdev': np.std([r[0][1][1] for r in rows], axis=0)
}, fd)
def main():
"""
Main interface
"""
if len(sys.argv) != 2:
print('Usage: %s <data file>' % sys.argv[0])
exit(-1)
generate_data(sys.argv[1], gen_func=generate_basic_system)
#generate_random_data(sys.argv[1])
#generate_motif_data(sys.argv[1])
if __name__ == '__main__':
main()
|
{
"content_hash": "12b8c947366105113e2f37cad3d9c602",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 111,
"avg_line_length": 28.685567010309278,
"alnum_prop": 0.5683737646001797,
"repo_name": "kpj/SDEMotif",
"id": "460ed6fad3e0e8a68a9366e26036da6210550d06",
"size": "5565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nm_data_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200009"
},
{
"name": "Shell",
"bytes": "114"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.local_settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "e76ccb1ca04cebaffa659970e7dc5fd0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 26,
"alnum_prop": 0.7136752136752137,
"repo_name": "caktus/django-comps",
"id": "9c6e9a4c0d01855198ce5ec18679c8cf3150737e",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1926"
},
{
"name": "Python",
"bytes": "19465"
}
],
"symlink_target": ""
}
|
"""CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info, other
request attributes, and the application architecture. The core calls the
dispatcher as early as possible, passing it a 'path_info' argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import string
import sys
import types
try:
classtype = (type, types.ClassType)
except AttributeError:
classtype = type
import cherrypy
from cherrypy._cpcompat import set
class PageHandler(object):
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
def __call__(self):
try:
return self.callable(*self.args, **self.kwargs)
except TypeError:
x = sys.exc_info()[1]
try:
test_callable_spec(self.callable, self.args, self.kwargs)
except cherrypy.HTTPError:
raise sys.exc_info()[1]
except:
raise x
raise
def test_callable_spec(callable, callable_args, callable_kwargs):
"""
Inspect callable and test to see if the given args are suitable for it.
When an error occurs during the handler's invoking stage there are 2
erroneous cases:
1. Too many parameters passed to a function which doesn't define
one of *args or **kwargs.
2. Too little parameters are passed to the function.
There are 3 sources of parameters to a cherrypy handler.
1. query string parameters are passed as keyword parameters to the handler.
2. body parameters are also passed as keyword parameters.
3. when partial matching occurs, the final path atoms are passed as
positional args.
Both the query string and path atoms are part of the URI. If they are
incorrect, then a 404 Not Found should be raised. Conversely the body
parameters are part of the request; if they are invalid a 400 Bad Request.
"""
show_mismatched_params = getattr(
cherrypy.serving.request, 'show_mismatched_params', False)
try:
(args, varargs, varkw, defaults) = inspect.getargspec(callable)
except TypeError:
if isinstance(callable, object) and hasattr(callable, '__call__'):
(args, varargs, varkw, defaults) = inspect.getargspec(callable.__call__)
else:
# If it wasn't one of our own types, re-raise
# the original error
raise
if args and args[0] == 'self':
args = args[1:]
arg_usage = dict([(arg, 0,) for arg in args])
vararg_usage = 0
varkw_usage = 0
extra_kwargs = set()
for i, value in enumerate(callable_args):
try:
arg_usage[args[i]] += 1
except IndexError:
vararg_usage += 1
for key in callable_kwargs.keys():
try:
arg_usage[key] += 1
except KeyError:
varkw_usage += 1
extra_kwargs.add(key)
# figure out which args have defaults.
args_with_defaults = args[-len(defaults or []):]
for i, val in enumerate(defaults or []):
# Defaults take effect only when the arg hasn't been used yet.
if arg_usage[args_with_defaults[i]] == 0:
arg_usage[args_with_defaults[i]] += 1
missing_args = []
multiple_args = []
for key, usage in arg_usage.items():
if usage == 0:
missing_args.append(key)
elif usage > 1:
multiple_args.append(key)
if missing_args:
# In the case where the method allows body arguments
# there are 3 potential errors:
# 1. not enough query string parameters -> 404
# 2. not enough body parameters -> 400
# 3. not enough path parts (partial matches) -> 404
#
# We can't actually tell which case it is,
# so I'm raising a 404 because that covers 2/3 of the
# possibilities
#
# In the case where the method does not allow body
# arguments it's definitely a 404.
message = None
if show_mismatched_params:
message="Missing parameters: %s" % ",".join(missing_args)
raise cherrypy.HTTPError(404, message=message)
# the extra positional arguments come from the path - 404 Not Found
if not varargs and vararg_usage > 0:
raise cherrypy.HTTPError(404)
body_params = cherrypy.serving.request.body.params or {}
body_params = set(body_params.keys())
qs_params = set(callable_kwargs.keys()) - body_params
if multiple_args:
if qs_params.intersection(set(multiple_args)):
# If any of the multiple parameters came from the query string then
# it's a 404 Not Found
error = 404
else:
# Otherwise it's a 400 Bad Request
error = 400
message = None
if show_mismatched_params:
message="Multiple values for parameters: "\
"%s" % ",".join(multiple_args)
raise cherrypy.HTTPError(error, message=message)
if not varkw and varkw_usage > 0:
# If there were extra query string parameters, it's a 404 Not Found
extra_qs_params = set(qs_params).intersection(extra_kwargs)
if extra_qs_params:
message = None
if show_mismatched_params:
message="Unexpected query string "\
"parameters: %s" % ", ".join(extra_qs_params)
raise cherrypy.HTTPError(404, message=message)
# If there were any extra body parameters, it's a 400 Not Found
extra_body_params = set(body_params).intersection(extra_kwargs)
if extra_body_params:
message = None
if show_mismatched_params:
message="Unexpected body parameters: "\
"%s" % ", ".join(extra_body_params)
raise cherrypy.HTTPError(400, message=message)
try:
import inspect
except ImportError:
test_callable_spec = lambda callable, args, kwargs: None
class LateParamPageHandler(PageHandler):
"""When passing cherrypy.request.params to the page handler, we do not
want to capture that dict too early; we want to give tools like the
decoding tool a chance to modify the params dict in-between the lookup
of the handler and the actual calling of the handler. This subclass
takes that into account, and allows request.params to be 'bound late'
(it's more complicated than that, but that's the effect).
"""
def _get_kwargs(self):
kwargs = cherrypy.serving.request.params.copy()
if self._kwargs:
kwargs.update(self._kwargs)
return kwargs
def _set_kwargs(self, kwargs):
self._kwargs = kwargs
kwargs = property(_get_kwargs, _set_kwargs,
doc='page handler kwargs (with '
'cherrypy.request.params copied in)')
if sys.version_info < (3, 0):
punctuation_to_underscores = string.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, str) or len(t) != 256:
raise ValueError("The translate argument must be a str of len 256.")
else:
punctuation_to_underscores = str.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, dict):
raise ValueError("The translate argument must be a dict.")
class Dispatcher(object):
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
The tree is rooted at cherrypy.request.app.root, and each hierarchical
component in the path_info argument is matched to a corresponding nested
attribute of the root object. Matching handlers must have an 'exposed'
attribute which evaluates to True. The special method name "index"
matches a URI which ends in a slash ("/"). The special method name
"default" may match a portion of the path_info (but only when no longer
substring of the path_info matches some other object).
This is the default, built-in dispatcher for CherryPy.
"""
dispatch_method_name = '_cp_dispatch'
"""
The name of the dispatch method that nodes may optionally implement
to provide their own dynamic dispatch algorithm.
"""
def __init__(self, dispatch_method_name=None,
translate=punctuation_to_underscores):
validate_translator(translate)
self.translate = translate
if dispatch_method_name:
self.dispatch_method_name = dispatch_method_name
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
func, vpath = self.find_handler(path_info)
if func:
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.NotFound()
def find_handler(self, path):
"""Return the appropriate page handler, plus any virtual path.
This will return two objects. The first will be a callable,
which can be used to generate page output. Any parameters from
the query string or request body will be sent to that callable
as keyword arguments.
The callable is found by traversing the application's tree,
starting from cherrypy.request.app.root, and matching path
components to successive objects in the tree. For example, the
URL "/path/to/handler" might return root.path.to.handler.
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
and were not used when looking up the handler.
These virtual path components are passed to the handler as
positional arguments.
"""
request = cherrypy.serving.request
app = request.app
root = app.root
dispatch_name = self.dispatch_method_name
# Get config for the root object/path.
fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
fullpath_len = len(fullpath)
segleft = fullpath_len
nodeconf = {}
if hasattr(root, "_cp_config"):
nodeconf.update(root._cp_config)
if "/" in app.config:
nodeconf.update(app.config["/"])
object_trail = [['root', root, nodeconf, segleft]]
node = root
iternames = fullpath[:]
while iternames:
name = iternames[0]
# map to legal Python identifiers (e.g. replace '.' with '_')
objname = name.translate(self.translate)
nodeconf = {}
subnode = getattr(node, objname, None)
pre_len = len(iternames)
if subnode is None:
dispatch = getattr(node, dispatch_name, None)
if dispatch and hasattr(dispatch, '__call__') and not \
getattr(dispatch, 'exposed', False) and \
pre_len > 1:
#Don't expose the hidden 'index' token to _cp_dispatch
#We skip this if pre_len == 1 since it makes no sense
#to call a dispatcher when we have no tokens left.
index_name = iternames.pop()
subnode = dispatch(vpath=iternames)
iternames.append(index_name)
else:
#We didn't find a path, but keep processing in case there
#is a default() handler.
iternames.pop(0)
else:
#We found the path, remove the vpath entry
iternames.pop(0)
segleft = len(iternames)
if segleft > pre_len:
#No path segment was removed. Raise an error.
raise cherrypy.CherryPyException(
"A vpath segment was added. Custom dispatchers may only "
+ "remove elements. While trying to process "
+ "{0} in {1}".format(name, fullpath)
)
elif segleft == pre_len:
#Assume that the handler used the current path segment, but
#did not pop it. This allows things like
#return getattr(self, vpath[0], None)
iternames.pop(0)
segleft -= 1
node = subnode
if node is not None:
# Get _cp_config attached to this node.
if hasattr(node, "_cp_config"):
nodeconf.update(node._cp_config)
# Mix in values from app.config for this path.
existing_len = fullpath_len - pre_len
if existing_len != 0:
curpath = '/' + '/'.join(fullpath[0:existing_len])
else:
curpath = ''
new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
for seg in new_segs:
curpath += '/' + seg
if curpath in app.config:
nodeconf.update(app.config[curpath])
object_trail.append([name, node, nodeconf, segleft])
def set_conf():
"""Collapse all object_trail config into cherrypy.request.config."""
base = cherrypy.config.copy()
# Note that we merge the config from each node
# even if that node was None.
for name, obj, conf, segleft in object_trail:
base.update(conf)
if 'tools.staticdir.dir' in conf:
base['tools.staticdir.section'] = '/' + '/'.join(fullpath[0:fullpath_len - segleft])
return base
# Try successive objects (reverse order)
num_candidates = len(object_trail) - 1
for i in range(num_candidates, -1, -1):
name, candidate, nodeconf, segleft = object_trail[i]
if candidate is None:
continue
# Try a "default" method on the current leaf.
if hasattr(candidate, "default"):
defhandler = candidate.default
if getattr(defhandler, 'exposed', False):
# Insert any extra _cp_config from the default handler.
conf = getattr(defhandler, "_cp_config", {})
object_trail.insert(i+1, ["default", defhandler, conf, segleft])
request.config = set_conf()
# See https://bitbucket.org/cherrypy/cherrypy/issue/613
request.is_index = path.endswith("/")
return defhandler, fullpath[fullpath_len - segleft:-1]
# Uncomment the next line to restrict positional params to "default".
# if i < num_candidates - 2: continue
# Try the current leaf.
if getattr(candidate, 'exposed', False):
request.config = set_conf()
if i == num_candidates:
# We found the extra ".index". Mark request so tools
# can redirect if path_info has no trailing slash.
request.is_index = True
else:
# We're not at an 'index' handler. Mark request so tools
# can redirect if path_info has NO trailing slash.
# Note that this also includes handlers which take
# positional parameters (virtual paths).
request.is_index = False
return candidate, fullpath[fullpath_len - segleft:-1]
# We didn't find anything
request.config = set_conf()
return None, []
class MethodDispatcher(Dispatcher):
"""Additional dispatch based on cherrypy.request.method.upper().
Methods named GET, POST, etc will be called on an exposed class.
The method names must be all caps; the appropriate Allow header
will be output showing all capitalized method names as allowable
HTTP verbs.
Note that the containing class must be exposed, not the methods.
"""
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
resource, vpath = self.find_handler(path_info)
if resource:
# Set Allow header
avail = [m for m in dir(resource) if m.isupper()]
if "GET" in avail and "HEAD" not in avail:
avail.append("HEAD")
avail.sort()
cherrypy.serving.response.headers['Allow'] = ", ".join(avail)
# Find the subhandler
meth = request.method.upper()
func = getattr(resource, meth, None)
if func is None and meth == "HEAD":
func = getattr(resource, "GET", None)
if func:
# Grab any _cp_config on the subhandler.
if hasattr(func, "_cp_config"):
request.config.update(func._cp_config)
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.HTTPError(405)
else:
request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False):
"""
Routes dispatcher
Set full_result to True if you wish the controller
and the action to be passed on to the page handler
parameters. By default they won't be.
"""
import routes
self.full_result = full_result
self.controllers = {}
self.mapper = routes.Mapper()
self.mapper.controller_scan = self.controllers.keys
def connect(self, name, route, controller, **kwargs):
self.controllers[name] = controller
self.mapper.connect(name, route, controller=name, **kwargs)
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def __call__(self, path_info):
"""Set handler and config for the current request."""
func = self.find_handler(path_info)
if func:
cherrypy.serving.request.handler = LateParamPageHandler(func)
else:
cherrypy.serving.request.handler = cherrypy.NotFound()
def find_handler(self, path_info):
"""Find the right page handler, and set request.config."""
import routes
request = cherrypy.serving.request
config = routes.request_config()
config.mapper = self.mapper
if hasattr(request, 'wsgi_environ'):
config.environ = request.wsgi_environ
config.host = request.headers.get('Host', None)
config.protocol = request.scheme
config.redirect = self.redirect
result = self.mapper.match(path_info)
config.mapper_dict = result
params = {}
if result:
params = result.copy()
if not self.full_result:
params.pop('controller', None)
params.pop('action', None)
request.params.update(params)
# Get config for the root object/path.
request.config = base = cherrypy.config.copy()
curpath = ""
def merge(nodeconf):
if 'tools.staticdir.dir' in nodeconf:
nodeconf['tools.staticdir.section'] = curpath or "/"
base.update(nodeconf)
app = request.app
root = app.root
if hasattr(root, "_cp_config"):
merge(root._cp_config)
if "/" in app.config:
merge(app.config["/"])
# Mix in values from app.config.
atoms = [x for x in path_info.split("/") if x]
if atoms:
last = atoms.pop()
else:
last = None
for atom in atoms:
curpath = "/".join((curpath, atom))
if curpath in app.config:
merge(app.config[curpath])
handler = None
if result:
controller = result.get('controller')
controller = self.controllers.get(controller, controller)
if controller:
if isinstance(controller, classtype):
controller = controller()
# Get config from the controller.
if hasattr(controller, "_cp_config"):
merge(controller._cp_config)
action = result.get('action')
if action is not None:
handler = getattr(controller, action, None)
# Get config from the handler
if hasattr(handler, "_cp_config"):
merge(handler._cp_config)
else:
handler = controller
# Do the last path atom here so it can
# override the controller's _cp_config.
if last:
curpath = "/".join((curpath, last))
if curpath in app.config:
merge(app.config[curpath])
return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
from cherrypy.lib import xmlrpcutil
def xmlrpc_dispatch(path_info):
path_info = xmlrpcutil.patched_path(path_info)
return next_dispatcher(path_info)
return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains):
"""
Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
website structure. For example::
http://www.domain.example -> root
http://www.domain2.example -> root/domain2/
http://www.domain2.example:443 -> root/secure
can be accomplished via the following config::
[/]
request.dispatch = cherrypy.dispatch.VirtualHost(
**{'www.domain2.example': '/domain2',
'www.domain2.example:443': '/secure',
})
next_dispatcher
The next dispatcher object in the dispatch chain.
The VirtualHost dispatcher adds a prefix to the URL and calls
another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
use_x_forwarded_host
If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
``**domains``
A dict of {host header value: virtual prefix} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding "virtual prefix"
value will be prepended to the URL path before calling the
next dispatcher. Note that you often need separate entries
for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
from cherrypy.lib import httputil
def vhost_dispatch(path_info):
request = cherrypy.serving.request
header = request.headers.get
domain = header('Host', '')
if use_x_forwarded_host:
domain = header("X-Forwarded-Host", domain)
prefix = domains.get(domain, "")
if prefix:
path_info = httputil.urljoin(prefix, path_info)
result = next_dispatcher(path_info)
# Touch up staticdir config. See https://bitbucket.org/cherrypy/cherrypy/issue/614.
section = request.config.get('tools.staticdir.section')
if section:
section = section[len(prefix):]
request.config['tools.staticdir.section'] = section
return result
return vhost_dispatch
|
{
"content_hash": "68c980c2a763405acd40edfe7d5d6374",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 104,
"avg_line_length": 38.01729559748428,
"alnum_prop": 0.5928284875305017,
"repo_name": "bmbove/omxremote",
"id": "a6774cd18ce2f8bccc6c858a6baeceb7c9d01ade",
"size": "24179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cherrypy/_cpdispatch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2684"
},
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "1349"
},
{
"name": "HTML",
"bytes": "499"
},
{
"name": "Python",
"bytes": "2060401"
}
],
"symlink_target": ""
}
|
"""
If "modified" date/time is not defined in article metadata, fall back to the "created" date.
"""
from pelican import signals
from pelican.contents import Content, Article
def add_modified(content):
if not isinstance(content, Article):
return
if not content.settings.get('ALWAYS_MODIFIED', False):
return
if hasattr(content, 'date') and not hasattr(content, 'modified'):
content.modified = content.date
content.locale_modified = content.locale_date
def register():
signals.content_object_init.connect(add_modified)
|
{
"content_hash": "fc5f82e5088aed0fc4868f6243a0513f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 92,
"avg_line_length": 28.95,
"alnum_prop": 0.696027633851468,
"repo_name": "jeorryb/datarambler",
"id": "0f2578b6675199962a5c910075dad27c0b02cb07",
"size": "579",
"binary": false,
"copies": "58",
"ref": "refs/heads/master",
"path": "plugins/always_modified/always_modified.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2120417"
},
{
"name": "Emacs Lisp",
"bytes": "2465"
},
{
"name": "HTML",
"bytes": "1536698"
},
{
"name": "Haskell",
"bytes": "2645"
},
{
"name": "JavaScript",
"bytes": "844135"
},
{
"name": "Jupyter Notebook",
"bytes": "96905"
},
{
"name": "Makefile",
"bytes": "5252"
},
{
"name": "Python",
"bytes": "476895"
},
{
"name": "Ruby",
"bytes": "490"
},
{
"name": "Shell",
"bytes": "4580"
},
{
"name": "Smarty",
"bytes": "4340"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pytest
from sentry.models import EventError
@pytest.mark.parametrize(
"error,type,message,data",
(
({"type": "unknown_error"}, "unknown_error", "Unknown error", {}),
({"type": "unknown_error", "foo": "bar"}, "unknown_error", "Unknown error", {"foo": "bar"}),
(
{"type": "invalid_data", "name": "foo"},
"invalid_data",
"Discarded invalid value",
{"name": "foo"},
),
({"type": "invalid_data"}, "invalid_data", "Discarded invalid value", {}),
({"type": "INVALID_ERROR_TYPE"}, "INVALID_ERROR_TYPE", "Unknown error", {}),
),
)
def test_event_error(error, type, message, data):
assert EventError.get_message(error) == message
assert EventError(error).type == type
assert EventError(error).message == message
assert EventError(error).data == data
def test_api_context():
error = {"type": "unknown_error", "foo": "bar"}
assert EventError(error).get_api_context() == {
"type": "unknown_error",
"message": "Unknown error",
"data": {"foo": "bar"},
}
|
{
"content_hash": "9a7009d94c80b544eec2fcce800a57f1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 100,
"avg_line_length": 31.97222222222222,
"alnum_prop": 0.5682015638575152,
"repo_name": "beeftornado/sentry",
"id": "166632a4ada01c07219b1dc4713b124762be9219",
"size": "1151",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/models/test_eventerror.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
import datetime
import io
import json
from PIL import Image
import re
from urllib import urlencode
import urllib2
from urlparse import urlparse
from openerp import api, fields, models, SUPERUSER_ID, _
from openerp.tools import image
from openerp.exceptions import Warning
from openerp.addons.website.models.website import slug
class Channel(models.Model):
""" A channel is a container of slides. It has group-based access configuration
allowing to configure slide upload and access. Slides can be promoted in
channels. """
_name = 'slide.channel'
_description = 'Channel for Slides'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_order = 'sequence, id'
_order_by_strategy = {
'most_viewed': 'total_views desc',
'most_voted': 'likes desc',
'latest': 'date_published desc',
}
name = fields.Char('Name', translate=True, required=True)
description = fields.Html('Description', translate=True)
sequence = fields.Integer(default=10, help='Display order')
category_ids = fields.One2many('slide.category', 'channel_id', string="Categories")
slide_ids = fields.One2many('slide.slide', 'channel_id', string="Slides")
promote_strategy = fields.Selection([
('none', 'No Featured Presentation'),
('latest', 'Latest Published'),
('most_voted', 'Most Voted'),
('most_viewed', 'Most Viewed'),
('custom', 'Featured Presentation')],
string="Featuring Policy", default='most_voted', required=True)
custom_slide_id = fields.Many2one('slide.slide', string='Slide to Promote')
promoted_slide_id = fields.Many2one('slide.slide', string='Featured Slide', compute='_compute_promoted_slide_id', store=True)
@api.depends('custom_slide_id', 'promote_strategy', 'slide_ids.likes',
'slide_ids.total_views', "slide_ids.date_published")
def _compute_promoted_slide_id(self):
for record in self:
if record.promote_strategy == 'none':
record.promoted_slide_id = False
elif record.promote_strategy == 'custom':
record.promoted_slide_id = record.custom_slide_id
elif record.promote_strategy:
slides = self.env['slide.slide'].search(
[('website_published', '=', True), ('channel_id', '=', record.id)],
limit=1, order=self._order_by_strategy[record.promote_strategy])
record.promoted_slide_id = slides and slides[0] or False
nbr_presentations = fields.Integer('Number of Presentations', compute='_count_presentations', store=True)
nbr_documents = fields.Integer('Number of Documents', compute='_count_presentations', store=True)
nbr_videos = fields.Integer('Number of Videos', compute='_count_presentations', store=True)
nbr_infographics = fields.Integer('Number of Infographics', compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
result = dict.fromkeys(self.ids, dict())
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('channel_id', 'in', self.ids)],
['channel_id', 'slide_type'], ['channel_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['channel_id'][0]][res_group['slide_type']] = result[res_group['channel_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
publish_template_id = fields.Many2one(
'mail.template', string='Published Template',
help="Email template to send slide publication through email",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_published'))
share_template_id = fields.Many2one(
'mail.template', string='Shared Template',
help="Email template used when sharing a slide",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_shared'))
visibility = fields.Selection([
('public', 'Public'),
('private', 'Private'),
('partial', 'Show channel but restrict presentations')],
default='public', required=True)
group_ids = fields.Many2many(
'res.groups', 'rel_channel_groups', 'channel_id', 'group_id',
string='Channel Groups', help="Groups allowed to see presentations in this channel")
access_error_msg = fields.Html(
'Error Message', help="Message to display when not accessible due to access rights",
default="<p>This channel is private and its content is restricted to some users.</p>", translate=True)
upload_group_ids = fields.Many2many(
'res.groups', 'rel_upload_groups', 'channel_id', 'group_id',
string='Upload Groups', help="Groups allowed to upload presentations in this channel. If void, every user can upload.")
# not stored access fields, depending on each user
can_see = fields.Boolean('Can See', compute='_compute_access', search='_search_can_see')
can_see_full = fields.Boolean('Full Access', compute='_compute_access')
can_upload = fields.Boolean('Can Upload', compute='_compute_access')
def _search_can_see(self, operator, value):
if operator not in ('=', '!=', '<>'):
raise ValueError('Invalid operator: %s' % (operator,))
if not value:
operator = operator == "=" and '!=' or '='
if self._uid == SUPERUSER_ID:
return [(1, '=', 1)]
# Better perfs to split request and use inner join that left join
req = """
SELECT id FROM slide_channel WHERE visibility='public'
UNION
SELECT c.id
FROM slide_channel c
INNER JOIN rel_channel_groups rg on c.id = rg.channel_id
INNER JOIN res_groups g on g.id = rg.group_id
INNER JOIN res_groups_users_rel u on g.id = u.gid and uid = %s
"""
op = operator == "=" and "inselect" or "not inselect"
# don't use param named because orm will add other param (test_active, ...)
return [('id', op, (req, (self._uid)))]
@api.one
@api.depends('visibility', 'group_ids', 'upload_group_ids')
def _compute_access(self):
self.can_see = self.visibility in ['public', 'private'] or bool(self.group_ids & self.env.user.groups_id)
self.can_see_full = self.visibility == 'public' or bool(self.group_ids & self.env.user.groups_id)
self.can_upload = self.can_see and (not self.upload_group_ids or bool(self.upload_group_ids & self.env.user.groups_id))
@api.multi
@api.depends('name')
def _website_url(self, name, arg):
res = super(Channel, self)._website_url(name, arg)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
res.update({(channel.id, '%s/slides/%s' % (base_url, slug(channel))) for channel in self})
return res
@api.onchange('visibility')
def change_visibility(self):
if self.visibility == 'public':
self.group_ids = False
class Category(models.Model):
""" Channel contain various categories to manage its slides """
_name = 'slide.category'
_description = "Slides Category"
_order = "sequence, id"
name = fields.Char('Name', translate=True, required=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True, ondelete='cascade')
sequence = fields.Integer(default=10, help='Display order')
slide_ids = fields.One2many('slide.slide', 'category_id', string="Slides")
nbr_presentations = fields.Integer("Number of Presentations", compute='_count_presentations', store=True)
nbr_documents = fields.Integer("Number of Documents", compute='_count_presentations', store=True)
nbr_videos = fields.Integer("Number of Videos", compute='_count_presentations', store=True)
nbr_infographics = fields.Integer("Number of Infographics", compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
result = dict.fromkeys(self.ids, dict())
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('category_id', 'in', self.ids)],
['category_id', 'slide_type'], ['category_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['category_id'][0]][res_group['slide_type']] = result[res_group['category_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
class EmbeddedSlide(models.Model):
""" Embedding in third party websites. Track view count, generate statistics. """
_name = 'slide.embed'
_description = 'Embedded Slides View Counter'
_rec_name = 'slide_id'
slide_id = fields.Many2one('slide.slide', string="Presentation", required=True, index=True)
url = fields.Char('Third Party Website URL', required=True)
count_views = fields.Integer('# Views', default=1)
def add_embed_url(self, slide_id, url):
schema = urlparse(url)
baseurl = schema.netloc
embeds = self.search([('url', '=', baseurl), ('slide_id', '=', int(slide_id))], limit=1)
if embeds:
embeds.count_views += 1
else:
embeds = self.create({
'slide_id': slide_id,
'url': baseurl,
})
return embeds.count_views
class SlideTag(models.Model):
""" Tag to search slides accross channels. """
_name = 'slide.tag'
_description = 'Slide Tag'
name = fields.Char('Name', required=True)
_sql_constraints = [
('slide_tag_unique', 'UNIQUE(name)', 'A tag must be unique!'),
]
class Slide(models.Model):
""" This model represents actual presentations. Those must be one of four
types:
- Presentation
- Document
- Infographic
- Video
Slide has various statistics like view count, embed count, like, dislikes """
_name = 'slide.slide'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_description = 'Slides'
_PROMOTIONAL_FIELDS = [
'__last_update', 'name', 'image_thumb', 'image_medium', 'slide_type', 'total_views', 'category_id',
'channel_id', 'description', 'tag_ids', 'write_date', 'create_date',
'website_published', 'website_url', 'website_meta_title', 'website_meta_description', 'website_meta_keywords']
_sql_constraints = [
('name_uniq', 'UNIQUE(channel_id, name)', 'The slide name must be unique within a channel')
]
# description
name = fields.Char('Title', required=True, translate=True)
description = fields.Text('Description', translate=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True)
category_id = fields.Many2one('slide.category', string="Category", domain="[('channel_id', '=', channel_id)]")
tag_ids = fields.Many2many('slide.tag', 'rel_slide_tag', 'slide_id', 'tag_id', string='Tags')
download_security = fields.Selection(
[('none', 'No One'), ('user', 'Authentified Users Only'), ('public', 'Everyone')],
string='Download Security',
required=True, default='user')
image = fields.Binary('Image', attachment=True)
image_medium = fields.Binary('Medium', compute="_get_image", store=True, attachment=True)
image_thumb = fields.Binary('Thumbnail', compute="_get_image", store=True, attachment=True)
@api.depends('image')
def _get_image(self):
for record in self:
if record.image:
record.image_medium = image.crop_image(record.image, type='top', ratio=(4, 3), thumbnail_ratio=4)
record.image_thumb = image.crop_image(record.image, type='top', ratio=(4, 3), thumbnail_ratio=6)
else:
record.image_medium = False
record.iamge_thumb = False
# content
slide_type = fields.Selection([
('infographic', 'Infographic'),
('presentation', 'Presentation'),
('document', 'Document'),
('video', 'Video')],
string='Type', required=True,
default='document',
help="Document type will be set automatically depending on file type, height and width.")
index_content = fields.Text('Transcript')
datas = fields.Binary('Content')
url = fields.Char('Document URL', help="Youtube or Google Document URL")
document_id = fields.Char('Document ID', help="Youtube or Google Document ID")
mime_type = fields.Char('Mime-type')
@api.onchange('url')
def on_change_url(self):
self.ensure_one()
if self.url:
res = self._parse_document_url(self.url)
if res.get('error'):
raise Warning(_('Could not fetch data from url. Document or access right not available:\n%s') % res['error'])
values = res['values']
if not values.get('document_id'):
raise Warning(_('Please enter valid Youtube or Google Doc URL'))
for key, value in values.iteritems():
setattr(self, key, value)
# website
date_published = fields.Datetime('Publish Date')
website_message_ids = fields.One2many(
'mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name), ('message_type', '=', 'comment')],
string='Website Messages', help="Website communication history")
likes = fields.Integer('Likes')
dislikes = fields.Integer('Dislikes')
# views
embedcount_ids = fields.One2many('slide.embed', 'slide_id', string="Embed Count")
slide_views = fields.Integer('# of Website Views')
embed_views = fields.Integer('# of Embedded Views')
total_views = fields.Integer("Total # Views", default="0", compute='_compute_total', store=True)
@api.depends('slide_views', 'embed_views')
def _compute_total(self):
for record in self:
record.total_views = record.slide_views + record.embed_views
embed_code = fields.Text('Embed Code', readonly=True, compute='_get_embed_code')
def _get_embed_code(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for record in self:
if record.datas and (not record.document_id or record.slide_type in ['document', 'presentation']):
record.embed_code = '<iframe src="%s/slides/embed/%s?page=1" allowFullScreen="true" height="%s" width="%s" frameborder="0"></iframe>' % (base_url, record.id, 315, 420)
elif record.slide_type == 'video' and record.document_id:
if not record.mime_type:
# embed youtube video
record.embed_code = '<iframe src="//www.youtube.com/embed/%s?theme=light" allowFullScreen="true" frameborder="0"></iframe>' % (record.document_id)
else:
# embed google doc video
record.embed_code = '<embed src="https://video.google.com/get_player?ps=docs&partnerid=30&docid=%s" type="application/x-shockwave-flash"></embed>' % (record.document_id)
else:
record.embed_code = False
@api.multi
@api.depends('name')
def _website_url(self, name, arg):
res = super(Slide, self)._website_url(name, arg)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
#link_tracker is not in dependencies, so use it to shorten url only if installed.
if self.env.registry.get('link.tracker'):
LinkTracker = self.env['link.tracker']
res.update({(slide.id, LinkTracker.sudo().create({'url': '%s/slides/slide/%s' % (base_url, slug(slide))}).short_url) for slide in self})
else:
res.update({(slide.id, '%s/slides/slide/%s' % (base_url, slug(slide))) for slide in self})
return res
@api.model
def create(self, values):
if not values.get('index_content'):
values['index_content'] = values.get('description')
if values.get('slide_type') == 'infographic' and not values.get('image'):
values['image'] = values['datas']
if values.get('website_published') and not values.get('date_published'):
values['date_published'] = datetime.datetime.now()
if values.get('url'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.iteritems():
values.setdefault(key, value)
# Do not publish slide if user has not publisher rights
if not self.user_has_groups('base.group_website_publisher'):
values['website_published'] = False
slide = super(Slide, self).create(values)
slide.channel_id.message_subscribe_users()
slide._post_publication()
return slide
@api.multi
def write(self, values):
if values.get('url'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.iteritems():
values.setdefault(key, value)
if values.get('channel_id'):
custom_channels = self.env['slide.channel'].search([('custom_slide_id', '=', self.id), ('id', '!=', values.get('channel_id'))])
custom_channels.write({'custom_slide_id': False})
res = super(Slide, self).write(values)
if values.get('website_published'):
self.date_published = datetime.datetime.now()
self._post_publication()
return res
@api.model
def check_field_access_rights(self, operation, fields):
""" As per channel access configuration (visibility)
- public ==> no restriction on slides access
- private ==> restrict all slides of channel based on access group defined on channel group_ids field
- partial ==> show channel, but presentations based on groups means any user can see channel but not slide's content.
For private: implement using record rule
For partial: user can see channel, but channel gridview have slide detail so we have to implement
partial field access mechanism for public user so he can have access of promotional field (name, view_count) of slides,
but not all fields like data (actual pdf content)
all fields should be accessible only for user group defined on channel group_ids
"""
if self.env.uid == SUPERUSER_ID:
return fields or list(self._fields)
fields = super(Slide, self).check_field_access_rights(operation, fields)
# still read not perform so we can not access self.channel_id
if self.ids:
self.env.cr.execute('SELECT DISTINCT channel_id FROM ' + self._table + ' WHERE id IN %s', (tuple(self.ids),))
channel_ids = [x[0] for x in self.env.cr.fetchall()]
channels = self.env['slide.channel'].sudo().browse(channel_ids)
limited_access = all(channel.visibility == 'partial' and
not len(channel.group_ids & self.env.user.groups_id)
for channel in channels)
if limited_access:
fields = [field for field in fields if field in self._PROMOTIONAL_FIELDS]
return fields
def get_related_slides(self, limit=20):
domain = [('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)]
if self.category_id:
domain += [('category_id', '=', self.category_id.id)]
for record in self.search(domain, limit=limit):
yield record
def get_most_viewed_slides(self, limit=20):
for record in self.search([('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)], limit=limit, order='total_views desc'):
yield record
def _post_publication(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for slide in self.filtered(lambda slide: slide.website_published):
publish_template = slide.channel_id.publish_template_id
html_body = publish_template.with_context({'base_url': base_url}).render_template(publish_template.body_html, 'slide.slide', slide.id)
slide.channel_id.message_post(body=html_body, subtype='website_slides.mt_channel_slide_published')
return True
@api.one
def send_share_email(self, email):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
return self.channel_id.share_template_id.with_context({'email': email, 'base_url': base_url}).send_mail(self.id)
# --------------------------------------------------
# Parsing methods
# --------------------------------------------------
@api.model
def _fetch_data(self, base_url, data, content_type=False):
result = {'values': dict()}
try:
if data:
base_url = base_url + '?%s' % urlencode(data)
req = urllib2.Request(base_url)
content = urllib2.urlopen(req).read()
if content_type == 'json':
result['values'] = json.loads(content)
elif content_type in ('image', 'pdf'):
result['values'] = content.encode('base64')
else:
result['values'] = content
except urllib2.HTTPError as e:
result['error'] = e.read()
e.close()
except urllib2.URLError as e:
result['error'] = e.reason
return result
def _find_document_data_from_url(self, url):
expr = re.compile(r'^.*((youtu.be/)|(v/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#\&\?]*).*')
arg = expr.match(url)
document_id = arg and arg.group(7) or False
if document_id:
return ('youtube', document_id)
expr = re.compile(r'(^https:\/\/docs.google.com|^https:\/\/drive.google.com).*\/d\/([^\/]*)')
arg = expr.match(url)
document_id = arg and arg.group(2) or False
if document_id:
return ('google', document_id)
return (None, False)
def _parse_document_url(self, url, only_preview_fields=False):
document_source, document_id = self._find_document_data_from_url(url)
if document_source and hasattr(self, '_parse_%s_document' % document_source):
return getattr(self, '_parse_%s_document' % document_source)(document_id, only_preview_fields)
return {'error': _('Unknown document')}
def _parse_youtube_document(self, document_id, only_preview_fields):
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/youtube/v3/videos', {'id': document_id, 'key': key, 'part': 'snippet', 'fields': 'items(id,snippet)'}, 'json')
if fetch_res.get('error'):
return fetch_res
values = {'slide_type': 'video', 'document_id': document_id}
items = fetch_res['values'].get('items')
if not items:
return {'error': _('Please enter valid Youtube or Google Doc URL')}
youtube_values = items[0]
if youtube_values.get('snippet'):
snippet = youtube_values['snippet']
if only_preview_fields:
values.update({
'url_src': snippet['thumbnails']['high']['url'],
'title': snippet['title'],
'description': snippet['description']
})
return values
values.update({
'name': snippet['title'],
'image': self._fetch_data(snippet['thumbnails']['high']['url'], {}, 'image')['values'],
'description': snippet['description'],
})
return {'values': values}
@api.model
def _parse_google_document(self, document_id, only_preview_fields):
def get_slide_type(vals):
# TDE FIXME: WTF ??
image = Image.open(io.BytesIO(vals['image'].decode('base64')))
width, height = image.size
if height > width:
return 'document'
else:
return 'presentation'
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/drive/v2/files/%s' % document_id, {'projection': 'BASIC', 'key': key}, "json")
if fetch_res.get('error'):
return fetch_res
google_values = fetch_res['values']
if only_preview_fields:
return {
'url_src': google_values['thumbnailLink'],
'title': google_values['title'],
}
values = {
'name': google_values['title'],
'image': self._fetch_data(google_values['thumbnailLink'].replace('=s220', ''), {}, 'image')['values'],
'mime_type': google_values['mimeType'],
'document_id': document_id,
}
if google_values['mimeType'].startswith('video/'):
values['slide_type'] = 'video'
elif google_values['mimeType'].startswith('image/'):
values['datas'] = values['image']
values['slide_type'] = 'infographic'
elif google_values['mimeType'].startswith('application/vnd.google-apps'):
values['datas'] = self._fetch_data(google_values['exportLinks']['application/pdf'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
if google_values['exportLinks'].get('text/plain'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/plain'], {})['values']
if google_values['exportLinks'].get('text/csv'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/csv'], {})['values']
elif google_values['mimeType'] == 'application/pdf':
# TODO: Google Drive PDF document doesn't provide plain text transcript
values['datas'] = self._fetch_data(google_values['webContentLink'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
return {'values': values}
|
{
"content_hash": "8b2973d43d3f9b9aa99424f9940174dc",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 189,
"avg_line_length": 49.66304347826087,
"alnum_prop": 0.6075727730356753,
"repo_name": "vileopratama/vitech",
"id": "474fa747ccdba62cea4086fd6f015c9171ad8907",
"size": "27439",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/addons/website_slides/models/slides.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
from f5_cccl.resource.ltm.policy import Rule
from mock import Mock
import pytest
@pytest.fixture
def bigip():
bigip = Mock()
return bigip
action_0 = {
"request": True,
"redirect": True,
"location": "http://boulder-dev.f5.com",
"httpReply": True
}
action_1 = {
"request": True,
"redirect": True,
"location": "http://seattle-dev.f5.com",
"httpReply": True
}
action_2 = {
"request": True,
"forward": True,
"virtual": "/Test/my_virtual"
}
condition_0 = {
'httpUri': True,
'pathSegment': True,
'contains': True,
'values': ["colorado"],
}
condition_1 = {
'httpUri': True,
'pathSegment': True,
'contains': True,
'values': ["washington"],
}
condition_2 = {
'httpUri': True,
'queryString': True,
'contains': True,
'values': ["washington"],
}
@pytest.fixture
def rule_0():
data = {'ordinal': "0",
'actions': [],
'conditions': []}
data['conditions'].append(condition_0)
data['actions'].append(action_0)
return Rule(name="rule_0", **data)
@pytest.fixture
def rule_0_clone():
data = {'ordinal': "0",
'actions': [],
'conditions': []}
data['conditions'].append(condition_0)
data['actions'].append(action_0)
return Rule(name="rule_0", **data)
@pytest.fixture
def rule_1():
data = {'ordinal': "1",
'actions': [],
'conditions': []}
data['conditions'].append(condition_1)
data['actions'].append(action_1)
return Rule(name="rule_1", **data)
@pytest.fixture
def rule_no_actions():
data = {'ordinal': "0",
'actions': [],
'conditions': []}
data['conditions'].append(condition_0)
return Rule(name="rule_0", **data)
@pytest.fixture
def rule_no_conditions():
data = {'ordinal': "0",
'actions': [],
'conditions': []}
data['actions'].append(action_1)
return Rule(name="rule_1", **data)
def test_create_rule():
data = {'ordinal': "0",
'actions': [],
'conditions': [],
'description': 'This is a rule description'}
rule = Rule(name="rule_0", **data)
assert rule.name == "rule_0"
assert len(rule.data['conditions']) == 0
assert len(rule.data['actions']) == 0
assert rule.data['description'] == 'This is a rule description'
data['conditions'].append(condition_0)
data['actions'].append(action_0)
rule = Rule(name="rule_1", **data)
assert len(rule.data['conditions']) == 1
assert len(rule.data['actions']) == 1
data['conditions'] = [condition_2]
data['actions'] = [action_0]
rule = Rule(name="rule_1", **data)
assert len(rule.data['conditions']) == 0
assert len(rule.data['actions']) == 1
data['conditions'].append(condition_0)
rule = Rule(name="rule_1", **data)
assert len(rule.data['conditions']) == 1
assert len(rule.data['actions']) == 1
data['conditions'] = [condition_0]
data['actions'] = [action_2]
rule = Rule(name="rule_1", **data)
assert len(rule.data['conditions']) == 1
assert len(rule.data['actions']) == 0
def test_uri_path(bigip, rule_0):
with pytest.raises(NotImplementedError):
rule_0._uri_path(bigip)
def test_less_than(rule_0, rule_1):
assert rule_0 < rule_1
def test_tostring(rule_0):
assert str(rule_0) != ""
def test_compare_rules(rule_0, rule_0_clone, rule_1,
rule_no_actions, rule_no_conditions):
assert rule_0 == rule_0_clone
assert rule_0 != rule_1
assert rule_0 != rule_no_actions
assert rule_0 != rule_no_conditions
fake_rule = {'ordinal': "0",
'actions': [],
'conditions': []}
assert rule_0 != fake_rule
rule_0_clone.data['actions'][0]['location'] = \
"http://seattle-dev.f5.com"
assert rule_0 != rule_0_clone
|
{
"content_hash": "0e11877fc7924c6231dd2b446348e625",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 67,
"avg_line_length": 22.651162790697676,
"alnum_prop": 0.5657084188911704,
"repo_name": "richbrowne/f5-cccl",
"id": "d3e9487530ccb17f3481bfdae476bd6d96c1c64d",
"size": "4500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_cccl/resource/ltm/policy/test/test_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "342330"
},
{
"name": "Shell",
"bytes": "2369"
}
],
"symlink_target": ""
}
|
from collections import deque
import re
import time
import logging
from twisted.python import log
from txstatsd.metrics.metermetric import MeterMetricReporter
SPACES = re.compile("\s+")
SLASHES = re.compile("\/+")
NON_ALNUM = re.compile("[^a-zA-Z_\-0-9\.]")
RATE = re.compile("^@([\d\.]+)")
def normalize_key(key):
"""
Normalize a key that might contain spaces, forward-slashes and other
special characters into something that is acceptable by graphite.
"""
key = SPACES.sub("_", key)
key = SLASHES.sub("-", key)
key = NON_ALNUM.sub("", key)
return key
class BaseMessageProcessor(object):
def process(self, message):
"""
"""
if not ":" in message:
return self.fail(message)
key, data = message.strip().split(":", 1)
if not "|" in data:
return self.fail(message)
fields = data.split("|")
if len(fields) < 2 or len(fields) > 3:
return self.fail(message)
key = normalize_key(key)
metric_type = fields[1]
return self.process_message(message, metric_type, key, fields)
def rebuild_message(self, metric_type, key, fields):
return key + ":" + "|".join(fields)
def fail(self, message):
"""Log and discard malformed message."""
log.msg("Bad line: %r" % message, logLevel=logging.DEBUG)
class MessageProcessor(BaseMessageProcessor):
"""
This C{MessageProcessor} produces StatsD-compliant messages
for publishing to a Graphite server.
Metrics behaviour that varies from StatsD should be placed in
some specialised C{MessageProcessor} (see L{ConfigurableMessageProcessor
<txstatsd.server.configurableprocessor.ConfigurableMessageProcessor>}).
"""
def __init__(self, time_function=time.time, plugins=None):
self.time_function = time_function
self.stats_prefix = "stats."
self.internal_metrics_prefix = "statsd."
self.count_prefix = "stats_counts."
self.timer_prefix = self.stats_prefix + "timers."
self.gauge_prefix = self.stats_prefix + "gauge."
self.process_timings = {}
self.by_type = {}
self.last_flush_duration = 0
self.last_process_duration = 0
self.timer_metrics = {}
self.counter_metrics = {}
self.gauge_metrics = deque()
self.meter_metrics = {}
self.plugins = {}
self.plugin_metrics = {}
if plugins is not None:
for plugin in plugins:
self.plugins[plugin.metric_type] = plugin
def get_metric_names(self):
"""Return the names of all seen metrics."""
metrics = set()
metrics.update(self.timer_metrics.keys())
metrics.update(self.counter_metrics.keys())
metrics.update(v for k, v in self.gauge_metrics)
metrics.update(self.meter_metrics.keys())
metrics.update(self.plugin_metrics.keys())
return list(metrics)
def process_message(self, message, metric_type, key, fields):
"""
Process a single entry, adding it to either C{counters}, C{timers},
or C{gauge_metrics} depending on which kind of message it is.
"""
start = self.time_function()
if metric_type == "c":
self.process_counter_metric(key, fields, message)
elif metric_type == "ms":
self.process_timer_metric(key, fields[0], message)
elif metric_type == "g":
self.process_gauge_metric(key, fields[0], message)
elif metric_type == "m":
self.process_meter_metric(key, fields[0], message)
elif metric_type in self.plugins:
self.process_plugin_metric(metric_type, key, fields, message)
else:
return self.fail(message)
self.process_timings.setdefault(metric_type, 0)
self.process_timings[metric_type] += self.time_function() - start
self.by_type.setdefault(metric_type, 0)
self.by_type[metric_type] += 1
def get_message_prefix(self, kind):
return "stats." + kind
def process_plugin_metric(self, metric_type, key, items, message):
if not key in self.plugin_metrics:
factory = self.plugins[metric_type]
metric = factory.build_metric(
self.get_message_prefix(factory.name),
name=key, wall_time_func=self.time_function)
self.plugin_metrics[key] = metric
self.plugin_metrics[key].process(items)
def process_timer_metric(self, key, duration, message):
try:
duration = float(duration)
except (TypeError, ValueError):
return self.fail(message)
self.compose_timer_metric(key, duration)
def compose_timer_metric(self, key, duration):
if key not in self.timer_metrics:
self.timer_metrics[key] = []
self.timer_metrics[key].append(duration)
def process_counter_metric(self, key, composite, message):
try:
value = float(composite[0])
except (TypeError, ValueError):
return self.fail(message)
rate = 1
if len(composite) == 3:
match = RATE.match(composite[2])
if match is None:
return self.fail(message)
rate = match.group(1)
self.compose_counter_metric(key, value, rate)
def compose_counter_metric(self, key, value, rate):
if key not in self.counter_metrics:
self.counter_metrics[key] = 0
self.counter_metrics[key] += value * (1 / float(rate))
def process_gauge_metric(self, key, composite, message):
values = composite.split(":")
if not len(values) == 1:
return self.fail(message)
try:
value = float(values[0])
except (TypeError, ValueError):
self.fail(message)
self.compose_gauge_metric(key, value)
def compose_gauge_metric(self, key, value):
metric = [value, key]
self.gauge_metrics.append(metric)
def process_meter_metric(self, key, composite, message):
values = composite.split(":")
if not len(values) == 1:
return self.fail(message)
try:
value = float(values[0])
except (TypeError, ValueError):
self.fail(message)
self.compose_meter_metric(key, value)
def compose_meter_metric(self, key, value):
if not key in self.meter_metrics:
metric = MeterMetricReporter(key, self.time_function,
prefix="stats.meter")
self.meter_metrics[key] = metric
self.meter_metrics[key].mark(value)
def flush(self, interval=10000, percent=90):
"""
Flush all queued stats, computing a normalized count based on
C{interval} and mean timings based on C{threshold}.
"""
per_metric = {}
num_stats = 0
interval = interval / 1000
timestamp = int(self.time_function())
start = self.time_function()
events = 0
for metrics in self.flush_counter_metrics(interval, timestamp):
for metric in metrics:
yield metric
events += 1
duration = self.time_function() - start
num_stats += events
per_metric["counter"] = (events, duration)
start = self.time_function()
events = 0
for metrics in self.flush_timer_metrics(percent, timestamp):
for metric in metrics:
yield metric
events += 1
duration = self.time_function() - start
num_stats += events
per_metric["timer"] = (events, duration)
start = self.time_function()
events = 0
for metrics in self.flush_gauge_metrics(timestamp):
for metric in metrics:
yield metric
events += 1
duration = self.time_function() - start
num_stats += events
per_metric["gauge"] = (events, duration)
start = self.time_function()
events = 0
for metrics in self.flush_meter_metrics(timestamp):
for metric in metrics:
yield metric
events += 1
duration = self.time_function() - start
num_stats += events
per_metric["meter"] = (events, duration)
start = self.time_function()
events = 0
for metrics in self.flush_plugin_metrics(interval, timestamp):
for metric in metrics:
yield metric
events += 1
duration = self.time_function() - start
num_stats += events
per_metric["plugin"] = (events, duration)
for metrics in self.flush_metrics_summary(num_stats, per_metric,
timestamp):
for metric in metrics:
yield metric
def flush_counter_metrics(self, interval, timestamp):
for key, count in self.counter_metrics.iteritems():
self.counter_metrics[key] = 0
value = count / interval
yield ((self.stats_prefix + key, value, timestamp),
(self.count_prefix + key, count, timestamp))
def flush_timer_metrics(self, percent, timestamp):
threshold_value = ((100 - percent) / 100.0)
for key, timers in self.timer_metrics.iteritems():
count = len(timers)
if count > 0:
self.timer_metrics[key] = []
timers.sort()
lower = timers[0]
upper = timers[-1]
count = len(timers)
mean = lower
threshold_upper = upper
if count > 1:
index = count - int(round(threshold_value * count))
timers = timers[:index]
threshold_upper = timers[-1]
mean = sum(timers) / index
items = {".mean": mean,
".upper": upper,
".upper_%s" % percent: threshold_upper,
".lower": lower,
".count": count}
yield sorted((self.timer_prefix + key + item, value, timestamp)
for item, value in items.iteritems())
def flush_gauge_metrics(self, timestamp):
for metric in self.gauge_metrics:
value = metric[0]
key = metric[1]
yield ((self.gauge_prefix + key + ".value", value, timestamp),)
def flush_meter_metrics(self, timestamp):
for metric in self.meter_metrics.itervalues():
messages = metric.report(timestamp)
yield messages
def flush_plugin_metrics(self, interval, timestamp):
for metric in self.plugin_metrics.itervalues():
messages = metric.flush(interval, timestamp)
yield messages
def flush_metrics_summary(self, num_stats, per_metric, timestamp):
yield ((self.internal_metrics_prefix + "numStats",
num_stats, timestamp),)
self.last_flush_duration = 0
for name, (value, duration) in per_metric.iteritems():
yield ((self.internal_metrics_prefix +
"flush.%s.count" % name,
value, timestamp),
(self.internal_metrics_prefix +
"flush.%s.duration" % name,
duration * 1000, timestamp))
log.msg("Flushed %d %s metrics in %.6f" %
(value, name, duration))
self.last_flush_duration += duration
self.last_process_duration = 0
for metric_type, duration in self.process_timings.iteritems():
yield ((self.internal_metrics_prefix +
"receive.%s.count" %
metric_type, self.by_type[metric_type], timestamp),
(self.internal_metrics_prefix +
"receive.%s.duration" %
metric_type, duration * 1000, timestamp))
log.msg("Processing %d %s metrics took %.6f" %
(self.by_type[metric_type], metric_type, duration))
self.last_process_duration += duration
self.process_timings.clear()
self.by_type.clear()
|
{
"content_hash": "f01033f5c9dad42d72cb87af1a21456b",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 79,
"avg_line_length": 35.09116809116809,
"alnum_prop": 0.5659657384103272,
"repo_name": "wikimedia/operations-debs-txstatsd",
"id": "425a0b9f16b9ecff2dc7eba8aeb179e2dbc2d10c",
"size": "13426",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "txstatsd/server/processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301852"
}
],
"symlink_target": ""
}
|
from urlparse import urlparse
from lastuserapp import init_for, app
init_for('dev')
REDIS_URL = app.config.get('REDIS_URL', 'redis://localhost:6379/0')
# REDIS_URL is not taken by setup_default_arguments function of rq/scripts/__init__.py
# so, parse it into pieces and give it
r = urlparse(REDIS_URL)
REDIS_HOST = r.hostname
REDIS_PORT = r.port
REDIS_PASSWORD = r.password
REDIS_DB = 0
|
{
"content_hash": "2538c283c132b0e982dacfc006d6bca6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 26.066666666666666,
"alnum_prop": 0.7340153452685422,
"repo_name": "sindhus/lastuser",
"id": "4387a35f6b20a24bdd2808f1154231b7dba023a3",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rqdev.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3623"
},
{
"name": "HTML",
"bytes": "35810"
},
{
"name": "JavaScript",
"bytes": "145"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "349287"
},
{
"name": "Ruby",
"bytes": "404"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
def read_markers (filename):
"Read an ordered list of marker names from a file."
with open(filename, 'r') as f:
lines = f.readlines()
return [line.strip() for line in lines]
class Person:
"Person class, to contain the data on a subject."
def __init__ (self,family, id, dad, mom, sex):
self.family = family
self.id = id
self.dad = dad
self.mom = mom
self.sex = "0" if sex == "2" else sex # convert from 1/2 to 1/0
self.famid = family + '-' + id
self.gen = {}
def read_families (filename):
"Read family info and return a hash of people."
with open(filename, 'r') as file:
file.readline() # header row
people = {}
for line in file:
vals = line.strip().split() # strip removes leading and ending white space
person = Person(vals[0], vals[1], vals[2], vals[3], vals[4])
people[person.famid] = person
return people
def parse_genotype (string):
"Clean up string -> genotype"
string = string.replace(' ', '')
string = "0/0" if string == "" else string
return string.replace('/', ' ')
def read_genotypes (filename, people):
"Read genotype data, fill in genotypes within people hash"
with open(filename, 'r') as file:
header = file.readline().strip().split()
header = header[1:] # omit the first field, "Marker"
for line in file:
marker = line[:9].replace(' ', '')
line = line[9:]
for i in range(len(header)):
person = header[i]
start = i*7
people[person].gen[marker] = parse_genotype(line[start:(start+7)])
def get_families (people):
"Return a vector of distinct families"
return set([people[key].family for key in people])
def get_family_members (people, family):
"Return a vector of famids for subjects within a family."
return [key for key in people if people[key].family == family]
def write_genfile (filename, people, markers):
"Write genotype data to a file, in CRI-MAP format."
with open(filename, 'w') as file:
families = sorted(get_families(people))
print(len(families), file=file)
print(len(markers), file=file)
for marker in markers:
print(marker, file=file)
for family in families:
print(family, file=file)
members = sorted(get_family_members(people, family), key=lambda famid: int(people[famid].id))
print(len(members), file=file)
for famid in members:
person = people[famid]
print("%s %s %s %s" % (person.id, person.mom, person.dad, person.sex), file=file)
for marker in markers:
print(person.gen[marker], end=" ", file=file)
print(file=file)
if __name__ == '__main__':
# file names
gfile = "genotypes.txt" # genotype data
mfile = "markers.txt" # list of markers, in order
ffile = "families.txt" # family information
ofile = "data.gen" # output file
# read the data
markers = read_markers(mfile)
people = read_families(ffile)
read_genotypes(gfile, people)
# write the data
write_genfile(ofile, people, markers)
|
{
"content_hash": "cccb1bd229ebdd425e8d747d8e125761",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 99,
"avg_line_length": 31.40625,
"alnum_prop": 0.6371475953565506,
"repo_name": "kbroman/PyBroman",
"id": "c51d46bb2133f8f50d0f10432ebd465f30b9df0e",
"size": "3357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ParseGenotypeData/convert3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2728"
},
{
"name": "Python",
"bytes": "14140"
}
],
"symlink_target": ""
}
|
import os
#from flask import Flask
from flask import Blueprint
app = Blueprint("urls", __name__,
template_folder='templates',
static_folder='static')
#app = Flask(__name__)
app.debug = True
app.config = {}
app.config.update(dict(
#DATABASE=os.path.join(app.root_path, 'urls.db'),
DATABASE='urls.db',
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='123',
DEBUG=True,
))
from .views import *
|
{
"content_hash": "1e5a3b96a0b3f6904fe3a778348012f9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.6418918918918919,
"repo_name": "anokata/pythonPetProjects",
"id": "0241b9a5ce0878fcadbd7e89ceb0c8ea678bd0d4",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urls/src/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6467"
},
{
"name": "HTML",
"bytes": "56632"
},
{
"name": "JavaScript",
"bytes": "603"
},
{
"name": "Makefile",
"bytes": "889"
},
{
"name": "Python",
"bytes": "840906"
},
{
"name": "Shell",
"bytes": "2407"
},
{
"name": "TSQL",
"bytes": "1299"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python3
import re
class foo:
"""Kinda sort not really string-y object"""
def __init__(self, val):
self.val = val
def __iter__(self):
for x in self.val:
yield x
print(re.compile('foo').match(foo('foo')))
#!/usr/bin/python3
class bar(foo):
"""slightly stringier but not object"""
def __getitem__(self, n):
return x[n]
print(re.compile('bar').match(foo('bar')))
# Really this might be, saaay, a buffer-gap backed writable string
#!/usr/bin/python3
import reap
print(reap.compile('foo').match(foo('foo')))
#!/usr/bin/python3
print(reap.compile('A?' * 33 + 'A' * 33).match('A' * 33))
#!/usr/bin/python3
print('This may take a while')
print(re.compile('A?' * 33 + 'A' * 33).match('A' * 33))
#
#
# 871 lines (right now)
# Only supports the most of subset of regular expresions that I use
# (so far)
# (I got distracted from the thing that I needed it for)
# I hope to pass eventually the tests for the re module
# Could be faster (DFAs, etc.)
# should have better error reporting (parsley instead of rply?)
# "what is a regular expression anyway"
# could fall back to a recursive matcher for backreferences
# github.com/kcr/reap
|
{
"content_hash": "d70929c72a8ae81a17fdce88a6ccd978",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 67,
"avg_line_length": 19.0625,
"alnum_prop": 0.6409836065573771,
"repo_name": "kcr/reap",
"id": "c9835cb4533ca4cb22b74d0a89b89b7b15f3d3fd",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presentation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28998"
}
],
"symlink_target": ""
}
|
"""
its very common and normal to control program flow in Python
using exception handling using try..except.
try:
# some code that might raise exception.
except Exception as e:
# handle any exception raised in try block.
else:
# code that runs in case no exceptions were raised.
finally:
# code that runs regardless of the exception.
"""
def exception_handler():
try:
1/0
except Exception as e:
print("exception: {}".format(e))
else:
print("else: no exceptions were raised")
finally:
print("finally: always runs")
def file_exists(filename):
try:
f = open(filename)
except IOError as e:
print(e)
else:
print("file exists")
def for_else():
for i in range(5):
print("for {}".format(i))
# return (then else clause will not be executed)
# break (then else clause will not be executed)
# raise Exception (then else clause will not be executed)
else:
# loop completed all iterations
# this else clause will be executed if
# no break, no return no exception raised.
print("else {}".format(i))
def while_else():
i = 0
while i < 5:
print("while {}".format(i))
i += 1
# return (then else clause will not be executed)
# break (then else clause will not be executed)
# raise Exception (then else clause will not be executed)
else:
# loop completed all iterations
# this else clause will be executed if
# no break, no return no exception raised.
print("while.else {}".format(i))
# WHY use else?
# most common use case would b search loops.
# suppose you are searching over a sequence and the lookup
# depends upon a condition being met.
# at the end of the loop, you may want to raise an exception
# if the item is not found or do additional processing if the
# item was found.
# else helps here.
# without else, you will have to use a flag to set when a
# particular condition is met and then check the flag after
# loop completes.
def main():
exception_handler()
file_exists("try_except.py")
for_else()
while_else()
if __name__ == '__main__':
main()
|
{
"content_hash": "95fc1519c11d06dbffce6c6e2713c042",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 65,
"avg_line_length": 27.24705882352941,
"alnum_prop": 0.6101036269430051,
"repo_name": "rajendrauppal/coding-interview",
"id": "2f13914acd30077b69f6168209f4b4172843c614",
"size": "2316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "programming_languages/Python/try_except_for_while_else.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16892"
},
{
"name": "C++",
"bytes": "341312"
},
{
"name": "Java",
"bytes": "3765"
},
{
"name": "Python",
"bytes": "23182"
}
],
"symlink_target": ""
}
|
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
for _file in _files:
if _file.endswith(".jpg"):
_images.append(os.path.join(_imagesDirectory, _file))
_imageIndex = 0
_imageTotal = len(_images)
_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_img = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
_imgHeight, _imgWidth = _img.shape
_fig = plt.figure("Median Smoothing")
_gs = GridSpec(1, 2)
_nRollingWindow = 3
_fig1 = plt.subplot(_gs[0, 0])
_fig1.set_title("Image with Salt-and-pepper Noise")
plt.imshow(_img, cmap = "gray")
_fig2 = plt.subplot(_gs[0, 1])
_fig2.set_title("Window: " + str(_nRollingWindow))
_blurImg = cv2.medianBlur(_img, _nRollingWindow)
plt.imshow(_blurImg, cmap = "gray")
plt.tight_layout()
plt.show()
|
{
"content_hash": "44bfae6f0b1c502b5ed2ca08fdcbc475",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 61,
"avg_line_length": 27.37837837837838,
"alnum_prop": 0.6771964461994077,
"repo_name": "userdw/RaspberryPi_3_Starter_Kit",
"id": "fe1857c11005a873843a25b129cde0c85f914a10",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "08_Image_Processing/Smoothing_Filter/medianFilter/medianFilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79309"
}
],
"symlink_target": ""
}
|
from mitsuba.core import *
from mitsuba.core import Transform as tf
from coll_detection import *
class Torso:
def __init__(self, itp): # itp is initial torso point
self._torso_radius = 5.5
self._torso_length = 18.0
self._init_clavile_toWorld = tf.translate(Vector(itp.x, itp.y+(self._torso_length/2), itp.z)) * tf.scale(Vector(self._torso_radius, 1, self._torso_radius)) * tf.rotate(Vector(1, 0, 0), -90)
self._init_torso_cylinder_toWorld = tf.translate(Vector(itp.x, itp.y, itp.z)) * tf.scale(Vector(1, self._torso_length, 1)) * tf.rotate(Vector(1, 0, 0), -90) * tf.translate(Vector(0, 0, -0.5))
self._init_hip_toWorld = tf.translate(Vector(itp.x, itp.y-(self._torso_length/2), itp.z)) * tf.scale(Vector(self._torso_radius, 1, self._torso_radius)) * tf.rotate(Vector(1, 0, 0), 90)
self._torso_cylinder_point = itp
self.clavile_prop = {
'type' : 'disk',
'toWorld': self._init_clavile_toWorld,
'bsdf' : {
'type' : 'ward',
'alphaU' : 0.003,
'alphaV' : 0.003,
'specularReflectance' : Spectrum(0.01),
'diffuseReflectance' : Spectrum([0.05, 0.05, 0.05])
}
}
self.torso_cylinder_prop = {
'type' : 'cylinder',
'toWorld': self._init_torso_cylinder_toWorld,
'radius' : self._torso_radius,
'bsdf' : {
'type' : 'ward',
'alphaU' : 0.003,
'alphaV' : 0.003,
'specularReflectance' : Spectrum(0.01),
'diffuseReflectance' : Spectrum([0.05, 0.05, 0.05])
}
}
self.hip_prop = {
'type' : 'disk',
'toWorld': self._init_hip_toWorld,
'bsdf' : {
'type' : 'ward',
'alphaU' : 0.003,
'alphaV' : 0.003,
'specularReflectance' : Spectrum(0.01),
'diffuseReflectance' : Spectrum([0.05, 0.05, 0.05])
}
}
def horiz_move(self, gx, gy, gz):
movement = tf.translate(Vector(gx, gy, gz)) # Affine matrix for horizontal movement.
self._init_clavile_toWorld = movement * self._init_clavile_toWorld
self._init_torso_cylinder_toWorld = movement * self._init_torso_cylinder_toWorld
self._init_hip_toWorld = movement * self._init_hip_toWorld
self.clavile_prop['toWorld'] = movement * self.clavile_prop['toWorld']
self.torso_cylinder_prop['toWorld'] = movement * self.torso_cylinder_prop['toWorld']
self.hip_prop['toWorld'] = movement * self.hip_prop['toWorld']
self._torso_cylinder_point = movement * self._torso_cylinder_point
def calc_torso_point(self):
return self._torso_cylinder_point
def calc_torso_upper_point(self):
return Point(self._torso_cylinder_point.x, self._torso_cylinder_point.y + self._torso_length/2, self._torso_cylinder_point.z)
def calc_torso_lower_point(self):
return Point(self._torso_cylinder_point.x, self._torso_cylinder_point.y - self._torso_length/2, self._torso_cylinder_point.z)
def get_torso_radius(self):
return self._torso_radius
def get_torso_length(self):
return self._torso_length
def get_property_list(self):
property_list = []
property_list.append(self.clavile_prop)
property_list.append(self.torso_cylinder_prop)
property_list.append(self.hip_prop)
return property_list
def constr_json_data(self):
# Construct JSON data
json_data = {
'true_position': {
'torso': dict(zip(['x', 'y', 'z'], self.calc_torso_point()))
}
}
return json_data
def is_collision_to_capsule(self, p1, p2, radius):
print 'Now, calculating the collision of torso and capsule'
min_dist = calc_min_dist_segment_segment(self.calc_torso_upper_point(), self.calc_torso_lower_point(), p1, p2)
return True if (min_dist <= radius + self.get_torso_radius()) else False
def is_collision_to_sphere(self, p1, radius):
print 'Now, calculating the collision of torso and sphere'
min_dist = calc_min_dist_segment_point(self.calc_torso_upper_point(), self.calc_torso_lower_point(), p1)
return True if (min_dist <= radius + self.get_torso_radius()) else False
|
{
"content_hash": "32379fea49aaba56d6b5fa4ba868b000",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 200,
"avg_line_length": 44.18446601941748,
"alnum_prop": 0.5638321248077346,
"repo_name": "wbap/Hackathon2015",
"id": "31978b81ff7f22338e56d9dd5e50023b899d76be",
"size": "4551",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Hiroshiba/mitsuba/torso.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36349"
},
{
"name": "HTML",
"bytes": "59"
},
{
"name": "JavaScript",
"bytes": "30754"
},
{
"name": "Jupyter Notebook",
"bytes": "39079"
},
{
"name": "PHP",
"bytes": "17783"
},
{
"name": "Python",
"bytes": "536916"
},
{
"name": "Shell",
"bytes": "220"
}
],
"symlink_target": ""
}
|
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase):
def _ConstOp(self, shape):
return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)
def GetParams(self):
"""Testing conversion of BiasAdd MatMul in TF-TRT conversion."""
input_name = "input"
input_matrix_rows = 4
input_matrix_columns = 144
input_dims = [input_matrix_rows, input_matrix_columns]
output_name = "output"
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(
dtype=dtypes.float32, shape=input_dims, name=input_name)
b = self._ConstOp((input_matrix_columns, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = x1 + b
b = self._ConstOp((input_matrix_rows, 144))
x2 = self.trt_incompatible_op(x)
x2 = math_ops.matmul(x2, b, transpose_a=True)
x2 = gen_array_ops.reshape(x2, [4, -1])
x2 = self.trt_incompatible_op(x2)
b = self._ConstOp((4, input_matrix_columns))
x3 = math_ops.matmul(x, b, transpose_b=True)
b = self._ConstOp((16, input_matrix_rows))
x4 = self.trt_incompatible_op(x)
x4 = math_ops.matmul(x4, b, transpose_b=True, transpose_a=True)
x4 = gen_array_ops.reshape(x4, [4, -1])
x4 = self.trt_incompatible_op(x4)
b = self._ConstOp((input_matrix_columns, 48))
x5 = math_ops.matmul(x, b)
b = self._ConstOp((48,))
x5 = nn.bias_add(x5, b)
x5 = gen_array_ops.reshape(x5, [4, -1])
x6 = gen_array_ops.reshape(x, [4, 12, 12])
b = self._ConstOp((12,))
x6 = nn.bias_add(x6, b, data_format="NHWC")
x6 = gen_array_ops.reshape(x6, [4, -1])
x7 = gen_array_ops.reshape(x, [4, 12, 3, 4])
b = self._ConstOp((4,))
x7 = nn.bias_add(x7, b, data_format="NHWC")
x7 = gen_array_ops.reshape(x7, [4, -1])
x8 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
b = self._ConstOp((2,))
x8 = nn.bias_add(x8, b, data_format="NHWC")
x8 = gen_array_ops.reshape(x8, [4, -1])
x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
b = self._ConstOp((12,))
x9 = nn.bias_add(x9, b, data_format="NCHW")
x9 = gen_array_ops.reshape(x9, [4, -1])
x10 = gen_array_ops.reshape(x, [4, 12, 3, 4])
b = self._ConstOp((12,))
x10 = nn.bias_add(x10, b, data_format="NCHW")
x10 = gen_array_ops.reshape(x10, [4, -1])
x11 = gen_array_ops.reshape(x, [4, 12, 12])
b = self._ConstOp((12,))
x11 = nn.bias_add(x11, b, data_format="NCHW")
x11 = gen_array_ops.reshape(x11, [4, -1])
out = array_ops.concat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11],
axis=-1)
out = array_ops.squeeze(out, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=[input_dims],
output_names=[output_name],
expected_output_dims=[(4, 6680)])
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
return super(BiasaddMatMulTest,
self).GetConversionParams(run_params)._replace(
max_batch_size=4, maximum_cached_engines=1)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["my_trt_op_0"]
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# TODO(aaroey): Trt 4.0 forbids conversion for tensors with rank <3 in int8
# mode, which is a bug. Re-enable this when trt library is fixed.
return not trt_test.IsQuantizationMode(run_params.precision_mode)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "07986f101b5403c8c959e763945a1f5c",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 85,
"avg_line_length": 35.586776859504134,
"alnum_prop": 0.6212261960055736,
"repo_name": "seanli9jan/tensorflow",
"id": "7545bb9df20f295a8fdbc82b573cdb3407f8c5e4",
"size": "4995",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/tensorrt/test/biasadd_matmul_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""
Wrapper classes for Cif input and output from Structures.
"""
import math
import os
import re
import textwrap
import warnings
from collections import OrderedDict, deque
from functools import partial
from inspect import getfullargspec as getargspec
from io import StringIO
from itertools import groupby
from pathlib import Path
import numpy as np
from monty.io import zopen
from monty.string import remove_non_ascii
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import MagSymmOp, SymmOp
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer, SpacegroupOperations
from pymatgen.symmetry.groups import SYMM_DATA, SpaceGroup
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.util.coord import find_in_coord_list_pbc, in_coord_list_pbc
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
sub_spgrp = partial(re.sub, r"[\s_]", "")
space_groups = {sub_spgrp(k): k for k in SYMM_DATA["space_group_encoding"].keys()} # type: ignore
space_groups.update({sub_spgrp(k): k for k in SYMM_DATA["space_group_encoding"].keys()}) # type: ignore
_COD_DATA = None
def _get_cod_data():
global _COD_DATA
if _COD_DATA is None:
import pymatgen
with open(os.path.join(pymatgen.symmetry.__path__[0], "symm_ops.json")) as f:
import json
_COD_DATA = json.load(f)
return _COD_DATA
class CifBlock:
"""
Object for storing cif data. All data is stored in a single dictionary.
Data inside loops are stored in lists in the data dictionary, and
information on which keys are grouped together are stored in the loops
attribute.
"""
maxlen = 70 # not quite 80 so we can deal with semicolons and things
def __init__(self, data, loops, header):
"""
Args:
data: dict or OrderedDict of data to go into the cif. Values should
be convertible to string, or lists of these if the key is
in a loop
loops: list of lists of keys, grouped by which loop they should
appear in
header: name of the block (appears after the data_ on the first
line)
"""
self.loops = loops
self.data = data
# AJ says: CIF Block names cannot be more than 75 characters or you
# get an Exception
self.header = header[:74]
def __eq__(self, other):
return self.loops == other.loops and self.data == other.data and self.header == other.header
def __getitem__(self, key):
return self.data[key]
def __str__(self):
"""
Returns the cif string for the data block
"""
s = ["data_{}".format(self.header)]
keys = self.data.keys()
written = []
for k in keys:
if k in written:
continue
for l in self.loops:
# search for a corresponding loop
if k in l:
s.append(self._loop_to_string(l))
written.extend(l)
break
if k not in written:
# k didn't belong to a loop
v = self._format_field(self.data[k])
if len(k) + len(v) + 3 < self.maxlen:
s.append("{} {}".format(k, v))
else:
s.extend([k, v])
return "\n".join(s)
def _loop_to_string(self, loop):
s = "loop_"
for l in loop:
s += "\n " + l
for fields in zip(*[self.data[k] for k in loop]):
line = "\n"
for val in map(self._format_field, fields):
if val[0] == ";":
s += line + "\n" + val
line = "\n"
elif len(line) + len(val) + 2 < self.maxlen:
line += " " + val
else:
s += line
line = "\n " + val
s += line
return s
def _format_field(self, v):
v = v.__str__().strip()
if len(v) > self.maxlen:
return ";\n" + textwrap.fill(v, self.maxlen) + "\n;"
# add quotes if necessary
if v == "":
return '""'
if (" " in v or v[0] == "_") and not (v[0] == "'" and v[-1] == "'") and not (v[0] == '"' and v[-1] == '"'):
if "'" in v:
q = '"'
else:
q = "'"
v = q + v + q
return v
@classmethod
def _process_string(cls, string):
# remove comments
string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
# remove empty lines
string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
# remove non_ascii
string = remove_non_ascii(string)
# since line breaks in .cif files are mostly meaningless,
# break up into a stream of tokens to parse, rejoining multiline
# strings (between semicolons)
q = deque()
multiline = False
ml = []
# this regex splits on spaces, except when in quotes.
# starting quotes must not be preceded by non-whitespace
# (these get eaten by the first expression)
# ending quotes must not be followed by non-whitespace
p = re.compile(r"""([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)""")
for l in string.splitlines():
if multiline:
if l.startswith(";"):
multiline = False
q.append(("", "", "", " ".join(ml)))
ml = []
l = l[1:].strip()
else:
ml.append(l)
continue
if l.startswith(";"):
multiline = True
ml.append(l[1:].strip())
else:
for s in p.findall(l):
# s is tuple. location of the data in the tuple
# depends on whether it was quoted in the input
q.append(s)
return q
@classmethod
def from_string(cls, string):
"""
Reads CifBlock from string.
:param string: String representation.
:return: CifBlock
"""
q = cls._process_string(string)
header = q.popleft()[0][5:]
data = OrderedDict()
loops = []
while q:
s = q.popleft()
# cif keys aren't in quotes, so show up in s[0]
if s[0] == "_eof":
break
if s[0].startswith("_"):
try:
data[s[0]] = "".join(q.popleft())
except IndexError:
data[s[0]] = ""
elif s[0].startswith("loop_"):
columns = []
items = []
while q:
s = q[0]
if s[0].startswith("loop_") or not s[0].startswith("_"):
break
columns.append("".join(q.popleft()))
data[columns[-1]] = []
while q:
s = q[0]
if s[0].startswith("loop_") or s[0].startswith("_"):
break
items.append("".join(q.popleft()))
n = len(items) // len(columns)
assert len(items) % n == 0
loops.append(columns)
for k, v in zip(columns * n, items):
data[k].append(v.strip())
elif "".join(s).strip() != "":
warnings.warn("Possible issue in cif file" " at line: {}".format("".join(s).strip()))
return cls(data, loops, header)
class CifFile:
"""
Reads and parses CifBlocks from a .cif file or string
"""
def __init__(self, data, orig_string=None, comment=None):
"""
Args:
data (OrderedDict): Of CifBlock objects.å
orig_string (str): The original cif string.
comment (str): Comment string.
"""
self.data = data
self.orig_string = orig_string
self.comment = comment or "# generated using pymatgen"
def __str__(self):
s = ["%s" % v for v in self.data.values()]
return self.comment + "\n" + "\n".join(s) + "\n"
@classmethod
def from_string(cls, string):
"""
Reads CifFile from a string.
:param string: String representation.
:return: CifFile
"""
d = OrderedDict()
for x in re.split(r"^\s*data_", "x\n" + string, flags=re.MULTILINE | re.DOTALL)[1:]:
# Skip over Cif block that contains powder diffraction data.
# Some elements in this block were missing from CIF files in
# Springer materials/Pauling file DBs.
# This block anyway does not contain any structure information, and
# CifParser was also not parsing it.
if "powder_pattern" in re.split(r"\n", x, 1)[0]:
continue
c = CifBlock.from_string("data_" + x)
d[c.header] = c
return cls(d, string)
@classmethod
def from_file(cls, filename):
"""
Reads CifFile from a filename.
:param filename: Filename
:return: CifFile
"""
with zopen(str(filename), "rt", errors="replace") as f:
return cls.from_string(f.read())
class CifParser:
"""
Parses a CIF file. Attempts to fix CIFs that are out-of-spec, but will
issue warnings if corrections applied. These are also stored in the
CifParser's errors attribute.
"""
def __init__(self, filename, occupancy_tolerance=1.0, site_tolerance=1e-4):
"""
Args:
filename (str): CIF filename, bzipped or gzipped CIF files are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
site_tolerance (float): This tolerance is used to determine if two
sites are sitting in the same position, in which case they will be
combined to a single disordered site. Defaults to 1e-4.
"""
self._occupancy_tolerance = occupancy_tolerance
self._site_tolerance = site_tolerance
if isinstance(filename, (str, Path)):
self._cif = CifFile.from_file(filename)
else:
self._cif = CifFile.from_string(filename.read())
# store if CIF contains features from non-core CIF dictionaries
# e.g. magCIF
self.feature_flags = {}
self.warnings = []
def is_magcif():
"""
Checks to see if file appears to be a magCIF file (heuristic).
"""
# Doesn't seem to be a canonical way to test if file is magCIF or
# not, so instead check for magnetic symmetry datanames
prefixes = [
"_space_group_magn",
"_atom_site_moment",
"_space_group_symop_magn",
]
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags["magcif"] = is_magcif()
def is_magcif_incommensurate():
"""
Checks to see if file contains an incommensurate magnetic
structure (heuristic).
"""
# Doesn't seem to be a canonical way to test if magCIF file
# describes incommensurate strucure or not, so instead check
# for common datanames
if not self.feature_flags["magcif"]:
return False
prefixes = ["_cell_modulation_dimension", "_cell_wave_vector"]
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags["magcif_incommensurate"] = is_magcif_incommensurate()
for k in self._cif.data.keys():
# pass individual CifBlocks to _sanitize_data
self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.0):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = StringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
def _sanitize_data(self, data):
"""
Some CIF files do not conform to spec. This function corrects
known issues, particular in regards to Springer materials/
Pauling files.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data["_atom_site_attached_hydrogens"] if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.warnings.append(
"Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added."
)
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
# Keep a track of which data row needs to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(" + ")) > len(
data["_atom_site_label"][idx].split(" + ")
):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(" + ")
for elocc_idx, sym in enumerate(symbol_str_lst):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(r"\([0-9]*\)", "", sym.strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[
str(re.findall(r"\D+", symbol_str_lst[elocc_idx].strip())[1]).replace("<sup>", "")
] = float("0" + re.findall(r"\.?\d+", symbol_str_lst[elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(et + "_fix" + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id]
if len(idxs_to_remove) > 0:
self.warnings.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = [
"_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label",
]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc": "_space_group_magn.transform_BNS_Pp_abc"
}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.warnings.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1 / 3.0, 2 / 3.0)
fracs_to_change = {}
for label in ("_atom_site_fract_x", "_atom_site_fract_y", "_atom_site_fract_z"):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except Exception:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac / comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.warnings.append(
"Some fractional co-ordinates rounded to ideal values to " "avoid issues with finite precision."
)
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
"""
Generate unique coordinates using coord and symmetry positions
and also their corresponding magnetic moments, if supplied.
"""
coords = []
if magmoms_in:
magmoms = []
if len(magmoms_in) != len(coords_in):
raise ValueError
for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if isinstance(op, MagSymmOp):
# Up to this point, magmoms have been defined relative
# to crystal axis. Now convert to Cartesian and into
# a Magmom object.
magmom = Magmom.from_moment_relative_to_crystal_axes(
op.operate_magmom(tmp_magmom), lattice=lattice
)
else:
magmom = Magmom(tmp_magmom)
if not in_coord_list_pbc(coords, coord, atol=self._site_tolerance):
coords.append(coord)
magmoms.append(magmom)
return coords, magmoms
for tmp_coord in coords_in:
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord, atol=self._site_tolerance):
coords.append(coord)
return coords, [Magmom(0)] * len(coords) # return dummy magmoms
def get_lattice(
self,
data,
length_strings=("a", "b", "c"),
angle_strings=("alpha", "beta", "gamma"),
lattice_type=None,
):
"""
Generate the lattice from the provided lattice parameters. In
the absence of all six lattice parameters, the crystal system
and necessary parameters are parsed
"""
try:
lengths = [str2float(data["_cell_length_" + i]) for i in length_strings]
angles = [str2float(data["_cell_angle_" + i]) for i in angle_strings]
if not lattice_type:
return Lattice.from_parameters(*lengths, *angles)
return getattr(Lattice, lattice_type)(*(lengths + angles))
except KeyError:
# Missing Key search for cell setting
for lattice_lable in [
"_symmetry_cell_setting",
"_space_group_crystal_system",
]:
if data.data.get(lattice_lable):
lattice_type = data.data.get(lattice_lable).lower()
try:
required_args = getargspec(getattr(Lattice, lattice_type)).args
lengths = (l for l in length_strings if l in required_args)
angles = (a for a in angle_strings if a in required_args)
return self.get_lattice(data, lengths, angles, lattice_type=lattice_type)
except AttributeError as exc:
self.warnings.append(str(exc))
warnings.warn(exc)
else:
return None
return None
def get_symops(self, data):
"""
In order to generate symmetry equivalent positions, the symmetry
operations are parsed. If the symops are not present, the space
group symbol is parsed, and symops are generated.
"""
symops = []
for symmetry_label in [
"_symmetry_equiv_pos_as_xyz",
"_symmetry_equiv_pos_as_xyz_",
"_space_group_symop_operation_xyz",
"_space_group_symop_operation_xyz_",
]:
if data.data.get(symmetry_label):
xyz = data.data.get(symmetry_label)
if isinstance(xyz, str):
msg = "A 1-line symmetry op P1 CIF is detected!"
warnings.warn(msg)
self.warnings.append(msg)
xyz = [xyz]
try:
symops = [SymmOp.from_xyz_string(s) for s in xyz]
break
except ValueError:
continue
if not symops:
# Try to parse symbol
for symmetry_label in [
"_symmetry_space_group_name_H-M",
"_symmetry_space_group_name_H_M",
"_symmetry_space_group_name_H-M_",
"_symmetry_space_group_name_H_M_",
"_space_group_name_Hall",
"_space_group_name_Hall_",
"_space_group_name_H-M_alt",
"_space_group_name_H-M_alt_",
"_symmetry_space_group_name_hall",
"_symmetry_space_group_name_hall_",
"_symmetry_space_group_name_h-m",
"_symmetry_space_group_name_h-m_",
]:
sg = data.data.get(symmetry_label)
if sg:
sg = sub_spgrp(sg)
try:
spg = space_groups.get(sg)
if spg:
symops = SpaceGroup(spg).symmetry_ops
msg = (
"No _symmetry_equiv_pos_as_xyz type key found. "
"Spacegroup from %s used." % symmetry_label
)
warnings.warn(msg)
self.warnings.append(msg)
break
except ValueError:
# Ignore any errors
pass
try:
for d in _get_cod_data():
if sg == re.sub(r"\s+", "", d["hermann_mauguin"]):
xyz = d["symops"]
symops = [SymmOp.from_xyz_string(s) for s in xyz]
msg = (
"No _symmetry_equiv_pos_as_xyz type key found. "
"Spacegroup from %s used." % symmetry_label
)
warnings.warn(msg)
self.warnings.append(msg)
break
except Exception:
continue
if symops:
break
if not symops:
# Try to parse International number
for symmetry_label in [
"_space_group_IT_number",
"_space_group_IT_number_",
"_symmetry_Int_Tables_number",
"_symmetry_Int_Tables_number_",
]:
if data.data.get(symmetry_label):
try:
i = int(str2float(data.data.get(symmetry_label)))
symops = SpaceGroup.from_int_number(i).symmetry_ops
break
except ValueError:
continue
if not symops:
msg = "No _symmetry_equiv_pos_as_xyz type key found. " "Defaulting to P1."
warnings.warn(msg)
self.warnings.append(msg)
symops = [SymmOp.from_xyz_string(s) for s in ["x", "y", "z"]]
return symops
def get_magsymops(self, data):
"""
Equivalent to get_symops except for magnetic symmetry groups.
Separate function since additional operation for time reversal symmetry
(which changes magnetic moments on sites) needs to be returned.
"""
magsymmops = []
# check to see if magCIF file explicitly contains magnetic symmetry operations
if data.data.get("_space_group_symop_magn_operation.xyz"):
xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
if data.data.get("_space_group_symop_magn_centering.xyz"):
xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
all_ops = []
for op in magsymmops:
for centering_op in centering_symops:
new_translation = [
i - np.floor(i) for i in op.translation_vector + centering_op.translation_vector
]
new_time_reversal = op.time_reversal * centering_op.time_reversal
all_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=new_translation,
time_reversal=new_time_reversal,
)
)
magsymmops = all_ops
# else check to see if it specifies a magnetic space group
elif data.data.get("_space_group_magn.name_BNS") or data.data.get("_space_group_magn.number_BNS"):
if data.data.get("_space_group_magn.name_BNS"):
# get BNS label for MagneticSpaceGroup()
id = data.data.get("_space_group_magn.name_BNS")
else:
# get BNS number for MagneticSpaceGroup()
# by converting string to list of ints
id = list(map(int, (data.data.get("_space_group_magn.number_BNS").split("."))))
if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
if data.data.get("_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
jf = data.data.get("_space_group_magn.transform_BNS_Pp_abc")
msg = MagneticSpaceGroup(id, jf)
elif data.data.get("_space_group_magn.transform_BNS_Pp"):
return NotImplementedError("Incomplete specification to implement.")
else:
msg = MagneticSpaceGroup(id)
magsymmops = msg.symmetry_ops
if not magsymmops:
msg = "No magnetic symmetry detected, using primitive symmetry."
warnings.warn(msg)
self.warnings.append(msg)
magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
return magsymmops
@staticmethod
def parse_oxi_states(data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]: str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))
}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states
@staticmethod
def parse_magmoms(data, lattice=None):
"""
Parse atomic magnetic moments from data dictionary
"""
if lattice is None:
raise Exception("Magmoms given in terms of crystal axes in magCIF spec.")
try:
magmoms = {
data["_atom_site_moment_label"][i]: np.array(
[
str2float(data["_atom_site_moment_crystalaxis_x"][i]),
str2float(data["_atom_site_moment_crystalaxis_y"][i]),
str2float(data["_atom_site_moment_crystalaxis_z"][i]),
]
)
for i in range(len(data["_atom_site_moment_label"]))
}
except (ValueError, KeyError):
return None
return magmoms
def _parse_symbol(self, sym):
"""
Parse a string with a symbol to extract a string representing an element.
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible.
"""
# Common representations for elements/water in cif files
# TODO: fix inconsistent handling of water
special = {
"Hw": "H",
"Ow": "O",
"Wat": "O",
"wat": "O",
"OH": "",
"OH2": "",
"NO3": "N",
}
parsed_sym = None
# try with special symbols, otherwise check the first two letters,
# then the first letter alone. If everything fails try extracting the
# first letters.
m_sp = re.match("|".join(special.keys()), sym)
if m_sp:
parsed_sym = special[m_sp.group()]
elif Element.is_valid_symbol(sym[:2].title()):
parsed_sym = sym[:2].title()
elif Element.is_valid_symbol(sym[0].upper()):
parsed_sym = sym[0].upper()
else:
m = re.match(r"w?[A-Z][a-z]*", sym)
if m:
parsed_sym = m.group()
if parsed_sym is not None and (m_sp or not re.match(r"{}\d*".format(parsed_sym), sym)):
msg = "{} parsed as {}".format(sym, parsed_sym)
warnings.warn(msg)
self.warnings.append(msg)
return parsed_sym
def _get_structure(self, data, primitive, symmetrized):
"""
Generate structure from part of the cif.
"""
def get_num_implicit_hydrogens(sym):
num_h = {"Wat": 2, "wat": 2, "O-H": 1}
return num_h.get(sym[:3], 0)
lattice = self.get_lattice(data)
# if magCIF, get magnetic symmetry moments and magmoms
# else standard CIF, and use empty magmom dict
if self.feature_flags["magcif_incommensurate"]:
raise NotImplementedError("Incommensurate structures not currently supported.")
if self.feature_flags["magcif"]:
self.symmetry_operations = self.get_magsymops(data)
magmoms = self.parse_magmoms(data, lattice=lattice)
else:
self.symmetry_operations = self.get_symops(data)
magmoms = {}
oxi_states = self.parse_oxi_states(data)
coord_to_species = OrderedDict()
coord_to_magmoms = OrderedDict()
def get_matching_coord(coord):
keys = list(coord_to_species.keys())
coords = np.array(keys)
for op in self.symmetry_operations:
c = op.operate(coord)
inds = find_in_coord_list_pbc(coords, c, atol=self._site_tolerance)
# cant use if inds, because python is dumb and np.array([0]) evaluates
# to False
if len(inds):
return keys[inds[0]]
return False
for i in range(len(data["_atom_site_label"])):
try:
# If site type symbol exists, use it. Otherwise, we use the
# label.
symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_type_symbol"][i])
except KeyError:
symbol = self._parse_symbol(data["_atom_site_label"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
if not symbol:
continue
if oxi_states is not None:
o_s = oxi_states.get(symbol, 0)
# use _atom_site_type_symbol if possible for oxidation state
if "_atom_site_type_symbol" in data.data.keys():
oxi_symbol = data["_atom_site_type_symbol"][i]
o_s = oxi_states.get(oxi_symbol, o_s)
try:
el = Species(symbol, o_s)
except Exception:
el = DummySpecies(symbol, o_s)
else:
el = get_el_sp(symbol)
x = str2float(data["_atom_site_fract_x"][i])
y = str2float(data["_atom_site_fract_y"][i])
z = str2float(data["_atom_site_fract_z"][i])
magmom = magmoms.get(data["_atom_site_label"][i], np.array([0, 0, 0]))
try:
occu = str2float(data["_atom_site_occupancy"][i])
except (KeyError, ValueError):
occu = 1
if occu > 0:
coord = (x, y, z)
match = get_matching_coord(coord)
comp_d = {el: occu}
if num_h > 0:
comp_d["H"] = num_h
self.warnings.append(
"Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added."
)
comp = Composition(comp_d)
if not match:
coord_to_species[coord] = comp
coord_to_magmoms[coord] = magmom
else:
coord_to_species[match] += comp
# disordered magnetic not currently supported
coord_to_magmoms[match] = None
sum_occu = [
sum(c.values()) for c in coord_to_species.values() if not set(c.elements) == {Element("O"), Element("H")}
]
if any(o > 1 for o in sum_occu):
msg = (
"Some occupancies ({}) sum to > 1! If they are within "
"the occupancy_tolerance, they will be rescaled. "
"The current occupancy_tolerance is set to: {}".format(sum_occu, self._occupancy_tolerance)
)
warnings.warn(msg)
self.warnings.append(msg)
allspecies = []
allcoords = []
allmagmoms = []
allhydrogens = []
equivalent_indices = []
# check to see if magCIF file is disordered
if self.feature_flags["magcif"]:
for k, v in coord_to_magmoms.items():
if v is None:
# Proposed solution to this is to instead store magnetic
# moments as Species 'spin' property, instead of site
# property, but this introduces ambiguities for end user
# (such as unintended use of `spin` and Species will have
# fictious oxidation state).
raise NotImplementedError("Disordered magnetic structures not currently supported.")
if coord_to_species.items():
for idx, (comp, group) in enumerate(
groupby(
sorted(list(coord_to_species.items()), key=lambda x: x[1]),
key=lambda x: x[1],
)
):
tmp_coords = [site[0] for site in group]
tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in tmp_coords]
if self.feature_flags["magcif"]:
coords, magmoms = self._unique_coords(tmp_coords, magmoms_in=tmp_magmom, lattice=lattice)
else:
coords, magmoms = self._unique_coords(tmp_coords)
if set(comp.elements) == {Element("O"), Element("H")}:
# O with implicit hydrogens
im_h = comp["H"]
species = Composition({"O": comp["O"]})
else:
im_h = 0
species = comp
# The following might be a more natural representation of equivalent indicies,
# but is not in the format expect by SymmetrizedStructure:
# equivalent_indices.append(list(range(len(allcoords), len(coords)+len(allcoords))))
# The above gives a list like:
# [[0, 1, 2, 3], [4, 5, 6, 7, 8, 9, 10, 11]] where the
# integers are site indices, whereas the version used below will give a version like:
# [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
# which is a list in the same order as the sites, but where if a site has the same integer
# it is equivalent.
equivalent_indices += len(coords) * [idx]
allhydrogens.extend(len(coords) * [im_h])
allcoords.extend(coords)
allspecies.extend(len(coords) * [species])
allmagmoms.extend(magmoms)
# rescale occupancies if necessary
for i, species in enumerate(allspecies):
totaloccu = sum(species.values())
if 1 < totaloccu <= self._occupancy_tolerance:
allspecies[i] = species / totaloccu
if allspecies and len(allspecies) == len(allcoords) and len(allspecies) == len(allmagmoms):
site_properties = dict()
if any(allhydrogens):
assert len(allhydrogens) == len(allcoords)
site_properties["implicit_hydrogens"] = allhydrogens
if self.feature_flags["magcif"]:
site_properties["magmom"] = allmagmoms
if len(site_properties) == 0:
site_properties = None
struct = Structure(lattice, allspecies, allcoords, site_properties=site_properties)
if symmetrized:
# Wyckoff labels not currently parsed, note that not all CIFs will contain Wyckoff labels
# TODO: extract Wyckoff labels (or other CIF attributes) and include as site_properties
wyckoffs = ["Not Parsed"] * len(struct)
# Names of space groups are likewise not parsed (again, not all CIFs will contain this information)
# What is stored are the lists of symmetry operations used to generate the structure
# TODO: ensure space group labels are stored if present
sg = SpacegroupOperations("Not Parsed", -1, self.symmetry_operations)
return SymmetrizedStructure(struct, sg, equivalent_indices, wyckoffs)
struct = struct.get_sorted_structure()
if primitive and self.feature_flags["magcif"]:
struct = struct.get_primitive_structure(use_site_props=True)
elif primitive:
struct = struct.get_primitive_structure()
struct = struct.get_reduced_structure()
return struct
def get_structures(self, primitive=True, symmetrized=False):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
symmetrized (bool): If True, return a SymmetrizedStructure which will
include the equivalent indices and symmetry operations used to
create the Structure as provided by the CIF (if explicit symmetry
operations are included in the CIF) or generated from information
in the CIF (if only space group labels are provided). Note that
currently Wyckoff labels and space group labels or numbers are
not included in the generated SymmetrizedStructure, these will be
notated as "Not Parsed" or -1 respectively.
Returns:
List of Structures.
"""
if primitive and symmetrized:
raise ValueError(
"Using both 'primitive' and 'symmetrized' arguments is not currently supported "
"since unexpected behavior might result."
)
structures = []
for i, d in enumerate(self._cif.data.values()):
try:
s = self._get_structure(d, primitive, symmetrized)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.warnings.append(str(exc))
warnings.warn("No structure parsed for %d structure in CIF. Section of CIF file below." % (i + 1))
warnings.warn(str(d))
warnings.warn("Error is %s." % str(exc))
if self.warnings:
warnings.warn("Issues encountered while parsing CIF: %s" % "\n".join(self.warnings))
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
def get_bibtex_string(self):
"""
Get BibTeX reference from CIF file.
:param data:
:return: BibTeX string
"""
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
raise RuntimeError("Bibliographic data extraction requires pybtex.")
bibtex_keys = {
"author": ("_publ_author_name", "_citation_author_name"),
"title": ("_publ_section_title", "_citation_title"),
"journal": (
"_journal_name_full",
"_journal_name_abbrev",
"_citation_journal_full",
"_citation_journal_abbrev",
),
"volume": ("_journal_volume", "_citation_journal_volume"),
"year": ("_journal_year", "_citation_year"),
"number": ("_journal_number", "_citation_number"),
"page_first": ("_journal_page_first", "_citation_page_first"),
"page_last": ("_journal_page_last", "_citation_page_last"),
"doi": ("_journal_DOI", "_citation_DOI"),
}
entries = {}
# TODO: parse '_publ_section_references' when it exists?
# TODO: CIF specification supports multiple citations.
for idx, data in enumerate(self._cif.data.values()):
# convert to lower-case keys, some cif files inconsistent
data = {k.lower(): v for k, v in data.data.items()}
bibtex_entry = {}
for field, tags in bibtex_keys.items():
for tag in tags:
if tag in data:
if isinstance(data[tag], list):
bibtex_entry[field] = data[tag][0]
else:
bibtex_entry[field] = data[tag]
# convert to bibtex author format ('and' delimited)
if "author" in bibtex_entry:
# separate out semicolon authors
if isinstance(bibtex_entry["author"], str):
if ";" in bibtex_entry["author"]:
bibtex_entry["author"] = bibtex_entry["author"].split(";")
if isinstance(bibtex_entry["author"], list):
bibtex_entry["author"] = " and ".join(bibtex_entry["author"])
# convert to bibtex page range format, use empty string if not specified
if ("page_first" in bibtex_entry) or ("page_last" in bibtex_entry):
bibtex_entry["pages"] = "{0}--{1}".format(
bibtex_entry.get("page_first", ""),
bibtex_entry.get("page_last", ""),
)
bibtex_entry.pop("page_first", None) # and remove page_first, page_list if present
bibtex_entry.pop("page_last", None)
# cite keys are given as cif-reference-idx in order they are found
entries["cifref{}".format(idx)] = Entry("article", list(bibtex_entry.items()))
return BibliographyData(entries).to_string(bib_format="bibtex")
def as_dict(self):
"""
:return: MSONable dict
"""
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
@property
def has_errors(self):
"""
:return: Whether there are errors/warnings detected in CIF parsing.
"""
return len(self.warnings) > 0
class CifWriter:
"""
A wrapper around CifFile to write CIF files from pymatgen structures.
"""
def __init__(
self,
struct,
symprec=None,
write_magmoms=False,
significant_figures=8,
angle_tolerance=5.0,
refine_struct=True,
):
"""
Args:
struct (Structure): structure to write
symprec (float): If not none, finds the symmetry of the structure
and writes the cif with symmetry information. Passes symprec
to the SpacegroupAnalyzer. See also refine_struct.
write_magmoms (bool): If True, will write magCIF file. Incompatible
with symprec
significant_figures (int): Specifies precision for formatting of floats.
Defaults to 8.
angle_tolerance (float): Angle tolerance for symmetry finding. Passes
angle_tolerance to the SpacegroupAnalyzer. Used only if symprec
is not None.
refine_struct: Used only if symprec is not None. If True, get_refined_structure
is invoked to convert input structure from primitive to conventional.
"""
if write_magmoms and symprec:
warnings.warn("Magnetic symmetry cannot currently be detected by pymatgen," "disabling symmetry detection.")
symprec = None
format_str = "{:.%df}" % significant_figures
block = OrderedDict()
loops = []
spacegroup = ("P 1", 1)
if symprec is not None:
sf = SpacegroupAnalyzer(struct, symprec, angle_tolerance=angle_tolerance)
spacegroup = (sf.get_space_group_symbol(), sf.get_space_group_number())
if refine_struct:
# Needs the refined structure when using symprec. This converts
# primitive to conventional structures, the standard for CIF.
struct = sf.get_refined_structure()
latt = struct.lattice
comp = struct.composition
no_oxi_comp = comp.element_composition
block["_symmetry_space_group_name_H-M"] = spacegroup[0]
for cell_attr in ["a", "b", "c"]:
block["_cell_length_" + cell_attr] = format_str.format(getattr(latt, cell_attr))
for cell_attr in ["alpha", "beta", "gamma"]:
block["_cell_angle_" + cell_attr] = format_str.format(getattr(latt, cell_attr))
block["_symmetry_Int_Tables_number"] = spacegroup[1]
block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
block["_chemical_formula_sum"] = no_oxi_comp.formula
block["_cell_volume"] = format_str.format(latt.volume)
reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
block["_cell_formula_units_Z"] = str(int(fu))
if symprec is None:
block["_symmetry_equiv_pos_site_id"] = ["1"]
block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
else:
sf = SpacegroupAnalyzer(struct, symprec)
symmops = []
for op in sf.get_symmetry_operations():
v = op.translation_vector
symmops.append(SymmOp.from_rotation_and_translation(op.rotation_matrix, v))
ops = [op.as_xyz_string() for op in symmops]
block["_symmetry_equiv_pos_site_id"] = ["%d" % i for i in range(1, len(ops) + 1)]
block["_symmetry_equiv_pos_as_xyz"] = ops
loops.append(["_symmetry_equiv_pos_site_id", "_symmetry_equiv_pos_as_xyz"])
try:
symbol_to_oxinum = OrderedDict([(el.__str__(), float(el.oxi_state)) for el in sorted(comp.elements)])
block["_atom_type_symbol"] = symbol_to_oxinum.keys()
block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
except (TypeError, AttributeError):
symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in sorted(comp.elements)])
atom_site_type_symbol = []
atom_site_symmetry_multiplicity = []
atom_site_fract_x = []
atom_site_fract_y = []
atom_site_fract_z = []
atom_site_label = []
atom_site_occupancy = []
atom_site_moment_label = []
atom_site_moment_crystalaxis_x = []
atom_site_moment_crystalaxis_y = []
atom_site_moment_crystalaxis_z = []
count = 0
if symprec is None:
for site in struct:
for sp, occu in sorted(site.species.items()):
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("1")
atom_site_fract_x.append(format_str.format(site.a))
atom_site_fract_y.append(format_str.format(site.b))
atom_site_fract_z.append(format_str.format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
magmom = Magmom(site.properties.get("magmom", getattr(sp, "spin", 0)))
if write_magmoms and abs(magmom) > 0:
moment = Magmom.get_moment_relative_to_crystal_axes(magmom, latt)
atom_site_moment_label.append("{}{}".format(sp.symbol, count))
atom_site_moment_crystalaxis_x.append(format_str.format(moment[0]))
atom_site_moment_crystalaxis_y.append(format_str.format(moment[1]))
atom_site_moment_crystalaxis_z.append(format_str.format(moment[2]))
count += 1
else:
# The following just presents a deterministic ordering.
unique_sites = [
(
sorted(sites, key=lambda s: tuple(abs(x) for x in s.frac_coords))[0],
len(sites),
)
for sites in sf.get_symmetrized_structure().equivalent_sites
]
for site, mult in sorted(
unique_sites,
key=lambda t: (
t[0].species.average_electroneg,
-t[1],
t[0].a,
t[0].b,
t[0].c,
),
):
for sp, occu in site.species.items():
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("%d" % mult)
atom_site_fract_x.append(format_str.format(site.a))
atom_site_fract_y.append(format_str.format(site.b))
atom_site_fract_z.append(format_str.format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
count += 1
block["_atom_site_type_symbol"] = atom_site_type_symbol
block["_atom_site_label"] = atom_site_label
block["_atom_site_symmetry_multiplicity"] = atom_site_symmetry_multiplicity
block["_atom_site_fract_x"] = atom_site_fract_x
block["_atom_site_fract_y"] = atom_site_fract_y
block["_atom_site_fract_z"] = atom_site_fract_z
block["_atom_site_occupancy"] = atom_site_occupancy
loops.append(
[
"_atom_site_type_symbol",
"_atom_site_label",
"_atom_site_symmetry_multiplicity",
"_atom_site_fract_x",
"_atom_site_fract_y",
"_atom_site_fract_z",
"_atom_site_occupancy",
]
)
if write_magmoms:
block["_atom_site_moment_label"] = atom_site_moment_label
block["_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
block["_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
block["_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
loops.append(
[
"_atom_site_moment_label",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
]
)
d = OrderedDict()
d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
self._cf = CifFile(d)
@property
def ciffile(self):
"""
Returns: CifFile associated with the CifWriter.
"""
return self._cf
def __str__(self):
"""
Returns the cif as a string.
"""
return self._cf.__str__()
def write_file(self, filename):
"""
Write the cif file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
try:
# Note that the ending ) is sometimes missing. That is why the code has
# been modified to treat it as optional. Same logic applies to lists.
return float(re.sub(r"\(.+\)*", "", text))
except TypeError:
if isinstance(text, list) and len(text) == 1:
return float(re.sub(r"\(.+\)*", "", text[0]))
except ValueError as ex:
if text.strip() == ".":
return 0
raise ex
raise ValueError(f"{text} cannot be converted to float")
|
{
"content_hash": "3494c47cbf71ee6a7a75405562df47ed",
"timestamp": "",
"source": "github",
"line_count": 1481,
"max_line_length": 120,
"avg_line_length": 41.072248480756244,
"alnum_prop": 0.5232623134082988,
"repo_name": "gmatteo/pymatgen",
"id": "4c0249877ff399f0656dc839f7f897671acb0738",
"size": "60939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/cif.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "7840569"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
}
|
"""Tests for Volume Code."""
import datetime
import os
import shutil
import socket
import sys
import tempfile
import time
import enum
import eventlet
import mock
from mox3 import mox
import os_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from stevedore import extension
from taskflow.engines.action_engine import engine
from cinder.api import common
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder import keymgr
from cinder import objects
import cinder.policy
from cinder import quota
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.brick import fake_lvm
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_driver
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit.keymgr import fake as fake_keymgr
from cinder.tests.unit import utils as tests_utils
from cinder import utils
import cinder.volume
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.volume import manager as vol_manager
from cinder.volume import rpcapi as volume_rpcapi
import cinder.volume.targets.tgt
from cinder.volume import utils as volutils
from cinder.volume import volume_types
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
CONF = cfg.CONF
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
fake_opt = [
cfg.StrOpt('fake_opt1', default='fake', help='fake opts')
]
class FakeImageService(object):
def __init__(self, db_driver=None, image_service=None):
pass
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
class BaseVolumeTestCase(test.TestCase):
"""Test Case for volumes."""
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa'
def setUp(self):
super(BaseVolumeTestCase, self).setUp()
self.extension_manager = extension.ExtensionManager(
"BaseVolumeTestCase")
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir,
notification_driver=["test"])
self.addCleanup(self._cleanup)
self.volume = importutils.import_object(CONF.volume_manager)
self.configuration = mock.Mock(conf.Configuration)
self.context = context.get_admin_context()
self.context.user_id = 'fake'
# NOTE(mriedem): The id is hard-coded here for tracking race fail
# assertions with the notification code, it's part of an
# elastic-recheck query so don't remove it or change it.
self.project_id = '7f265bd4-3a85-465e-a899-5dc4854a86d3'
self.context.project_id = self.project_id
self.volume_params = {
'status': 'creating',
'host': CONF.host,
'size': 1}
self.stubs.Set(brick_lvm.LVM,
'get_all_volume_groups',
self.fake_get_all_volume_groups)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
self.stubs.Set(os.path, 'exists', lambda x: True)
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
# keep ordered record of what we execute
self.called = []
def _cleanup(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
return [{'name': 'cinder-volumes',
'size': '5.00',
'available': '2.50',
'lv_count': '2',
'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
class AvailabilityZoneTestCase(BaseVolumeTestCase):
def test_list_availability_zones_cached(self):
volume_api = cinder.volume.api.API()
with mock.patch.object(volume_api.db,
'service_get_all_by_topic') as get_all:
get_all.return_value = [
{
'availability_zone': 'a',
'disabled': False,
},
]
azs = volume_api.list_availability_zones(enable_cache=True)
self.assertEqual([{"name": 'a', 'available': True}], list(azs))
self.assertIsNotNone(volume_api.availability_zones_last_fetched)
self.assertTrue(get_all.called)
volume_api.list_availability_zones(enable_cache=True)
self.assertEqual(1, get_all.call_count)
def test_list_availability_zones_no_cached(self):
volume_api = cinder.volume.api.API()
with mock.patch.object(volume_api.db,
'service_get_all_by_topic') as get_all:
get_all.return_value = [
{
'availability_zone': 'a',
'disabled': False,
},
]
azs = volume_api.list_availability_zones(enable_cache=False)
self.assertEqual([{"name": 'a', 'available': True}], list(azs))
self.assertIsNone(volume_api.availability_zones_last_fetched)
with mock.patch.object(volume_api.db,
'service_get_all_by_topic') as get_all:
get_all.return_value = [
{
'availability_zone': 'a',
'disabled': True,
},
]
azs = volume_api.list_availability_zones(enable_cache=False)
self.assertEqual([{"name": 'a', 'available': False}], list(azs))
self.assertIsNone(volume_api.availability_zones_last_fetched)
def test_list_availability_zones_refetched(self):
timeutils.set_time_override()
volume_api = cinder.volume.api.API()
with mock.patch.object(volume_api.db,
'service_get_all_by_topic') as get_all:
get_all.return_value = [
{
'availability_zone': 'a',
'disabled': False,
},
]
azs = volume_api.list_availability_zones(enable_cache=True)
self.assertEqual([{"name": 'a', 'available': True}], list(azs))
self.assertIsNotNone(volume_api.availability_zones_last_fetched)
last_fetched = volume_api.availability_zones_last_fetched
self.assertTrue(get_all.called)
volume_api.list_availability_zones(enable_cache=True)
self.assertEqual(1, get_all.call_count)
# The default cache time is 3600, push past that...
timeutils.advance_time_seconds(3800)
get_all.return_value = [
{
'availability_zone': 'a',
'disabled': False,
},
{
'availability_zone': 'b',
'disabled': False,
},
]
azs = volume_api.list_availability_zones(enable_cache=True)
azs = sorted([n['name'] for n in azs])
self.assertEqual(['a', 'b'], azs)
self.assertEqual(2, get_all.call_count)
self.assertGreater(volume_api.availability_zones_last_fetched,
last_fetched)
def test_list_availability_zones_enabled_service(self):
services = [
{'availability_zone': 'ping', 'disabled': 0},
{'availability_zone': 'ping', 'disabled': 1},
{'availability_zone': 'pong', 'disabled': 0},
{'availability_zone': 'pung', 'disabled': 1},
]
def stub_service_get_all_by_topic(*args, **kwargs):
return services
self.stubs.Set(db, 'service_get_all_by_topic',
stub_service_get_all_by_topic)
def sort_func(obj):
return obj['name']
volume_api = cinder.volume.api.API()
azs = volume_api.list_availability_zones()
azs = sorted(azs, key=sort_func)
expected = sorted([
{'name': 'pung', 'available': False},
{'name': 'pong', 'available': True},
{'name': 'ping', 'available': True},
], key=sort_func)
self.assertEqual(expected, azs)
class VolumeTestCase(BaseVolumeTestCase):
def setUp(self):
super(VolumeTestCase, self).setUp()
self._clear_patch = mock.patch('cinder.volume.utils.clear_volume',
autospec=True)
self._clear_patch.start()
self.expected_status = 'available'
def tearDown(self):
super(VolumeTestCase, self).tearDown()
self._clear_patch.stop()
def test_init_host_clears_downloads(self):
"""Test that init_host will unwedge a volume stuck in downloading."""
volume = tests_utils.create_volume(self.context, status='downloading',
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("error", volume['status'])
self.volume.delete_volume(self.context, volume_id)
def test_init_host_resumes_deletes(self):
"""init_host will resume deleting volume in deleting status."""
volume = tests_utils.create_volume(self.context, status='deleting',
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
self.assertRaises(exception.VolumeNotFound, db.volume_get,
context.get_admin_context(), volume_id)
def test_init_host_count_allocated_capacity(self):
vol0 = tests_utils.create_volume(
self.context, size=100, host=CONF.host)
vol1 = tests_utils.create_volume(
self.context, size=128,
host=volutils.append_host(CONF.host, 'pool0'))
vol2 = tests_utils.create_volume(
self.context, size=256,
host=volutils.append_host(CONF.host, 'pool0'))
vol3 = tests_utils.create_volume(
self.context, size=512,
host=volutils.append_host(CONF.host, 'pool1'))
vol4 = tests_utils.create_volume(
self.context, size=1024,
host=volutils.append_host(CONF.host, 'pool2'))
self.volume.init_host()
stats = self.volume.stats
self.assertEqual(2020, stats['allocated_capacity_gb'])
self.assertEqual(
384, stats['pools']['pool0']['allocated_capacity_gb'])
self.assertEqual(
512, stats['pools']['pool1']['allocated_capacity_gb'])
self.assertEqual(
1024, stats['pools']['pool2']['allocated_capacity_gb'])
# NOTE(jdg): On the create we have host='xyz', BUT
# here we do a db.volume_get, and now the host has
# been updated to xyz#pool-name. Note this is
# done via the managers init, which calls the drivers
# get_pool method, which in the legacy case is going
# to be volume_backend_name or None
vol0 = db.volume_get(context.get_admin_context(), vol0['id'])
self.assertEqual(volutils.append_host(CONF.host, 'LVM'),
vol0['host'])
self.volume.delete_volume(self.context, vol0['id'])
self.volume.delete_volume(self.context, vol1['id'])
self.volume.delete_volume(self.context, vol2['id'])
self.volume.delete_volume(self.context, vol3['id'])
self.volume.delete_volume(self.context, vol4['id'])
@mock.patch.object(vol_manager.VolumeManager, 'add_periodic_task')
def test_init_host_repl_enabled_periodic_task(self, mock_add_p_task):
manager = vol_manager.VolumeManager()
with mock.patch.object(manager.driver,
'get_volume_stats') as m_get_stats:
m_get_stats.return_value = {'replication': True}
manager.init_host()
mock_add_p_task.assert_called_once_with(mock.ANY)
@mock.patch.object(vol_manager.VolumeManager, 'add_periodic_task')
def test_init_host_repl_disabled_periodic_task(self, mock_add_p_task):
manager = vol_manager.VolumeManager()
with mock.patch.object(manager.driver,
'get_volume_stats') as m_get_stats:
m_get_stats.return_value = {'replication': False}
manager.init_host()
self.assertEqual(0, mock_add_p_task.call_count)
@mock.patch.object(vol_manager.VolumeManager,
'update_service_capabilities')
def test_report_filter_goodness_function(self, mock_update):
manager = vol_manager.VolumeManager()
manager.driver.set_initialized()
myfilterfunction = "myFilterFunction"
mygoodnessfunction = "myGoodnessFunction"
expected = {'name': 'cinder-volumes',
'filter_function': myfilterfunction,
'goodness_function': mygoodnessfunction,
}
with mock.patch.object(manager.driver,
'get_volume_stats') as m_get_stats:
with mock.patch.object(manager.driver,
'get_goodness_function') as m_get_goodness:
with mock.patch.object(manager.driver,
'get_filter_function') as m_get_filter:
m_get_stats.return_value = {'name': 'cinder-volumes'}
m_get_filter.return_value = myfilterfunction
m_get_goodness.return_value = mygoodnessfunction
manager._report_driver_status(1)
self.assertTrue(m_get_stats.called)
mock_update.assert_called_once_with(expected)
def test_is_working(self):
# By default we have driver mocked to be initialized...
self.assertTrue(self.volume.is_working())
# ...lets switch it and check again!
self.volume.driver._initialized = False
self.assertFalse(self.volume.is_working())
def test_create_volume_fails_with_creating_and_downloading_status(self):
"""Test init_host in case of volume.
While the status of volume is 'creating' or 'downloading',
volume process down.
After process restarting this 'creating' status is changed to 'error'.
"""
for status in ['creating', 'downloading']:
volume = tests_utils.create_volume(self.context, status=status,
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error', volume['status'])
self.volume.delete_volume(self.context, volume_id)
def test_create_snapshot_fails_with_creating_status(self):
"""Test init_host in case of snapshot.
While the status of snapshot is 'creating', volume process
down. After process restarting this 'creating' status is
changed to 'error'.
"""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
snapshot = tests_utils.create_snapshot(self.context,
volume['id'],
status='creating')
snap_id = snapshot['id']
self.volume.init_host()
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
self.assertEqual('error', snapshot_obj.status)
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume['id'])
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
def test_create_driver_not_initialized(self, reserve, commit, rollback):
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, volume_id)
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("error", volume.status)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_driver_not_initialized_rescheduling(self):
self.volume.driver._initialized = False
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, volume_id,
{'volume_properties': self.volume_params},
{'retry': {'num_attempts': 1, 'host': []}})
# NOTE(dulek): Volume should be rescheduled as we passed request_spec
# and filter_properties, assert that it wasn't counted in
# allocated_capacity tracking.
self.assertEqual({}, self.volume.stats['pools'])
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_non_cinder_exception_rescheduling(self):
params = self.volume_params
del params['host']
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**params)
volume_id = volume['id']
with mock.patch.object(self.volume.driver, 'create_volume',
side_effect=processutils.ProcessExecutionError):
self.assertRaises(processutils.ProcessExecutionError,
self.volume.create_volume,
self.context, volume_id,
{'volume_properties': params},
{'retry': {'num_attempts': 1, 'host': []}})
# NOTE(dulek): Volume should be rescheduled as we passed request_spec
# and filter_properties, assert that it wasn't counted in
# allocated_capacity tracking.
self.assertEqual({}, self.volume.stats['pools'])
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch.object(QUOTAS, 'rollback')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'reserve')
def test_delete_driver_not_initialized(self, reserve, commit, rollback):
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.assertRaises(exception.DriverNotInitialized,
self.volume.delete_volume,
self.context, volume.id)
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error_deleting", volume.status)
volume.destroy()
@mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock())
@mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock())
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION'])
def test_create_delete_volume(self, _mock_reserve):
"""Test volume can be created and deleted."""
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.create_volume(self.context, volume_id)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[0]
self.assertEqual('volume.create.start', msg['event_type'])
expected = {
'status': 'creating',
'host': socket.gethostname(),
'display_name': 'test_volume',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'volume_id': volume_id,
'volume_type': None,
'snapshot_id': None,
'user_id': 'fake',
'launched_at': 'DONTCARE',
'size': 1,
'replication_status': 'disabled',
'replication_extended_status': None,
'replication_driver_data': None,
'metadata': None,
'volume_attachment': [],
}
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[1]
self.assertEqual('volume.create.end', msg['event_type'])
expected['status'] = 'available'
self.assertDictMatch(expected, msg['payload'])
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
volume_id).id)
self.volume.delete_volume(self.context, volume_id)
vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
volume_id)
self.assertEqual('deleted', vol['status'])
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[2]
self.assertEqual('volume.delete.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('volume.delete.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_create_delete_volume_with_metadata(self):
"""Test volume can be created with metadata and deleted."""
test_meta = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, metadata=test_meta,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.assertEqual(test_meta, volume.metadata)
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_create_volume_with_invalid_metadata(self):
"""Test volume create with too much metadata fails."""
volume_api = cinder.volume.api.API()
test_meta = {'fake_key': 'fake_value' * 256}
self.assertRaises(exception.InvalidVolumeMetadataSize,
volume_api.create,
self.context,
1,
'name',
'description',
None,
None,
None,
test_meta)
def test_update_volume_metadata_with_metatype(self):
"""Test update volume metadata with different metadata type."""
test_meta1 = {'fake_key1': 'fake_value1'}
test_meta2 = {'fake_key1': 'fake_value2'}
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
volume_api = cinder.volume.api.API()
# update user metadata associated with the volume.
result_meta = volume_api.update_volume_metadata(
self.context,
volume,
test_meta2,
False,
common.METADATA_TYPES.user)
self.assertEqual(test_meta2, result_meta)
# create image metadata associated with the volume.
result_meta = volume_api.update_volume_metadata(
self.context,
volume,
test_meta1,
False,
common.METADATA_TYPES.image)
self.assertEqual(test_meta1, result_meta)
# update image metadata associated with the volume.
result_meta = volume_api.update_volume_metadata(
self.context,
volume,
test_meta2,
False,
common.METADATA_TYPES.image)
self.assertEqual(test_meta2, result_meta)
# update volume metadata with invalid metadta type.
self.assertRaises(exception.InvalidMetadataType,
volume_api.update_volume_metadata,
self.context,
volume,
test_meta1,
False,
FAKE_METADATA_TYPE.fake_type)
def test_update_volume_metadata_maintenance(self):
"""Test update volume metadata with different metadata type."""
test_meta1 = {'fake_key1': 'fake_value1'}
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.update_volume_metadata,
self.context,
volume,
test_meta1,
False,
FAKE_METADATA_TYPE.fake_type)
@mock.patch('cinder.db.volume_update')
def test_update_with_ovo(self, volume_update):
"""Test update volume using oslo_versionedobject."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_api = cinder.volume.api.API()
updates = {'display_name': 'foobbar'}
volume_api.update(self.context, volume, updates)
volume_update.assert_called_once_with(self.context, volume.id,
updates)
self.assertEqual('foobbar', volume.display_name)
def test_delete_volume_metadata_with_metatype(self):
"""Test delete volume metadata with different metadata type."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
test_meta2 = {'fake_key1': 'fake_value1'}
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
volume_api = cinder.volume.api.API()
# delete user metadata associated with the volume.
volume_api.delete_volume_metadata(
self.context,
volume,
'fake_key2',
common.METADATA_TYPES.user)
self.assertEqual(test_meta2,
db.volume_metadata_get(self.context, volume_id))
# create image metadata associated with the volume.
result_meta = volume_api.update_volume_metadata(
self.context,
volume,
test_meta1,
False,
common.METADATA_TYPES.image)
self.assertEqual(test_meta1, result_meta)
# delete image metadata associated with the volume.
volume_api.delete_volume_metadata(
self.context,
volume,
'fake_key2',
common.METADATA_TYPES.image)
# parse the result to build the dict.
rows = db.volume_glance_metadata_get(self.context, volume_id)
result = {}
for row in rows:
result[row['key']] = row['value']
self.assertEqual(test_meta2, result)
# delete volume metadata with invalid metadta type.
self.assertRaises(exception.InvalidMetadataType,
volume_api.delete_volume_metadata,
self.context,
volume,
'fake_key1',
FAKE_METADATA_TYPE.fake_type)
def test_delete_volume_metadata_maintenance(self):
"""Test delete volume metadata in maintenance."""
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete_volume_metadata,
self.context,
volume,
'fake_key1',
FAKE_METADATA_TYPE.fake_type)
def test_volume_attach_in_maintenance(self):
"""Test attach the volume in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.attach,
self.context,
volume, None, None, None, None)
def test_volume_detach_in_maintenance(self):
"""Test detach the volume in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.detach,
self.context,
volume, None)
def test_initialize_connection_maintenance(self):
"""Test initialize connection in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.initialize_connection,
self.context,
volume,
None)
def test_accept_transfer_maintenance(self):
"""Test accept transfer in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.accept_transfer,
self.context,
volume,
None, None)
def test_copy_volume_to_image_maintenance(self):
"""Test copy volume to image in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.copy_volume_to_image,
self.context,
volume,
test_meta1,
force=True)
@mock.patch.object(cinder.volume.api.API, 'list_availability_zones')
def test_create_volume_uses_default_availability_zone(self, mock_list_az):
"""Test setting availability_zone correctly during volume create."""
mock_list_az.return_value = ({'name': 'az1', 'available': True},
{'name': 'az2', 'available': True},
{'name': 'default-az', 'available': True})
volume_api = cinder.volume.api.API()
# Test backwards compatibility, default_availability_zone not set
self.override_config('storage_availability_zone', 'az2')
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual('az2', volume['availability_zone'])
self.override_config('default_availability_zone', 'default-az')
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual('default-az', volume['availability_zone'])
@mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock())
@mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock())
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_create_volume_with_volume_type(self, _mock_reserve):
"""Test volume creation with default volume type."""
volume_api = cinder.volume.api.API()
# Create volume with default volume type while default
# volume type doesn't exist, volume_type_id should be NULL
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertIsNone(volume['volume_type_id'])
self.assertIsNone(volume['encryption_key_id'])
# Create default volume type
vol_type = conf_fixture.def_vol_type
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
# Create volume with default volume type
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertIsNone(volume['encryption_key_id'])
# Create volume with specific volume type
vol_type = 'test'
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
@mock.patch.object(keymgr, 'API', fake_keymgr.fake_api)
def test_create_volume_with_encrypted_volume_type(self):
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertIsNotNone(volume['encryption_key_id'])
def test_create_volume_with_provider_id(self):
volume_params_with_provider_id = dict(provider_id='1111-aaaa',
**self.volume_params)
volume = tests_utils.create_volume(self.context,
**volume_params_with_provider_id)
self.volume.create_volume(self.context, volume['id'])
self.assertEqual('1111-aaaa', volume['provider_id'])
@mock.patch.object(keymgr, 'API', new=fake_keymgr.fake_api)
def test_create_delete_volume_with_encrypted_volume_type(self):
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertIsNotNone(volume.get('encryption_key_id', None))
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertIsNotNone(volume['encryption_key_id'])
volume['host'] = 'fake_host'
volume['status'] = 'available'
volume_api.delete(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('deleting', volume['status'])
db.volume_destroy(self.context, volume['id'])
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume['id'])
def test_extra_capabilities(self):
# Test valid extra_capabilities.
fake_capabilities = {'key1': 1, 'key2': 2}
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
manager = vol_manager.VolumeManager()
manager.stats = {'pools': {}}
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
self.assertTrue(mock_loads.called)
volume_stats = manager.last_capabilities
self.assertEqual(fake_capabilities['key1'],
volume_stats['key1'])
self.assertEqual(fake_capabilities['key2'],
volume_stats['key2'])
def test_extra_capabilities_fail(self):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.side_effect = exception.CinderException('test')
self.assertRaises(exception.CinderException,
vol_manager.VolumeManager)
@mock.patch.object(db, 'volume_get_all_by_host')
def test_update_replication_rel_status(self, m_get_by_host):
m_get_by_host.return_value = [mock.sentinel.vol]
ctxt = context.get_admin_context()
manager = vol_manager.VolumeManager()
with mock.patch.object(manager.driver,
'get_replication_status') as m_get_rep_status:
m_get_rep_status.return_value = None
manager._update_replication_relationship_status(ctxt)
m_get_rep_status.assert_called_once_with(ctxt, mock.sentinel.vol)
exp_filters = {
'replication_status':
['active', 'copying', 'error', 'active-stopped', 'inactive']}
m_get_by_host.assert_called_once_with(ctxt, manager.host,
filters=exp_filters)
@mock.patch.object(db, 'volume_get_all_by_host',
mock.Mock(return_value=[{'id': 'foo'}]))
@mock.patch.object(db, 'volume_update')
def test_update_replication_rel_status_update_vol(self, mock_update):
"""Volume is updated with replication update data."""
ctxt = context.get_admin_context()
manager = vol_manager.VolumeManager()
with mock.patch.object(manager.driver,
'get_replication_status') as m_get_rep_status:
m_get_rep_status.return_value = mock.sentinel.model_update
manager._update_replication_relationship_status(ctxt)
mock_update.assert_called_once_with(ctxt, 'foo',
mock.sentinel.model_update)
@mock.patch.object(db, 'volume_get_all_by_host',
mock.Mock(return_value=[{'id': 'foo'}]))
def test_update_replication_rel_status_with_repl_support_exc(self):
"""Exception handled when raised getting replication status."""
ctxt = context.get_admin_context()
manager = vol_manager.VolumeManager()
manager.driver._initialized = True
manager.driver._stats['replication'] = True
with mock.patch.object(manager.driver,
'get_replication_status') as m_get_rep_status:
m_get_rep_status.side_effect = Exception()
manager._update_replication_relationship_status(ctxt)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(
mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy(
volume_name='fake'))
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertTrue(res)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
def test_get_volume_different_tenant(self):
"""Test can't get volume of another tenant when viewable_admin_meta."""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
another_context = context.RequestContext('another_user_id',
'another_project_id',
is_admin=False)
self.assertNotEqual(another_context.project_id,
self.context.project_id)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.VolumeNotFound, volume_api.get,
another_context, volume_id, viewable_admin_meta=True)
self.assertEqual(volume_id,
volume_api.get(self.context, volume_id)['id'])
self.volume.delete_volume(self.context, volume_id)
def test_get_all_limit_bad_value(self):
"""Test value of 'limit' is numeric and >= 0"""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
limit="A")
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
limit="-1")
def test_get_all_tenants_volume_list(self):
"""Validate when the volume list for all tenants is returned"""
volume_api = cinder.volume.api.API()
with mock.patch.object(volume_api.db,
'volume_get_all_by_project') as by_project:
with mock.patch.object(volume_api.db,
'volume_get_all') as get_all:
db_volume = {'volume_type_id': 'fake_type_id',
'name': 'fake_name',
'host': 'fake_host',
'id': 'fake_volume_id'}
volume = fake_volume.fake_db_volume(**db_volume)
by_project.return_value = [volume]
get_all.return_value = [volume]
volume_api.get_all(self.context, filters={'all_tenants': '0'})
self.assertTrue(by_project.called)
by_project.called = False
self.context.is_admin = False
volume_api.get_all(self.context, filters={'all_tenants': '1'})
self.assertTrue(by_project.called)
# check for volume list of all tenants
self.context.is_admin = True
volume_api.get_all(self.context, filters={'all_tenants': '1'})
self.assertTrue(get_all.called)
def test_delete_volume_in_error_extending(self):
"""Test volume can be deleted in error_extending stats."""
# create a volume
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
# delete 'error_extending' volume
db.volume_update(self.context, volume['id'],
{'status': 'error_extending'})
self.volume.delete_volume(self.context, volume['id'])
self.assertRaises(exception.NotFound, db.volume_get,
self.context, volume['id'])
@mock.patch.object(db.sqlalchemy.api, 'volume_get',
side_effect=exception.VolumeNotFound(
volume_id='12345678-1234-5678-1234-567812345678'))
def test_delete_volume_not_found(self, mock_get_volume):
"""Test delete volume moves on if the volume does not exist."""
volume_id = '12345678-1234-5678-1234-567812345678'
self.assertTrue(self.volume.delete_volume(self.context, volume_id))
self.assertTrue(mock_get_volume.called)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
def test_create_volume_from_snapshot(self, mock_create_from_snap):
"""Test volume can be created from a snapshot."""
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'],
size=volume_src['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_obj)
volume_dst = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'])
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id,
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume_src['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_snapshot_with_types(self, _get_flow):
"""Test volume create from snapshot with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(
context.get_admin_context(),
{'name': 'foo',
'extra_specs': {'volume_backend_name': 'dev_1'}})
db.volume_type_create(
context.get_admin_context(),
{'name': 'biz', 'extra_specs': {'volume_backend_name': 'dev_2'}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
snapshot = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
# Make sure the case of specifying a type that
# doesn't match the snapshots type fails
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
# Make sure that trying to specify a type
# when the snapshots type is None fails
snapshot_obj.volume_type_id = None
self.assertRaises(exception.InvalidVolumeType,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
snapshot_obj.volume_type_id = foo_type['id']
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=foo_type,
snapshot=snapshot_obj)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_types(self, _get_flow):
"""Test volume create from source with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(
context.get_admin_context(),
{'name': 'foo',
'extra_specs': {'volume_backend_name': 'dev_1'}})
db.volume_type_create(
context.get_admin_context(),
{'name': 'biz', 'extra_specs': {'volume_backend_name': 'dev_2'}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
source_vol = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
# Make sure that trying to specify a type
# when the source type is None fails
source_vol['volume_type_id'] = None
source_vol['volume_type'] = None
self.assertRaises(exception.InvalidVolumeType,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
source_vol['volume_type_id'] = biz_type['id']
source_vol['volume_type'] = biz_type
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=biz_type,
source_volume=source_vol)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_same_backend(self, _get_flow):
"""Test volume create from source with type mismatch same backend."""
volume_api = cinder.volume.api.API()
foo_type = {
'name': 'foo',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
'updated_at': None,
'extra_specs': {'volume_backend_name': 'dev_1'},
'is_public': True,
'deleted_at': None,
'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
'description': None}
biz_type = {
'name': 'biz',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
'updated_at': None,
'extra_specs': {'volume_backend_name': 'dev_1'},
'is_public': True,
'deleted_at': None,
'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
'description': None}
source_vol = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_from_source_and_snap_only_one_backend(self, _get_flow):
"""Test create from source and snap with type mismatch one backend."""
volume_api = cinder.volume.api.API()
foo_type = {
'name': 'foo',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
'updated_at': None,
'extra_specs': {'some_key': 3},
'is_public': True,
'deleted_at': None,
'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
'description': None}
biz_type = {
'name': 'biz',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
'updated_at': None,
'extra_specs': {'some_other_key': 4},
'is_public': True,
'deleted_at': None,
'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
'description': None}
source_vol = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
snapshot = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
with mock.patch.object(db,
'service_get_all_by_topic') as mock_get_service, \
mock.patch.object(volume_api,
'list_availability_zones') as mock_get_azs:
mock_get_service.return_value = [{'host': 'foo'}]
mock_get_azs.return_value = {}
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
def test_create_snapshot_driver_not_initialized(self):
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'],
size=volume_src['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_snapshot,
self.context, volume_src['id'], snapshot_obj)
# NOTE(flaper87): The volume status should be error.
self.assertEqual("error", snapshot_obj.status)
# lets cleanup the mess
self.volume.driver._initialized = True
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume_src['id'])
def _mock_synchronized(self, name, *s_args, **s_kwargs):
def inner_sync1(f):
def inner_sync2(*args, **kwargs):
self.called.append('lock-%s' % (name))
ret = f(*args, **kwargs)
self.called.append('unlock-%s' % (name))
return ret
return inner_sync2
return inner_sync1
def _fake_execute(self, *cmd, **kwargs):
pass
@mock.patch.object(cinder.volume.drivers.lvm.LVMVolumeDriver,
'create_volume_from_snapshot')
def test_create_volume_from_snapshot_check_locks(
self, mock_lvm_create):
# mock the synchroniser so we can record events
self.stubs.Set(utils, 'synchronized', self._mock_synchronized)
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
self.assertEqual(1, len(self.called))
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
snap_id = self._create_snapshot(src_vol_id,
size=src_vol['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snap_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
request_spec={'snapshot_id': snap_id})
self.assertEqual(2, len(self.called))
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(snap_id,
db.volume_get(admin_ctxt, dst_vol_id).snapshot_id)
# locked
self.volume.delete_volume(self.context, dst_vol_id)
self.assertEqual(4, len(self.called))
# locked
self.volume.delete_snapshot(self.context, snapshot_obj)
self.assertEqual(6, len(self.called))
# locked
self.volume.delete_volume(self.context, src_vol_id)
self.assertEqual(8, len(self.called))
self.assertEqual(['lock-%s' % ('%s-delete_snapshot' % (snap_id)),
'unlock-%s' % ('%s-delete_snapshot' % (snap_id)),
'lock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'lock-%s' % ('%s-delete_snapshot' % (snap_id)),
'unlock-%s' % ('%s-delete_snapshot' % (snap_id)),
'lock-%s' % ('%s-delete_volume' % (src_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (src_vol_id))],
self.called)
self.assertTrue(mock_lvm_create.called)
def test_create_volume_from_volume_check_locks(self):
# mock the synchroniser so we can record events
self.stubs.Set(utils, 'synchronized', self._mock_synchronized)
self.stubs.Set(utils, 'execute', self._fake_execute)
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
self.assertEqual(1, len(self.called))
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
request_spec={'source_volid': src_vol_id})
self.assertEqual(2, len(self.called))
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(src_vol_id,
db.volume_get(admin_ctxt, dst_vol_id).source_volid)
# locked
self.volume.delete_volume(self.context, dst_vol_id)
self.assertEqual(4, len(self.called))
# locked
self.volume.delete_volume(self.context, src_vol_id)
self.assertEqual(6, len(self.called))
self.assertEqual(['lock-%s' % ('%s-delete_volume' % (src_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (src_vol_id)),
'lock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'lock-%s' % ('%s-delete_volume' % (src_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (src_vol_id))],
self.called)
def test_create_volume_from_volume_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
orig_elevated = self.context.elevated
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.stubs.Set(self.context, 'elevated', orig_elevated)
# we expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume,
self.context,
volume_id=dst_vol.id,
request_spec={'source_volid': src_vol_id})
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.stubs.Set(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_volume(self.context, src_vol_id)
# we expect the volume create to fail with the following err since the
# source volume was deleted while the create was locked. Note that the
# volume is still in the db since it was created by the test prior to
# calling manager.create_volume.
with mock.patch('sys.stderr', new=six.StringIO()):
self.assertRaises(exception.VolumeNotFound, gthreads[0].wait)
def _raise_metadata_copy_failure(self, method, dst_vol_id, **kwargs):
# MetadataCopyFailure exception will be raised if DB service is Down
# while copying the volume glance metadata
with mock.patch.object(db, method) as mock_db:
mock_db.side_effect = exception.MetadataCopyFailure(
reason="Because of DB service down.")
self.assertRaises(exception.MetadataCopyFailure,
self.volume.create_volume,
self.context,
dst_vol_id,
**kwargs)
# ensure that status of volume is 'error'
vol = db.volume_get(self.context, dst_vol_id)
self.assertEqual('error', vol['status'])
# cleanup resource
db.volume_destroy(self.context, dst_vol_id)
@mock.patch('cinder.utils.execute')
def test_create_volume_from_volume_with_glance_volume_metadata_none(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self.volume.create_volume(self.context,
dst_vol['id'])
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_from_volume_to_volume,
self.context, src_vol_id, dst_vol['id'])
# ensure that status of volume is 'available'
vol = db.volume_get(self.context, dst_vol['id'])
self.assertEqual('available', vol['status'])
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
db.volume_destroy(self.context, dst_vol['id'])
@mock.patch('cinder.utils.execute')
def test_create_volume_from_volume_raise_metadata_copy_failure(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_from_volume_to_volume',
dst_vol['id'])
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
@mock.patch('cinder.utils.execute')
def test_create_volume_from_snapshot_raise_metadata_copy_failure(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from snapshot
snapshot_id = self._create_snapshot(src_vol['id'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, src_vol['id'], snapshot_obj)
# ensure that status of snapshot is 'available'
self.assertEqual('available', snapshot_obj.status)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_to_volume',
dst_vol['id'])
# cleanup resource
snapshot_obj.destroy()
db.volume_destroy(self.context, src_vol_id)
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
@mock.patch('cinder.utils.execute')
def test_create_volume_from_srcreplica_raise_metadata_copy_failure(
self, mock_execute, _create_replica_test):
mock_execute.return_value = None
_create_replica_test.return_value = None
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_from_volume_to_volume',
dst_vol['id'])
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
@mock.patch('cinder.utils.execute')
def test_create_volume_from_snapshot_with_glance_volume_metadata_none(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
volume = db.volume_get(self.context, src_vol_id)
# create snapshot of volume
snapshot_id = self._create_snapshot(volume['id'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, volume['id'], snapshot_obj)
# ensure that status of snapshot is 'available'
self.assertEqual('available', snapshot_obj.status)
# create volume from snapshot
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol)
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_to_volume,
self.context, dst_vol['id'], snapshot_id)
# ensure that status of volume is 'available'
vol = db.volume_get(self.context, dst_vol['id'])
self.assertEqual('available', vol['status'])
# cleanup resource
snapshot_obj.destroy()
db.volume_destroy(self.context, src_vol_id)
db.volume_destroy(self.context, dst_vol['id'])
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
def test_create_volume_from_srcreplica_with_glance_volume_metadata_none(
self, _create_replica_test):
"""Test volume can be created from a volume replica."""
_create_replica_test.return_value = None
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
db.volume_update(self.context, volume_src['id'], {'bootable': True})
volume = db.volume_get(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'],
{'source_replicaid': volume['id']})
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_from_volume_to_volume,
self.context, volume_src['id'], volume_dst['id'])
self.assertEqual('available',
db.volume_get(self.context,
volume_dst['id']).status)
self.assertTrue(_create_replica_test.called)
# cleanup resource
db.volume_destroy(self.context, volume_dst['id'])
db.volume_destroy(self.context, volume_src['id'])
def test_create_volume_from_snapshot_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
# create snapshot
snap_id = self._create_snapshot(src_vol_id,
size=src_vol['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)
# create vol from snapshot...
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snap_id,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
orig_elevated = self.context.elevated
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.stubs.Set(self.context, 'elevated', orig_elevated)
# We expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume, self.context,
volume_id=dst_vol_id,
request_spec={'snapshot_id': snap_id})
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.stubs.Set(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_snapshot(self.context, snapshot_obj)
# we expect the volume create to fail with the following err since the
# snapshot was deleted while the create was locked. Note that the
# volume is still in the db since it was created by the test prior to
# calling manager.create_volume.
with mock.patch('sys.stderr', new=six.StringIO()):
self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait)
# locked
self.volume.delete_volume(self.context, src_vol_id)
# make sure it is gone
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.context, src_vol_id)
@mock.patch.object(keymgr, 'API', fake_keymgr.fake_api)
def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of an encrypted volume"""
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
'LUKS')
volume_src = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
volume_src['host'] = 'fake_host'
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume_src,
'name',
'description')
snapshot_ref['status'] = 'available' # status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
'description',
snapshot=snapshot_ref)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_ref['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
# ensure encryption keys match
self.assertIsNotNone(volume_src['encryption_key_id'])
self.assertIsNotNone(volume_dst['encryption_key_id'])
key_manager = volume_api.key_manager # must use *same* key manager
volume_src_key = key_manager.get_key(self.context,
volume_src['encryption_key_id'])
volume_dst_key = key_manager.get_key(self.context,
volume_dst['encryption_key_id'])
self.assertEqual(volume_src_key, volume_dst_key)
def test_create_volume_from_encrypted_volume(self):
"""Test volume can be created from an encrypted volume."""
self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api)
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
'LUKS')
volume_src = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
volume_src['status'] = 'available' # status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
'description',
source_volume=volume_src)
self.assertEqual(volume_dst['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(volume_src['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).source_volid)
# ensure encryption keys match
self.assertIsNotNone(volume_src['encryption_key_id'])
self.assertIsNotNone(volume_dst['encryption_key_id'])
key_manager = volume_api.key_manager # must use *same* key manager
volume_src_key = key_manager.get_key(self.context,
volume_src['encryption_key_id'])
volume_dst_key = key_manager.get_key(self.context,
volume_dst['encryption_key_id'])
self.assertEqual(volume_src_key, volume_dst_key)
def test_create_volume_from_snapshot_fail_bad_size(self):
"""Test volume can't be created from snapshot with bad volume size."""
volume_api = cinder.volume.api.API()
snapshot = {'id': 1234,
'status': 'available',
'volume_size': 10}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot_obj)
def test_create_volume_from_snapshot_fail_wrong_az(self):
"""Test volume can't be created from snapshot in a different az."""
volume_api = cinder.volume.api.API()
def fake_list_availability_zones(enable_cache=False):
return ({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True})
self.stubs.Set(volume_api,
'list_availability_zones',
fake_list_availability_zones)
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot = self._create_snapshot(volume_src['id'])
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot)
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot)
self.assertEqual('az2', volume_dst['availability_zone'])
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot,
availability_zone='nova')
def test_create_volume_with_invalid_exclusive_options(self):
"""Test volume create with multiple exclusive options fails."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
1,
'name',
'description',
snapshot='fake_id',
image_id='fake_id',
source_volume='fake_id')
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_fetchqos(self,
_mock_volume_update,
_mock_volume_get,
_mock_volume_admin_metadata_get,
mock_get_target):
"""Make sure initialize_connection returns correct information."""
_fake_admin_meta = {'fake-key': 'fake-value'}
_fake_volume = {'volume_type_id': 'fake_type_id',
'name': 'fake_name',
'host': 'fake_host',
'id': 'fake_volume_id',
'volume_admin_metadata': _fake_admin_meta}
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = _fake_admin_meta
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2'}
}
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs') as type_qos, \
mock.patch.object(cinder.tests.unit.fake_driver.FakeISCSIDriver,
'initialize_connection') as driver_init:
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
mock_get_target.return_value = None
qos_specs_expected = {'key1': 'value1',
'key2': 'value2'}
# initialize_connection() passes qos_specs that is designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(self.context,
'fake_volume_id',
connector)
self.assertDictMatch(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(self.context,
'fake_volume_id',
connector)
self.assertDictMatch(qos_specs_expected,
conn_info['data']['qos_specs'])
# initialize_connection() skips qos_specs that is designated to be
# consumed by back-end only
qos_values.update({'consumer': 'back-end'})
type_qos.return_value = dict(qos_specs=qos_values)
conn_info = self.volume.initialize_connection(self.context,
'fake_volume_id',
connector)
self.assertIsNone(conn_info['data']['qos_specs'])
@mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_export_failure(self,
_mock_volume_update,
_mock_volume_get,
_mock_create_export):
"""Test exception path for create_export failure."""
_fake_admin_meta = {'fake-key': 'fake-value'}
_fake_volume = {'volume_type_id': 'fake_type_id',
'name': 'fake_name',
'host': 'fake_host',
'id': 'fake_volume_id',
'volume_admin_metadata': _fake_admin_meta}
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_create_export.side_effect = exception.CinderException
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.initialize_connection,
self.context,
'fake_volume_id',
connector)
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db, 'volume_update')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(fake_driver.FakeISCSIDriver, 'initialize_connection')
@mock.patch.object(db, 'driver_initiator_data_get')
@mock.patch.object(db, 'driver_initiator_data_update')
def test_initialize_connection_initiator_data(self, mock_data_update,
mock_data_get,
mock_driver_init,
mock_volume_get,
mock_volume_update,
mock_metadata_get,
mock_get_target):
fake_admin_meta = {'fake-key': 'fake-value'}
fake_volume = {'volume_type_id': None,
'name': 'fake_name',
'host': 'fake_host',
'id': 'fake_volume_id',
'volume_admin_metadata': fake_admin_meta,
'encryption_key_id': ('d371e7bb-7392-4c27-'
'ac0b-ebd9f5d16078')}
mock_volume_get.return_value = fake_volume
mock_volume_update.return_value = fake_volume
mock_get_target.return_value = None
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
mock_driver_init.return_value = {
'driver_volume_type': 'iscsi',
'data': {'access_mode': 'rw',
'encrypted': False}
}
mock_data_get.return_value = []
conn_info = self.volume.initialize_connection(self.context, 'id',
connector)
# Asserts that if the driver sets the encrypted flag then the
# VolumeManager doesn't overwrite it regardless of what's in the
# volume for the encryption_key_id field.
self.assertFalse(conn_info['data']['encrypted'])
mock_driver_init.assert_called_with(fake_volume, connector)
data = [{'key': 'key1', 'value': 'value1'}]
mock_data_get.return_value = data
self.volume.initialize_connection(self.context, 'id', connector)
mock_driver_init.assert_called_with(fake_volume, connector, data)
update = {
'set_values': {
'foo': 'bar'
},
'remove_values': [
'foo',
'foo2'
]
}
mock_driver_init.return_value['initiator_update'] = update
self.volume.initialize_connection(self.context, 'id', connector)
mock_driver_init.assert_called_with(fake_volume, connector, data)
mock_data_update.assert_called_with(self.context, 'INITIATOR',
'FakeISCSIDriver', update)
connector['initiator'] = None
mock_data_update.reset_mock()
mock_data_get.reset_mock()
mock_driver_init.return_value['data'].pop('encrypted')
conn_info = self.volume.initialize_connection(self.context, 'id',
connector)
# Asserts that VolumeManager sets the encrypted flag if the driver
# doesn't set it.
self.assertTrue(conn_info['data']['encrypted'])
mock_driver_init.assert_called_with(fake_volume, connector)
self.assertFalse(mock_data_get.called)
self.assertFalse(mock_data_update.called)
def test_run_attach_detach_volume_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("in-use", vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_detach_invalid_attachment_id(self):
"""Make sure if the attachment id isn't found we raise."""
attachment_id = "notfoundid"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
instance_uuid = '12345678-1234-5678-1234-567812345678'
attached_host = 'fake_host'
mountpoint = '/dev/fake'
tests_utils.attach_volume(self.context, volume['id'],
instance_uuid, attached_host,
mountpoint)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('in-use', volume['status'])
def test_detach_no_attachments(self):
self.volume_params['status'] = 'detaching'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'])
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid_2, None,
mountpoint, 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.assertRaises(exception.InvalidVolume,
self.volume.detach_volume,
self.context, volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance2_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
self.assertNotEqual(attachment, attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_twice_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345699'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertIsNone(attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
def test_attach_detach_not_multiattach_volume_for_instances(self):
"""Make sure volume can't be attached to more than one instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
instance2_uuid,
None,
mountpoint2, 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_for_host(self):
"""Make sure volume can be attached and detached from host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host2', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host2', attachment2['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("in-use", vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_twice_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertIsNone(attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
"""Make sure volume can't be attached to more than one host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host2',
mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
db.volume_update(self.context, volume_id, {'status': 'available', })
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.volume.detach_volume(self.context, volume_id,
attachment['id'])
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
# Not allow using 'read-write' mode attach readonly volume
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error_attaching', vol['status'])
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error_attaching', vol['status'])
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
# Not allow using 'read-write' mode attach readonly volume
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
def test_detach_volume_while_uploading_to_image_is_in_progress(self):
# If instance is booted from volume with 'Terminate on Delete' flag
# set, and when we delete instance then it tries to delete volume
# even it is in 'uploading' state.
# It is happening because detach call is setting volume status to
# 'available'.
mountpoint = "/dev/sdf"
# Attach volume to the instance
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
# Change volume status to 'uploading'
db.volume_update(self.context, volume_id, {'status': 'uploading'})
# Call detach api
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
# Check that volume status is 'uploading'
self.assertEqual("uploading", vol['status'])
self.assertEqual("detached", vol['attach_status'])
@mock.patch.object(cinder.volume.api.API, 'update')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_reserve_volume_success(self, volume_get, volume_update):
fake_volume = {
'id': self.FAKE_UUID,
'status': 'available'
}
volume_get.return_value = fake_volume
volume_update.return_value = fake_volume
self.assertIsNone(cinder.volume.api.API().reserve_volume(
self.context,
fake_volume,
))
self.assertTrue(volume_get.called)
self.assertTrue(volume_update.called)
def test_reserve_volume_in_attaching(self):
self._test_reserve_volume_bad_status('attaching')
def test_reserve_volume_in_maintenance(self):
self._test_reserve_volume_bad_status('maintenance')
def _test_reserve_volume_bad_status(self, status):
fake_volume = {
'id': self.FAKE_UUID,
'status': status
}
with mock.patch.object(db.sqlalchemy.api, 'volume_get') as mock_get:
mock_get.return_value = fake_volume
self.assertRaises(exception.InvalidVolume,
cinder.volume.api.API().reserve_volume,
self.context,
fake_volume)
self.assertTrue(mock_get.called)
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_attachment_get_used_by_volume_id')
@mock.patch.object(cinder.volume.api.API, 'update')
def test_unreserve_volume_success(self, volume_get,
volume_attachment_get_used_by_volume_id,
volume_update):
fake_volume = {
'id': self.FAKE_UUID,
'status': 'attaching'
}
fake_attachments = [{'volume_id': self.FAKE_UUID,
'instance_uuid': 'fake_instance_uuid'}]
volume_get.return_value = fake_volume
volume_attachment_get_used_by_volume_id.return_value = fake_attachments
volume_update.return_value = fake_volume
self.assertIsNone(cinder.volume.api.API().unreserve_volume(
self.context,
fake_volume
))
self.assertTrue(volume_get.called)
self.assertTrue(volume_attachment_get_used_by_volume_id.called)
self.assertTrue(volume_update.called)
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
volume_id)
self.assertNotIn(iscsi_target, targets)
targets.append(iscsi_target)
# FIXME(jdg): What is this actually testing?
# We never call the internal _check method?
for _index in range(100):
tests_utils.create_volume(self.context, **self.volume_params)
for volume_id in volume_ids:
self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
@staticmethod
def _create_snapshot(volume_id, size=1, metadata=None):
"""Create a snapshot object."""
metadata = metadata or {}
snap = objects.Snapshot(context.get_admin_context())
snap.volume_size = size
snap.user_id = 'fake'
snap.project_id = 'fake'
snap.volume_id = volume_id
snap.status = "creating"
if metadata is not None:
snap.metadata = metadata
snap.create()
return snap
def test_create_delete_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.create_volume(self.context, volume['id'])
msg = self.notifier.notifications[0]
self.assertEqual('volume.create.start', msg['event_type'])
self.assertEqual('creating', msg['payload']['status'])
self.assertEqual('INFO', msg['priority'])
msg = self.notifier.notifications[1]
self.assertEqual('volume.create.end', msg['event_type'])
self.assertEqual('available', msg['payload']['status'])
self.assertEqual('INFO', msg['priority'])
if len(self.notifier.notifications) > 2:
# Cause an assert to print the unexpected item
# and all of the notifications.
self.assertFalse(self.notifier.notifications[2],
self.notifier.notifications)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
snapshot = self._create_snapshot(volume['id'], size=volume['size'])
snapshot_id = snapshot.id
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot_id, objects.Snapshot.get_by_id(self.context,
snapshot_id).id)
msg = self.notifier.notifications[2]
self.assertEqual('snapshot.create.start', msg['event_type'])
expected = {
'created_at': 'DONTCARE',
'deleted': '',
'display_name': None,
'snapshot_id': snapshot_id,
'status': 'creating',
'tenant_id': 'fake',
'user_id': 'fake',
'volume_id': volume['id'],
'volume_size': 1,
'availability_zone': 'nova',
'metadata': '',
}
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('snapshot.create.end', msg['event_type'])
expected['status'] = 'available'
self.assertDictMatch(expected, msg['payload'])
if len(self.notifier.notifications) > 4:
# Cause an assert to print the unexpected item
# and all of the notifications.
self.assertFalse(self.notifier.notifications[4],
self.notifier.notifications)
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.delete_snapshot(self.context, snapshot)
msg = self.notifier.notifications[4]
self.assertEqual('snapshot.delete.start', msg['event_type'])
expected['status'] = 'available'
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[5]
self.assertEqual('snapshot.delete.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
if len(self.notifier.notifications) > 6:
# Cause an assert to print the unexpected item
# and all of the notifications.
self.assertFalse(self.notifier.notifications[6],
self.notifier.notifications)
self.assertEqual(6, len(self.notifier.notifications),
self.notifier.notifications)
snap = objects.Snapshot.get_by_id(context.get_admin_context(
read_deleted='yes'), snapshot_id)
self.assertEqual('deleted', snap.status)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_create_delete_snapshot_with_metadata(self):
"""Test snapshot can be created with metadata and deleted."""
test_meta = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = self._create_snapshot(volume['id'], size=volume['size'],
metadata=test_meta)
snapshot_id = snapshot.id
result_dict = snapshot.metadata
self.assertEqual(test_meta, result_dict)
self.volume.delete_snapshot(self.context, snapshot)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
@mock.patch.object(db, 'snapshot_create',
side_effect=exception.InvalidSnapshot(
'Create snapshot in db failed!'))
def test_create_snapshot_failed_db_snapshot(self, mock_snapshot):
"""Test exception handling when create snapshot in db failed."""
test_volume = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, test_volume['id'])
test_volume['status'] = 'available'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidSnapshot,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
def test_create_snapshot_failed_maintenance(self):
"""Test exception handling when create snapshot in maintenance."""
test_volume = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, test_volume['id'])
test_volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
@mock.patch.object(QUOTAS, 'commit',
side_effect=exception.QuotaError(
'Snapshot quota commit failed!'))
def test_create_snapshot_failed_quota_commit(self, mock_snapshot):
"""Test exception handling when snapshot quota commit failed."""
test_volume = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, test_volume['id'],
request_spec={})
test_volume['status'] = 'available'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.QuotaError,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
def test_cannot_delete_volume_in_use(self):
"""Test volume can't be deleted in in-use status."""
self._test_cannot_delete_volume('in-use')
def test_cannot_delete_volume_maintenance(self):
"""Test volume can't be deleted in maintenance status."""
self._test_cannot_delete_volume('maintenance')
def _test_cannot_delete_volume(self, status):
"""Test volume can't be deleted in invalid stats."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = status
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
# 'in-use' status raises InvalidVolume
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_force_delete_volume(self):
"""Test volume can be forced to delete."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'error_deleting'
volume_api = cinder.volume.api.API()
# 'error_deleting' volumes can't be deleted
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
# delete with force
volume_api.delete(self.context, volume, force=True)
# status is deleting
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('deleting', volume.status)
# clean up
self.volume.delete_volume(self.context, volume.id)
def test_cannot_force_delete_attached_volume(self):
"""Test volume can't be force delete in attached state."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume['attach_status'] = 'attached'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.VolumeAttached,
volume_api.delete,
self.context,
volume,
force=True)
self.volume.delete_volume(self.context, volume['id'])
def test_cannot_delete_volume_with_snapshots(self):
"""Test volume can't be deleted with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot = self._create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
self.volume.delete_snapshot(self.context, snapshot)
self.volume.delete_volume(self.context, volume['id'])
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot = self._create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
volume_api = cinder.volume.api.API()
snapshot.status = 'badstatus'
self.assertRaises(exception.InvalidSnapshot,
volume_api.delete_snapshot,
self.context,
snapshot)
snapshot.status = 'error'
self.volume.delete_snapshot(self.context, snapshot)
self.volume.delete_volume(self.context, volume['id'])
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
instance_uuid = '12345678-1234-5678-1234-567812345678'
# create volume and attach to the instance
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
attachment = db.volume_attach(self.context, values)
db.volume_attached(self.context, attachment['id'], instance_uuid,
None, '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
snapshot_ref.destroy()
db.volume_destroy(self.context, volume['id'])
# create volume and attach to the host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
values = {'volume_id': volume['id'],
'attached_host': 'fake_host',
'attach_status': 'attaching', }
attachment = db.volume_attach(self.context, values)
db.volume_attached(self.context, attachment['id'], None,
'fake_host', '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
snapshot_ref.destroy()
db.volume_destroy(self.context, volume['id'])
def test_create_snapshot_from_bootable_volume(self):
"""Test create snapshot from bootable volume."""
# create bootable volume from image
volume = self._create_volume_from_image()
volume_id = volume['id']
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
# get volume's volume_glance_metadata
ctxt = context.get_admin_context()
vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id)
self.assertTrue(vol_glance_meta)
# create snapshot from bootable volume
snap = self._create_snapshot(volume_id)
self.volume.create_snapshot(ctxt, volume_id, snap)
# get snapshot's volume_glance_metadata
snap_glance_meta = db.volume_snapshot_glance_metadata_get(
ctxt, snap.id)
self.assertTrue(snap_glance_meta)
# ensure that volume's glance metadata is copied
# to snapshot's glance metadata
self.assertEqual(len(vol_glance_meta), len(snap_glance_meta))
vol_glance_dict = {x.key: x.value for x in vol_glance_meta}
snap_glance_dict = {x.key: x.value for x in snap_glance_meta}
self.assertDictMatch(vol_glance_dict, snap_glance_dict)
# ensure that snapshot's status is changed to 'available'
self.assertEqual('available', snap.status)
# cleanup resource
snap.destroy()
db.volume_destroy(ctxt, volume_id)
def test_create_snapshot_from_bootable_volume_fail(self):
"""Test create snapshot from bootable volume.
But it fails to volume_glance_metadata_copy_to_snapshot.
As a result, status of snapshot is changed to ERROR.
"""
# create bootable volume from image
volume = self._create_volume_from_image()
volume_id = volume['id']
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
# get volume's volume_glance_metadata
ctxt = context.get_admin_context()
vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id)
self.assertTrue(vol_glance_meta)
snap = self._create_snapshot(volume_id)
snap_stat = snap.status
self.assertTrue(snap.id)
self.assertTrue(snap_stat)
# set to return DB exception
with mock.patch.object(db, 'volume_glance_metadata_copy_to_snapshot')\
as mock_db:
mock_db.side_effect = exception.MetadataCopyFailure(
reason="Because of DB service down.")
# create snapshot from bootable volume
self.assertRaises(exception.MetadataCopyFailure,
self.volume.create_snapshot,
ctxt,
volume_id,
snap)
# get snapshot's volume_glance_metadata
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_snapshot_glance_metadata_get,
ctxt, snap.id)
# ensure that status of snapshot is 'error'
self.assertEqual('error', snap.status)
# cleanup resource
snap.destroy()
db.volume_destroy(ctxt, volume_id)
def test_create_snapshot_from_bootable_volume_with_volume_metadata_none(
self):
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
# set bootable flag of volume to True
db.volume_update(self.context, volume_id, {'bootable': True})
snapshot = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_snapshot_glance_metadata_get,
self.context, snapshot.id)
# ensure that status of snapshot is 'available'
self.assertEqual('available', snapshot.status)
# cleanup resource
snapshot.destroy()
db.volume_destroy(self.context, volume_id)
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot = self._create_snapshot(volume_id, size=volume['size'])
self.volume.create_snapshot(self.context, volume_id, snapshot)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(
mox.IgnoreArg()).AndRaise(
exception.SnapshotIsBusy(snapshot_name='fake'))
self.mox.ReplayAll()
snapshot_id = snapshot.id
self.volume.delete_snapshot(self.context, snapshot)
snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
@test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_delete_no_dev_fails(self):
"""Test delete snapshot with no dev file fails."""
self.stubs.Set(os.path, 'exists', lambda x: False)
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot = self._create_snapshot(volume_id)
snapshot_id = snapshot.id
self.volume.create_snapshot(self.context, volume_id, snapshot)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(
mox.IgnoreArg()).AndRaise(
exception.SnapshotIsBusy(snapshot_name='fake'))
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot)
snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.mox.UnsetStubs()
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.delete_snapshot,
self.context,
snapshot)
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.delete_volume,
self.context,
volume_id)
@mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask._clone_image_volume')
def _create_volume_from_image(self, mock_clone_image_volume,
mock_fetch_img,
fakeout_copy_image_to_volume=False,
fakeout_clone_image=False,
clone_image_volume=False):
"""Test function of create_volume_from_image.
Test cases call this function to create a volume from image, caller
can choose whether to fake out copy_image_to_volume and clone_image,
after calling this, test cases should check status of the volume.
"""
def fake_local_path(volume):
return dst_path
def fake_copy_image_to_volume(context, volume,
image_service, image_id):
pass
def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize,
size=None, throttle=None):
pass
def fake_clone_image(ctx, volume_ref,
image_location, image_meta,
image_service):
return {'provider_location': None}, True
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
if fakeout_clone_image:
self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw)
if fakeout_copy_image_to_volume:
self.stubs.Set(self.volume.driver, 'copy_image_to_volume',
fake_copy_image_to_volume)
mock_clone_image_volume.return_value = ({}, clone_image_volume)
mock_fetch_img.return_value = mock.MagicMock(
spec=tests_utils.get_file_spec())
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume = tests_utils.create_volume(self.context, **self.volume_params)
# creating volume testdata
try:
request_spec = {
'volume_properties': self.volume_params,
'image_id': image_id,
}
self.volume.create_volume(self.context,
volume.id,
request_spec,
volume=volume)
finally:
# cleanup
os.unlink(dst_path)
volume = objects.Volume.get_by_id(self.context, volume.id)
return volume
def test_create_volume_from_image_cloned_status_available(self):
"""Test create volume from image via cloning.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image()
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_not_cloned_status_available(self):
"""Test create volume from image via full copy.
Verify that after copying image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image(fakeout_clone_image=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_exception(self):
"""Test create volume from a non-existing image.
Verify that create volume from a non-existing image, the volume
status is 'error' and is not bootable.
"""
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)
# creating volume testdata
kwargs = {'display_description': 'Test Desc',
'size': 20,
'availability_zone': 'fake_availability_zone',
'status': 'creating',
'attach_status': 'detached',
'host': 'dummy'}
volume = objects.Volume(context=self.context, **kwargs)
volume.create()
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume.id,
{'image_id': self.FAKE_UUID})
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error", volume['status'])
self.assertFalse(volume['bootable'])
# cleanup
volume.destroy()
os.unlink(dst_path)
def test_create_volume_from_image_copy_exception_rescheduling(self):
"""Test create volume with ImageCopyFailure
This exception should not trigger rescheduling and allocated_capacity
should be incremented so we're having assert for that here.
"""
def fake_copy_image_to_volume(context, volume, image_service,
image_id):
raise exception.ImageCopyFailure()
self.stubs.Set(self.volume.driver, 'copy_image_to_volume',
fake_copy_image_to_volume)
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image)
# NOTE(dulek): Rescheduling should not occur, so lets assert that
# allocated_capacity is incremented.
self.assertDictEqual(self.volume.stats['pools'],
{'_pool0': {'allocated_capacity_gb': 1}})
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.utils.brick_get_connector')
@mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled')
@mock.patch('cinder.volume.driver.BaseVD._detach_volume')
def test_create_volume_from_image_unavailable(self, mock_detach,
mock_secure, *args):
"""Test create volume with ImageCopyFailure
We'll raise an exception inside _connect_device after volume has
already been attached to confirm that it detaches the volume.
"""
mock_secure.side_effect = NameError
# We want to test BaseVD copy_image_to_volume and since FakeISCSIDriver
# inherits from LVM it overwrites it, so we'll mock it to use the
# BaseVD implementation.
unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
side_effect=bound_copy_method):
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image,
fakeout_copy_image_to_volume=False)
# We must have called detach method.
self.assertEqual(1, mock_detach.call_count)
def test_create_volume_from_image_clone_image_volume(self):
"""Test create volume from image via image volume.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image(clone_image_volume=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_exact_sized_image(self):
"""Test create volume from an image of the same size.
Verify that an image which is exactly the same size as the
volume, will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=1)
volume_id = volume['id']
self.assertEqual('creating', volume['status'])
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_mindisk_error(self):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_deleted_imaged(self):
"""Verify create volume from image will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'deleted'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
@mock.patch.object(QUOTAS, "rollback")
@mock.patch.object(QUOTAS, "commit")
@mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"])
def _do_test_create_volume_with_size(self, size, *_unused_quota_mocks):
volume_api = cinder.volume.api.API()
volume = volume_api.create(self.context,
size,
'name',
'description')
self.assertEqual(int(size), volume['size'])
def test_create_volume_int_size(self):
"""Test volume creation with int size."""
self._do_test_create_volume_with_size(2)
def test_create_volume_string_size(self):
"""Test volume creation with string size."""
self._do_test_create_volume_with_size('2')
@mock.patch.object(QUOTAS, "rollback")
@mock.patch.object(QUOTAS, "commit")
@mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"])
def test_create_volume_with_bad_size(self, *_unused_quota_mocks):
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'2Gb',
'name',
'description')
def test_create_volume_with_float_fails(self):
"""Test volume creation with invalid float size."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'1.5',
'name',
'description')
def test_create_volume_with_zero_size_fails(self):
"""Test volume creation with string size."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'0',
'name',
'description')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_begin_detaching_fails_available(self, volume_get):
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_get.return_value = volume
# Volume status is 'available'.
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
volume_get.assert_called_once_with(self.context, volume['id'])
volume_get.reset_mock()
volume['status'] = "in-use"
volume['attach_status'] = "detached"
# Should raise an error since not attached
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
volume_get.assert_called_once_with(self.context, volume['id'])
volume_get.reset_mock()
volume['attach_status'] = "attached"
# Ensure when attached no exception raised
volume_api.begin_detaching(self.context, volume)
volume_get.assert_called_once_with(self.context, volume['id'])
volume_get.reset_mock()
volume['status'] = "maintenance"
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
volume_get.assert_called_once_with(self.context, volume['id'])
def test_begin_roll_detaching_volume(self):
"""Test begin_detaching and roll_detaching functions."""
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context, **self.volume_params)
attachment = db.volume_attach(self.context,
{'volume_id': volume['id'],
'attached_host': 'fake-host'})
volume = db.volume_attached(
self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb')
volume_api = cinder.volume.api.API()
volume_api.begin_detaching(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual("detaching", volume['status'])
volume_api.roll_detaching(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual("in-use", volume['status'])
def test_volume_api_update(self):
# create a raw vol
volume = tests_utils.create_volume(self.context, **self.volume_params)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update(self.context, volume, update_dict)
# read changes from db
vol = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('test update name', vol['display_name'])
def test_volume_api_update_maintenance(self):
# create a raw vol
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume['status'] = 'maintenance'
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
self.assertRaises(exception.InvalidVolume, volume_api.update,
self.context, volume, update_dict)
def test_volume_api_update_snapshot(self):
# create raw snapshot
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = self._create_snapshot(volume['id'])
snapshot_id = snapshot.id
self.assertIsNone(snapshot.display_name)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update_snapshot(self.context, snapshot, update_dict)
# read changes from db
snap = objects.Snapshot.get_by_id(context.get_admin_context(),
snapshot_id)
self.assertEqual('test update name', snap.display_name)
def test_volume_api_get_list_volumes_image_metadata(self):
"""Test get_list_volumes_image_metadata in volume API."""
ctxt = context.get_admin_context()
db.volume_create(ctxt, {'id': 'fake1', 'status': 'available',
'host': 'test', 'provider_location': '',
'size': 1})
db.volume_glance_metadata_create(ctxt, 'fake1', 'key1', 'value1')
db.volume_glance_metadata_create(ctxt, 'fake1', 'key2', 'value2')
db.volume_create(ctxt, {'id': 'fake2', 'status': 'available',
'host': 'test', 'provider_location': '',
'size': 1})
db.volume_glance_metadata_create(ctxt, 'fake2', 'key3', 'value3')
db.volume_glance_metadata_create(ctxt, 'fake2', 'key4', 'value4')
volume_api = cinder.volume.api.API()
results = volume_api.get_list_volumes_image_metadata(ctxt, ['fake1',
'fake2'])
expect_results = {'fake1': {'key1': 'value1', 'key2': 'value2'},
'fake2': {'key3': 'value3', 'key4': 'value4'}}
self.assertEqual(expect_results, results)
@mock.patch.object(QUOTAS, 'reserve')
def test_extend_volume(self, reserve):
"""Test volume can be extended at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, size=2,
status='creating', host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume_api = cinder.volume.api.API()
# Extend fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.extend,
self.context,
volume,
3)
volume['status'] = 'available'
# Extend fails when new_size < orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
1)
# Extend fails when new_size == orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
2)
# works when new_size > orig_size
reserve.return_value = ["RESERVATION"]
volume_api.extend(self.context, volume, 3)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('extending', volume['status'])
reserve.assert_called_once_with(self.context, gigabytes=1,
project_id=volume['project_id'])
# Test the quota exceeded
volume['status'] = 'available'
reserve.side_effect = exception.OverQuota(overs=['gigabytes'],
quotas={'gigabytes': 20},
usages={'gigabytes':
{'reserved': 5,
'in_use': 15}})
self.assertRaises(exception.VolumeSizeExceedsAvailableQuota,
volume_api.extend, self.context,
volume, 3)
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_driver_not_initialized(self):
"""Test volume can be extended at API level."""
# create a volume and assign to host
fake_reservations = ['RESERVATION']
volume = tests_utils.create_volume(self.context, size=2,
status='available',
host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.extend_volume,
self.context, volume['id'], 3,
fake_reservations)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('error_extending', volume.status)
# lets cleanup the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_manager(self):
"""Test volume can be extended at the manager level."""
def fake_extend(volume, new_size):
volume['size'] = new_size
fake_reservations = ['RESERVATION']
volume = tests_utils.create_volume(self.context, size=2,
status='creating', host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
# Test driver exception
with mock.patch.object(self.volume.driver,
'extend_volume') as extend_volume:
extend_volume.side_effect =\
exception.CinderException('fake exception')
volume['status'] = 'extending'
self.volume.extend_volume(self.context, volume['id'], '4',
fake_reservations)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(2, volume['size'])
self.assertEqual('error_extending', volume['status'])
# Test driver success
with mock.patch.object(self.volume.driver,
'extend_volume') as extend_volume:
with mock.patch.object(QUOTAS, 'commit') as quotas_commit:
extend_volume.return_value = fake_extend
volume['status'] = 'extending'
self.volume.extend_volume(self.context, volume['id'], '4',
fake_reservations)
volume = db.volume_get(context.get_admin_context(),
volume['id'])
self.assertEqual(4, volume['size'])
self.assertEqual('available', volume['status'])
quotas_commit.assert_called_with(
self.context,
['RESERVATION'],
project_id=volume['project_id'])
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_with_volume_type(self):
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'type', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'type')
volume_api = cinder.volume.api.API()
volume = volume_api.create(self.context, 100, 'name', 'description',
volume_type=vol_type)
try:
usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type')
volumes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
volumes_in_use = 0
self.assertEqual(100, volumes_in_use)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume['volume_type_id'] = vol_type.get('id')
volume_api.extend(self.context, volume, 200)
try:
usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type')
volumes_reserved = usage.reserved
except exception.QuotaUsageNotFound:
volumes_reserved = 0
self.assertEqual(100, volumes_reserved)
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
def test_create_volume_from_sourcereplica(self, _create_replica_test):
"""Test volume can be created from a volume replica."""
_create_replica_test.return_value = None
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'],
{'source_replicaid': volume_src['id']})
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
self.assertTrue(_create_replica_test.called)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
def test_create_volume_from_sourcevol(self):
"""Test volume can be created from a source volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'])
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
@mock.patch('cinder.volume.api.API.list_availability_zones',
return_value=({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True}))
def test_create_volume_from_sourcevol_fail_wrong_az(self, _mock_laz):
"""Test volume can't be cloned from an other volume in different az."""
volume_api = cinder.volume.api.API()
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_src = db.volume_get(self.context, volume_src['id'])
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src)
self.assertEqual('az2', volume_dst['availability_zone'])
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src,
availability_zone='nova')
def test_create_volume_from_sourcevol_with_glance_metadata(self):
"""Test glance metadata can be correctly copied to new volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = self._create_volume_from_image()
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'])
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
src_glancemeta = db.volume_get(context.get_admin_context(),
volume_src['id']).volume_glance_metadata
dst_glancemeta = db.volume_get(context.get_admin_context(),
volume_dst['id']).volume_glance_metadata
for meta_src in src_glancemeta:
for meta_dst in dst_glancemeta:
if meta_dst.key == meta_src.key:
self.assertEqual(meta_src.value, meta_dst.value)
self.volume.delete_volume(self.context, volume_src['id'])
self.volume.delete_volume(self.context, volume_dst['id'])
def test_create_volume_from_sourcevol_failed_clone(self):
"""Test src vol status will be restore by error handling code."""
def fake_error_create_cloned_volume(volume, src_vref):
db.volume_update(self.context, src_vref['id'], {'status': 'error'})
raise exception.CinderException('fake exception')
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_error_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_dst['id'])
self.assertEqual('creating', volume_src['status'])
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
def test_clean_temporary_volume(self):
def fake_delete_volume(ctxt, volume):
volume.destroy()
fake_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host,
migration_status='migrating')
fake_new_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
# 1. Only clean the db
self.volume._clean_temporary_volume(self.context, fake_volume,
fake_new_volume,
clean_db_only=True)
self.assertRaises(exception.VolumeNotFound,
db.volume_get, self.context,
fake_new_volume.id)
# 2. Delete the backend storage
fake_new_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
with mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \
mock_delete_volume:
mock_delete_volume.side_effect = fake_delete_volume
self.volume._clean_temporary_volume(self.context,
fake_volume,
fake_new_volume,
clean_db_only=False)
self.assertRaises(exception.VolumeNotFound,
db.volume_get, self.context,
fake_new_volume.id)
# Check when the migrated volume is not in migration
fake_new_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
fake_volume.migration_status = 'non-migrating'
fake_volume.save()
self.volume._clean_temporary_volume(self.context, fake_volume,
fake_new_volume)
volume = db.volume_get(context.get_admin_context(),
fake_new_volume.id)
self.assertIsNone(volume.migration_status)
def test_update_volume_readonly_flag(self):
"""Test volume readonly flag can be updated at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
def sort_func(obj):
return obj['name']
volume_api = cinder.volume.api.API()
# Update fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.update_readonly_flag,
self.context,
volume,
False)
volume['status'] = 'available'
# works when volume in 'available' status
volume_api.update_readonly_flag(self.context, volume, False)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('available', volume['status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_secure_file_operations_enabled(self):
"""Test secure file operations setting for base driver.
General, non network file system based drivers do not have
anything to do with "secure_file_operations". This test verifies that
calling the method always returns False.
"""
ret_flag = self.volume.driver.secure_file_operations_enabled()
self.assertFalse(ret_flag)
@mock.patch('cinder.volume.flows.common.make_pretty_name',
new=mock.MagicMock())
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_volume',
return_value=None)
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask.execute',
side_effect=exception.DriverNotInitialized())
def test_create_volume_raise_rescheduled_exception(self, mock_execute,
mock_reschedule):
# Create source volume
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
test_vol_id = test_vol['id']
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, test_vol_id,
{'volume_properties': self.volume_params},
{'retry': {'num_attempts': 1, 'host': []}})
self.assertTrue(mock_reschedule.called)
volume = db.volume_get(context.get_admin_context(), test_vol_id)
self.assertEqual('creating', volume['status'])
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask.execute')
def test_create_volume_raise_unrescheduled_exception(self, mock_execute):
# create source volume
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
test_vol_id = test_vol['id']
mock_execute.side_effect = exception.VolumeNotFound(
volume_id=test_vol_id)
self.assertRaises(exception.VolumeNotFound,
self.volume.create_volume,
self.context, test_vol_id,
{'volume_properties': self.volume_params},
{'retry': {'num_attempts': 1, 'host': []}})
volume = db.volume_get(context.get_admin_context(), test_vol_id)
self.assertEqual('error', volume['status'])
class VolumeMigrationTestCase(VolumeTestCase):
def test_migrate_volume_driver(self):
"""Test volume migration done by driver."""
# stub out driver and rpc functions
self.stubs.Set(self.volume.driver, 'migrate_volume',
lambda x, y, z, new_type_id=None: (True,
{'user_id': 'foo'}))
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
migration_status='migrating')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume.id, host_obj, False,
volume=volume)
# check volume properties
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
def _fake_create_volume(self, ctxt, volume, host, req_spec, filters,
allow_reschedule=True):
return db.volume_update(ctxt, volume['id'],
{'status': self.expected_status})
def test_migrate_volume_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume') as \
mock_migrate,\
mock.patch.object(self.volume.driver, 'create_export') as \
mock_create_export:
# Exception case at self.driver.migrate_volume and create_export
mock_migrate.side_effect = processutils.ProcessExecutionError
mock_create_export.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
False,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
self.assertEqual('available', volume.status)
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic(self, volume_get,
migrate_volume_completion,
nova_api):
fake_volume_id = 'fake_volume_id'
fake_db_new_volume = {'status': 'available', 'id': fake_volume_id}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
new_volume_obj = fake_volume.fake_volume_obj(self.context,
**fake_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
volume_get.return_value = fake_new_volume
update_server_volume = nova_api.return_value.update_server_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
with mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume:
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
mock_copy_volume.assert_called_with(self.context, volume,
new_volume_obj,
remote='dest')
migrate_volume_completion.assert_called_with(
self.context, volume.id, new_volume_obj.id, error=False)
self.assertFalse(update_server_volume.called)
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic_attached_volume(self, volume_get,
migrate_volume_completion,
nova_api):
attached_host = 'some-host'
fake_volume_id = 'fake_volume_id'
fake_db_new_volume = {'status': 'available', 'id': fake_volume_id}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
fake_uuid = fakes.get_fake_uuid()
update_server_volume = nova_api.return_value.update_server_volume
volume_get.return_value = fake_new_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
volume_attach = tests_utils.attach_volume(
self.context, volume['id'], fake_uuid, attached_host, '/dev/vda')
self.assertIsNotNone(volume_attach['volume_attachment'][0]['id'])
self.assertEqual(
fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid'])
self.assertEqual('in-use', volume_attach['status'])
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
self.assertFalse(migrate_volume_completion.called)
update_server_volume.assert_called_with(self.context, fake_uuid,
volume['id'], fake_volume_id)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')
def test_migrate_volume_for_volume_generic(self, create_volume,
rpc_delete_volume,
update_migrated_volume):
fake_volume = tests_utils.create_volume(self.context, size=1,
previous_status='available',
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
with mock.patch.object(self.volume.driver, 'migrate_volume') as \
mock_migrate_volume,\
mock.patch.object(self.volume, '_copy_volume_data'),\
mock.patch.object(self.volume.driver, 'delete_volume') as \
delete_volume:
create_volume.side_effect = self._fake_create_volume
self.volume.migrate_volume(self.context, fake_volume.id,
host_obj, True, volume=fake_volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
fake_volume.id)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
self.assertFalse(mock_migrate_volume.called)
self.assertFalse(delete_volume.called)
self.assertTrue(rpc_delete_volume.called)
self.assertTrue(update_migrated_volume.called)
def test_migrate_volume_generic_copy_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion'),\
mock.patch.object(self.volume.driver, 'create_export'):
# Exception case at migrate_volume_generic
# source_volume['migration_status'] is 'migrating'
mock_create_volume.side_effect = self._fake_create_volume
mock_copy_volume.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
self.assertEqual('available', volume.status)
@mock.patch('cinder.db.volume_update')
def test_update_migrated_volume(self, volume_update):
fake_host = 'fake_host'
fake_new_host = 'fake_new_host'
fake_update = {'_name_id': 'updated_id',
'provider_location': 'updated_location'}
fake_elevated = context.RequestContext('fake', self.project_id,
is_admin=True)
volume = tests_utils.create_volume(self.context, size=1,
status='available',
host=fake_host)
new_volume = tests_utils.create_volume(
self.context, size=1,
status='available',
provider_location='fake_provider_location',
_name_id='fake_name_id',
host=fake_new_host)
new_volume._name_id = 'fake_name_id'
new_volume.provider_location = 'fake_provider_location'
fake_update_error = {'_name_id': new_volume._name_id,
'provider_location':
new_volume.provider_location}
expected_update = {'_name_id': volume._name_id,
'provider_location': volume.provider_location}
with mock.patch.object(self.volume.driver,
'update_migrated_volume') as migrate_update,\
mock.patch.object(self.context, 'elevated') as elevated:
migrate_update.return_value = fake_update
elevated.return_value = fake_elevated
self.volume.update_migrated_volume(self.context, volume,
new_volume, 'available')
volume_update.assert_has_calls((
mock.call(fake_elevated, new_volume.id, expected_update),
mock.call(fake_elevated, volume.id, fake_update)))
# Test the case for update_migrated_volume not implemented
# for the driver.
migrate_update.reset_mock()
volume_update.reset_mock()
# Reset the volume objects to their original value, since they
# were changed in the last call.
new_volume._name_id = 'fake_name_id'
new_volume.provider_location = 'fake_provider_location'
migrate_update.side_effect = NotImplementedError
self.volume.update_migrated_volume(self.context, volume,
new_volume, 'available')
volume_update.assert_has_calls((
mock.call(fake_elevated, new_volume.id, fake_update),
mock.call(fake_elevated, volume.id, fake_update_error)))
def test_migrate_volume_generic_create_volume_error(self):
self.expected_status = 'error'
with mock.patch.object(self.volume.driver, 'migrate_volume'), \
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \
mock_create_volume, \
mock.patch.object(self.volume, '_clean_temporary_volume') as \
clean_temporary_volume:
# Exception case at the creation of the new temporary volume
mock_create_volume.side_effect = self._fake_create_volume
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(exception.VolumeMigrationFailed,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
self.assertTrue(clean_temporary_volume.called)
self.expected_status = 'available'
def test_migrate_volume_generic_timeout_error(self):
CONF.set_override("migration_create_volume_timeout_secs", 2)
with mock.patch.object(self.volume.driver, 'migrate_volume'), \
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \
mock_create_volume, \
mock.patch.object(self.volume, '_clean_temporary_volume') as \
clean_temporary_volume, \
mock.patch.object(time, 'sleep'):
# Exception case at the timeout of the volume creation
self.expected_status = 'creating'
mock_create_volume.side_effect = self._fake_create_volume
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(exception.VolumeMigrationFailed,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
self.assertTrue(clean_temporary_volume.called)
self.expected_status = 'available'
def test_migrate_volume_generic_create_export_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion'),\
mock.patch.object(self.volume.driver, 'create_export') as \
mock_create_export:
# Exception case at create_export
mock_create_volume.side_effect = self._fake_create_volume
mock_copy_volume.side_effect = processutils.ProcessExecutionError
mock_create_export.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
def test_migrate_volume_generic_migrate_volume_completion_error(self):
def fake_migrate_volume_completion(ctxt, volume_id, new_volume_id,
error=False):
db.volume_update(ctxt, volume['id'],
{'migration_status': 'completing'})
raise processutils.ProcessExecutionError
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion')\
as mock_migrate_compl,\
mock.patch.object(self.volume.driver, 'create_export'), \
mock.patch.object(self.volume, '_attach_volume') \
as mock_attach, \
mock.patch.object(self.volume, '_detach_volume'), \
mock.patch.object(os_brick.initiator.connector,
'get_connector_properties') \
as mock_get_connector_properties, \
mock.patch.object(volutils, 'copy_volume') as mock_copy, \
mock.patch.object(volume_rpcapi.VolumeAPI,
'get_capabilities') \
as mock_get_capabilities:
# Exception case at delete_volume
# source_volume['migration_status'] is 'completing'
mock_create_volume.side_effect = self._fake_create_volume
mock_migrate_compl.side_effect = fake_migrate_volume_completion
mock_get_connector_properties.return_value = {}
mock_attach.side_effect = [{'device': {'path': 'bar'}},
{'device': {'path': 'foo'}}]
mock_get_capabilities.return_value = {'sparse_copy_volume': True}
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
mock_copy.assert_called_once_with('foo', 'bar', 0, '1M',
sparse=True)
def _test_migrate_volume_completion(self, status='available',
instance_uuid=None, attached_host=None,
retyping=False,
previous_status='available'):
def fake_attach_volume(ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
tests_utils.attach_volume(ctxt, volume.id,
instance_uuid, host_name,
'/dev/vda')
initial_status = retyping and 'retyping' or status
old_volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
status=initial_status,
migration_status='migrating',
previous_status=previous_status)
attachment_id = None
if status == 'in-use':
vol = tests_utils.attach_volume(self.context, old_volume.id,
instance_uuid, attached_host,
'/dev/vda')
self.assertEqual('in-use', vol['status'])
attachment_id = vol['volume_attachment'][0]['id']
target_status = 'target:%s' % old_volume.id
new_host = CONF.host + 'new'
new_volume = tests_utils.create_volume(self.context, size=0,
host=new_host,
migration_status=target_status)
with mock.patch.object(self.volume, 'detach_volume') as \
mock_detach_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \
mock_delete_volume, \
mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') as \
mock_attach_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI,
'update_migrated_volume'),\
mock.patch.object(self.volume.driver, 'attach_volume'):
mock_attach_volume.side_effect = fake_attach_volume
self.volume.migrate_volume_completion(self.context, old_volume.id,
new_volume.id)
after_new_volume = objects.Volume.get_by_id(self.context,
new_volume.id)
after_old_volume = objects.Volume.get_by_id(self.context,
old_volume.id)
if status == 'in-use':
mock_detach_volume.assert_called_with(self.context,
old_volume.id,
attachment_id)
attachment = db.volume_attachment_get_by_instance_uuid(
self.context, old_volume.id, instance_uuid)
self.assertIsNotNone(attachment)
self.assertEqual(attached_host, attachment['attached_host'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
else:
self.assertFalse(mock_detach_volume.called)
self.assertTrue(mock_delete_volume.called)
self.assertEqual(old_volume.host, after_new_volume.host)
self.assertEqual(new_volume.host, after_old_volume.host)
def test_migrate_volume_completion_retype_available(self):
self._test_migrate_volume_completion('available', retyping=True)
def test_migrate_volume_completion_retype_in_use(self):
self._test_migrate_volume_completion(
'in-use',
'83c969d5-065e-4c9c-907d-5394bc2e98e2',
'some-host',
retyping=True,
previous_status='in-use')
def test_migrate_volume_completion_migrate_available(self):
self._test_migrate_volume_completion()
def test_migrate_volume_completion_migrate_in_use(self):
self._test_migrate_volume_completion(
'in-use',
'83c969d5-065e-4c9c-907d-5394bc2e98e2',
'some-host',
retyping=False,
previous_status='in-use')
def test_retype_setup_fail_volume_is_available(self):
"""Verify volume is still available if retype prepare failed."""
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})
new_vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 0)
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='available',
volume_type_id=old_vol_type['id'])
api = cinder.volume.api.API()
self.assertRaises(exception.VolumeLimitExceeded, api.retype,
self.context, volume, new_vol_type['id'])
volume = db.volume_get(elevated, volume.id)
self.assertEqual('available', volume['status'])
def _retype_volume_exec(self, driver, snap=False, policy='on-demand',
migrate_exc=False, exc=None, diff_equal=False,
replica=False):
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 10)
if replica:
rep_status = 'active'
else:
rep_status = 'disabled'
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='retyping',
volume_type_id=old_vol_type['id'],
replication_status=rep_status)
volume.previous_status = 'available'
volume.save()
if snap:
self._create_snapshot(volume.id, size=volume.size)
if driver or diff_equal:
host_obj = {'host': CONF.host, 'capabilities': {}}
else:
host_obj = {'host': 'newhost', 'capabilities': {}}
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(self.context,
reserve_opts,
vol_type['id'])
reservations = QUOTAS.reserve(self.context,
project_id=project_id,
**reserve_opts)
with mock.patch.object(self.volume.driver, 'retype') as _retype,\
mock.patch.object(volume_types, 'volume_types_diff') as _diff,\
mock.patch.object(self.volume, 'migrate_volume') as _mig,\
mock.patch.object(db.sqlalchemy.api, 'volume_get') as mock_get:
mock_get.return_value = volume
_retype.return_value = driver
_diff.return_value = ({}, diff_equal)
if migrate_exc:
_mig.side_effect = KeyError
else:
_mig.return_value = True
if not exc:
self.volume.retype(self.context, volume.id,
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations,
volume=volume)
else:
self.assertRaises(exc, self.volume.retype,
self.context, volume.id,
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations,
volume=volume)
# get volume/quota properties
volume = objects.Volume.get_by_id(elevated, volume.id)
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes_new')
volumes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
volumes_in_use = 0
# check properties
if driver or diff_equal:
self.assertEqual(vol_type['id'], volume.volume_type_id)
self.assertEqual('available', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(1, volumes_in_use)
elif not exc:
self.assertEqual(old_vol_type['id'], volume.volume_type_id)
self.assertEqual('retyping', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(1, volumes_in_use)
else:
self.assertEqual(old_vol_type['id'], volume.volume_type_id)
self.assertEqual('available', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(0, volumes_in_use)
def test_retype_volume_driver_success(self):
self._retype_volume_exec(True)
def test_retype_volume_migration_bad_policy(self):
# Test volume retype that requires migration by not allowed
self._retype_volume_exec(False, policy='never',
exc=exception.VolumeMigrationFailed)
def test_retype_volume_migration_with_replica(self):
self._retype_volume_exec(False,
replica=True,
exc=exception.InvalidVolume)
def test_retype_volume_migration_with_snaps(self):
self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume)
def test_retype_volume_migration_failed(self):
self._retype_volume_exec(False, migrate_exc=True, exc=KeyError)
def test_retype_volume_migration_success(self):
self._retype_volume_exec(False, migrate_exc=False, exc=None)
def test_retype_volume_migration_equal_types(self):
self._retype_volume_exec(False, diff_equal=True)
def test_migrate_driver_not_initialized(self):
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.migrate_volume,
self.context, volume.id, host_obj, True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
# lets cleanup the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'])
def test_delete_source_volume_in_migration(self):
"""Test deleting a source volume that is in migration."""
self._test_delete_volume_in_migration('migrating')
def test_delete_destination_volume_in_migration(self):
"""Test deleting a destination volume that is in migration."""
self._test_delete_volume_in_migration('target:vol-id')
def _test_delete_volume_in_migration(self, migration_status):
"""Test deleting a volume that is in migration."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume = db.volume_update(self.context, volume['id'],
{'status': 'available',
'migration_status': migration_status})
self.volume.delete_volume(self.context, volume['id'])
# The volume is successfully removed during the volume delete
# and won't exist in the database any more.
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.context, volume['id'])
class ConsistencyGroupTestCase(BaseVolumeTestCase):
def test_delete_volume_in_consistency_group(self):
"""Test deleting a volume that's tied to a consistency group fails."""
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **self.volume_params)
consistencygroup_id = '12345678-1234-5678-1234-567812345678'
volume = db.volume_update(self.context, volume['id'],
{'status': 'available',
'consistencygroup_id': consistencygroup_id})
self.assertRaises(exception.InvalidVolume,
volume_api.delete, self.context, volume)
@mock.patch.object(CGQUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(CGQUOTAS, "commit")
@mock.patch.object(CGQUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"delete_consistencygroup",
return_value=({'status': 'deleted'}, []))
def test_create_delete_consistencygroup(self, fake_delete_cg,
fake_rollback,
fake_commit, fake_reserve):
"""Test consistencygroup can be created and deleted."""
def fake_driver_create_cg(context, group):
"""Make sure that the pool is part of the host."""
self.assertIn('host', group)
host = group.host
pool = volutils.extract_host(host, level='pool')
self.assertEqual('fakepool', pool)
return {'status': 'available'}
self.stubs.Set(self.volume.driver, 'create_consistencygroup',
fake_driver_create_cg)
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool')
group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.create_consistencygroup(self.context, group)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[0]
self.assertEqual('consistencygroup.create.start', msg['event_type'])
expected = {
'status': 'available',
'name': 'test_cg',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': 'fake',
'consistencygroup_id': group.id
}
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[1]
self.assertEqual('consistencygroup.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
self.assertEqual(
group.id,
objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
group.id).id)
self.volume.delete_consistencygroup(self.context, group)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'), group.id)
self.assertEqual('deleted', cg.status)
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[2]
self.assertEqual('consistencygroup.delete.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('consistencygroup.delete.end', msg['event_type'])
expected['status'] = 'deleted'
self.assertDictMatch(expected, msg['payload'])
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group.id)
@mock.patch.object(CGQUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(CGQUOTAS, "commit")
@mock.patch.object(CGQUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"create_consistencygroup",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"update_consistencygroup")
def test_update_consistencygroup(self, fake_update_cg,
fake_create_cg, fake_rollback,
fake_commit, fake_reserve):
"""Test consistencygroup can be updated."""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
self.volume.create_consistencygroup(self.context, group)
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
volume2 = tests_utils.create_volume(
self.context,
consistencygroup_id=None,
**self.volume_params)
volume_id2 = volume2['id']
self.volume.create_volume(self.context, volume_id2)
fake_update_cg.return_value = (
{'status': 'available'},
[{'id': volume_id2, 'status': 'available'}],
[{'id': volume_id, 'status': 'available'}])
self.volume.update_consistencygroup(self.context, group,
add_volumes=volume_id2,
remove_volumes=volume_id)
cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
expected = {
'status': 'available',
'name': 'test_cg',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': 'fake',
'consistencygroup_id': group.id
}
self.assertEqual('available', cg.status)
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('consistencygroup.update.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('consistencygroup.update.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
cgvolumes = db.volume_get_all_by_group(self.context, group.id)
cgvol_ids = [cgvol['id'] for cgvol in cgvolumes]
# Verify volume is removed.
self.assertNotIn(volume_id, cgvol_ids)
# Verify volume is added.
self.assertIn(volume_id2, cgvol_ids)
self.volume_params['status'] = 'wrong-status'
volume3 = tests_utils.create_volume(
self.context,
consistencygroup_id=None,
**self.volume_params)
volume_id3 = volume3['id']
volume_get_orig = self.volume.db.volume_get
self.volume.db.volume_get = mock.Mock(
return_value={'status': 'wrong_status',
'id': volume_id3})
# Try to add a volume in wrong status
self.assertRaises(exception.InvalidVolume,
self.volume.update_consistencygroup,
self.context,
group,
add_volumes=volume_id3,
remove_volumes=None)
self.volume.db.volume_get.reset_mock()
self.volume.db.volume_get = volume_get_orig
@mock.patch.object(driver.VolumeDriver,
"create_consistencygroup",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_consistencygroup",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_cgsnapshot",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_cgsnapshot",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_consistencygroup_from_src",
return_value=(None, None))
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
def test_create_consistencygroup_from_src(self,
mock_create_cloned_vol,
mock_create_vol_from_snap,
mock_create_from_src,
mock_delete_cgsnap,
mock_create_cgsnap,
mock_delete_cg,
mock_create_cg):
"""Test consistencygroup can be created and deleted."""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
status='available')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
status='available',
host=CONF.host,
size=1)
volume_id = volume['id']
cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_id)
cgsnapshot = cgsnapshot_returns[0]
snapshot_id = cgsnapshot_returns[1]['id']
# Create CG from source CG snapshot.
group2 = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
cgsnapshot_id=cgsnapshot.id)
group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
volume2 = tests_utils.create_volume(
self.context,
consistencygroup_id=group2.id,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, volume2.id, volume=volume2)
self.volume.create_consistencygroup_from_src(
self.context, group2, cgsnapshot=cgsnapshot)
cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
expected = {
'status': 'available',
'name': 'test_cg',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': 'fake',
'consistencygroup_id': group2.id,
}
self.assertEqual('available', cg2.status)
self.assertEqual(group2.id, cg2['id'])
self.assertEqual(cgsnapshot.id, cg2['cgsnapshot_id'])
self.assertIsNone(cg2['source_cgid'])
msg = self.notifier.notifications[2]
self.assertEqual('consistencygroup.create.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[4]
self.assertEqual('consistencygroup.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
if len(self.notifier.notifications) > 6:
self.assertFalse(self.notifier.notifications[6],
self.notifier.notifications)
self.assertEqual(6, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.delete_consistencygroup(self.context, group2)
if len(self.notifier.notifications) > 10:
self.assertFalse(self.notifier.notifications[10],
self.notifier.notifications)
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('consistencygroup.delete.start', msg['event_type'])
expected['status'] = 'available'
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('consistencygroup.delete.end', msg['event_type'])
expected['status'] = 'deleted'
self.assertDictMatch(expected, msg['payload'])
cg2 = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'), group2.id)
self.assertEqual('deleted', cg2.status)
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group2.id)
# Create CG from source CG.
group3 = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
source_cgid=group.id)
volume3 = tests_utils.create_volume(
self.context,
consistencygroup_id=group3.id,
source_volid=volume_id,
**self.volume_params)
self.volume.create_volume(self.context, volume3.id, volume=volume3)
self.volume.create_consistencygroup_from_src(
self.context, group3, source_cg=group)
cg3 = objects.ConsistencyGroup.get_by_id(self.context, group3.id)
self.assertEqual('available', cg3.status)
self.assertEqual(group3.id, cg3.id)
self.assertEqual(group.id, cg3.source_cgid)
self.assertIsNone(cg3.cgsnapshot_id)
self.volume.delete_cgsnapshot(self.context, cgsnapshot)
self.volume.delete_consistencygroup(self.context, group)
def test_sort_snapshots(self):
vol1 = {'id': '1', 'name': 'volume 1',
'snapshot_id': '1',
'consistencygroup_id': '1'}
vol2 = {'id': '2', 'name': 'volume 2',
'snapshot_id': '2',
'consistencygroup_id': '1'}
vol3 = {'id': '3', 'name': 'volume 3',
'snapshot_id': '3',
'consistencygroup_id': '1'}
snp1 = {'id': '1', 'name': 'snap 1',
'cgsnapshot_id': '1'}
snp2 = {'id': '2', 'name': 'snap 2',
'cgsnapshot_id': '1'}
snp3 = {'id': '3', 'name': 'snap 3',
'cgsnapshot_id': '1'}
snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1)
snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2)
snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3)
volumes = []
snapshots = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
snapshots.append(snp2_obj)
snapshots.append(snp3_obj)
snapshots.append(snp1_obj)
i = 0
for vol in volumes:
snap = snapshots[i]
i += 1
self.assertNotEqual(vol['snapshot_id'], snap.id)
sorted_snaps = self.volume._sort_snapshots(volumes, snapshots)
i = 0
for vol in volumes:
snap = sorted_snaps[i]
i += 1
self.assertEqual(vol['snapshot_id'], snap.id)
snapshots[2]['id'] = '9999'
self.assertRaises(exception.SnapshotNotFound,
self.volume._sort_snapshots,
volumes, snapshots)
self.assertRaises(exception.InvalidInput,
self.volume._sort_snapshots,
volumes, [])
def test_sort_source_vols(self):
vol1 = {'id': '1', 'name': 'volume 1',
'source_volid': '1',
'consistencygroup_id': '2'}
vol2 = {'id': '2', 'name': 'volume 2',
'source_volid': '2',
'consistencygroup_id': '2'}
vol3 = {'id': '3', 'name': 'volume 3',
'source_volid': '3',
'consistencygroup_id': '2'}
src_vol1 = {'id': '1', 'name': 'source vol 1',
'consistencygroup_id': '1'}
src_vol2 = {'id': '2', 'name': 'source vol 2',
'consistencygroup_id': '1'}
src_vol3 = {'id': '3', 'name': 'source vol 3',
'consistencygroup_id': '1'}
volumes = []
src_vols = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
src_vols.append(src_vol2)
src_vols.append(src_vol3)
src_vols.append(src_vol1)
i = 0
for vol in volumes:
src_vol = src_vols[i]
i += 1
self.assertNotEqual(vol['source_volid'], src_vol['id'])
sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols)
i = 0
for vol in volumes:
src_vol = sorted_src_vols[i]
i += 1
self.assertEqual(vol['source_volid'], src_vol['id'])
src_vols[2]['id'] = '9999'
self.assertRaises(exception.VolumeNotFound,
self.volume._sort_source_vols,
volumes, src_vols)
self.assertRaises(exception.InvalidInput,
self.volume._sort_source_vols,
volumes, [])
def _create_cgsnapshot(self, group_id, volume_id, size='0'):
"""Create a cgsnapshot object."""
cgsnap = objects.CGSnapshot(self.context)
cgsnap.user_id = 'fake'
cgsnap.project_id = 'fake'
cgsnap.consistencygroup_id = group_id
cgsnap.status = "creating"
cgsnap.create()
# Create a snapshot object
snap = objects.Snapshot(context.get_admin_context())
snap.volume_size = size
snap.user_id = 'fake'
snap.project_id = 'fake'
snap.volume_id = volume_id
snap.status = "available"
snap.cgsnapshot_id = cgsnap.id
snap.create()
return cgsnap, snap
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
autospec=True,
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
autospec=True,
return_value=({'status': 'deleted'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.create_cgsnapshot',
autospec=True,
return_value=({'status': 'available'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.delete_cgsnapshot',
autospec=True,
return_value=({'status': 'deleted'}, []))
def test_create_delete_cgsnapshot(self,
mock_del_cgsnap, mock_create_cgsnap,
mock_del_cg, _mock_create_cg):
"""Test cgsnapshot can be created and deleted."""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
if len(self.notifier.notifications) > 2:
self.assertFalse(self.notifier.notifications[2],
self.notifier.notifications)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_id)
cgsnapshot = cgsnapshot_returns[0]
self.volume.create_cgsnapshot(self.context, cgsnapshot)
self.assertEqual(cgsnapshot.id,
objects.CGSnapshot.get_by_id(
context.get_admin_context(),
cgsnapshot.id).id)
if len(self.notifier.notifications) > 6:
self.assertFalse(self.notifier.notifications[6],
self.notifier.notifications)
msg = self.notifier.notifications[2]
self.assertEqual('cgsnapshot.create.start', msg['event_type'])
expected = {
'created_at': 'DONTCARE',
'name': None,
'cgsnapshot_id': cgsnapshot.id,
'status': 'creating',
'tenant_id': 'fake',
'user_id': 'fake',
'consistencygroup_id': group.id
}
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('snapshot.create.start', msg['event_type'])
msg = self.notifier.notifications[4]
expected['status'] = 'available'
self.assertEqual('cgsnapshot.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[5]
self.assertEqual('snapshot.create.end', msg['event_type'])
self.assertEqual(6, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.delete_cgsnapshot(self.context, cgsnapshot)
if len(self.notifier.notifications) > 10:
self.assertFalse(self.notifier.notifications[10],
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('cgsnapshot.delete.start', msg['event_type'])
expected['status'] = 'available'
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('cgsnapshot.delete.end', msg['event_type'])
expected['status'] = 'deleted'
self.assertDictMatch(expected, msg['payload'])
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
cgsnap = objects.CGSnapshot.get_by_id(
context.get_admin_context(read_deleted='yes'),
cgsnapshot.id)
self.assertEqual('deleted', cgsnap.status)
self.assertRaises(exception.NotFound,
objects.CGSnapshot.get_by_id,
self.context,
cgsnapshot.id)
self.volume.delete_consistencygroup(self.context, group)
self.assertTrue(mock_create_cgsnap.called)
self.assertTrue(mock_del_cgsnap.called)
self.assertTrue(mock_del_cg.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
return_value=({'status': 'deleted'}, []))
def test_delete_consistencygroup_correct_host(self,
mock_del_cg,
_mock_create_cg):
"""Test consistencygroup can be deleted.
Test consistencygroup can be deleted when volumes are on
the correct volume node.
"""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
host='host1@backend1#pool1',
status='creating',
size=1)
self.volume.host = 'host1@backend1'
self.volume.create_volume(self.context, volume.id, volume=volume)
self.volume.delete_consistencygroup(self.context, group)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
group.id)
self.assertEqual('deleted', cg.status)
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group.id)
self.assertTrue(mock_del_cg.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
return_value={'status': 'available'})
def test_delete_consistencygroup_wrong_host(self, *_mock_create_cg):
"""Test consistencygroup cannot be deleted.
Test consistencygroup cannot be deleted when volumes in the
group are not local to the volume node.
"""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
host='host1@backend1#pool1',
status='creating',
size=1)
self.volume.host = 'host1@backend2'
self.volume.create_volume(self.context, volume.id, volume=volume)
self.assertRaises(exception.InvalidVolume,
self.volume.delete_consistencygroup,
self.context,
group)
cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
# Group is not deleted
self.assertEqual('available', cg.status)
def test_create_volume_with_consistencygroup_invalid_type(self):
"""Test volume creation with ConsistencyGroup & invalid volume type."""
vol_type = db.volume_type_create(
context.get_admin_context(),
dict(name=conf_fixture.def_vol_type, extra_specs={})
)
db_vol_type = db.volume_type_get(context.get_admin_context(),
vol_type.id)
cg = {
'id': '1',
'name': 'cg1',
'volume_type_id': db_vol_type['id'],
}
fake_type = {
'id': '9999',
'name': 'fake',
}
vol_api = cinder.volume.api.API()
# Volume type must be provided when creating a volume in a
# consistency group.
self.assertRaises(exception.InvalidInput,
vol_api.create,
self.context, 1, 'vol1', 'volume 1',
consistencygroup=cg)
# Volume type must be valid.
self.assertRaises(exception.InvalidInput,
vol_api.create,
self.context, 1, 'vol1', 'volume 1',
volume_type=fake_type,
consistencygroup=cg)
@mock.patch.object(fake_driver.FakeISCSIDriver, 'get_volume_stats')
@mock.patch.object(driver.BaseVD, '_init_vendor_properties')
def test_get_capabilities(self, mock_init_vendor, mock_get_volume_stats):
stats = {
'volume_backend_name': 'lvm',
'vendor_name': 'Open Source',
'storage_protocol': 'iSCSI',
'vendor_prefix': 'abcd'
}
expected = stats.copy()
expected['properties'] = {
'compression': {
'title': 'Compression',
'description': 'Enables compression.',
'type': 'boolean'},
'qos': {
'title': 'QoS',
'description': 'Enables QoS.',
'type': 'boolean'},
'replication': {
'title': 'Replication',
'description': 'Enables replication.',
'type': 'boolean'},
'thin_provisioning': {
'title': 'Thin Provisioning',
'description': 'Sets thin provisioning.',
'type': 'boolean'},
}
# Test to get updated capabilities
discover = True
mock_get_volume_stats.return_value = stats
mock_init_vendor.return_value = ({}, None)
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities)
mock_get_volume_stats.assert_called_once_with(True)
# Test to get existing original capabilities
mock_get_volume_stats.reset_mock()
discover = False
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities)
self.assertFalse(mock_get_volume_stats.called)
# Normal test case to get vendor unique capabilities
def init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"abcd:minIOPS",
"Minimum IOPS QoS",
"Sets minimum IOPS if QoS is enabled.",
"integer",
minimum=10,
default=100)
return properties, 'abcd'
expected['properties'].update(
{'abcd:minIOPS': {
'title': 'Minimum IOPS QoS',
'description': 'Sets minimum IOPS if QoS is enabled.',
'type': 'integer',
'minimum': 10,
'default': 100}})
mock_get_volume_stats.reset_mock()
mock_init_vendor.reset_mock()
discover = True
mock_init_vendor.return_value = (
init_vendor_properties(self.volume.driver))
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities)
self.assertTrue(mock_get_volume_stats.called)
@mock.patch.object(fake_driver.FakeISCSIDriver, 'get_volume_stats')
@mock.patch.object(driver.BaseVD, '_init_vendor_properties')
@mock.patch.object(driver.BaseVD, '_init_standard_capabilities')
def test_get_capabilities_prefix_error(self, mock_init_standard,
mock_init_vendor,
mock_get_volume_stats):
# Error test case: propety does not match vendor prefix
def init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"aaa:minIOPS",
"Minimum IOPS QoS",
"Sets minimum IOPS if QoS is enabled.",
"integer")
self._set_property(
properties,
"abcd:compression_type",
"Compression type",
"Specifies compression type.",
"string")
return properties, 'abcd'
expected = {
'abcd:compression_type': {
'title': 'Compression type',
'description': 'Specifies compression type.',
'type': 'string'}}
discover = True
mock_get_volume_stats.return_value = {}
mock_init_standard.return_value = {}
mock_init_vendor.return_value = (
init_vendor_properties(self.volume.driver))
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities['properties'])
@mock.patch.object(fake_driver.FakeISCSIDriver, 'get_volume_stats')
@mock.patch.object(driver.BaseVD, '_init_vendor_properties')
@mock.patch.object(driver.BaseVD, '_init_standard_capabilities')
def test_get_capabilities_fail_override(self, mock_init_standard,
mock_init_vendor,
mock_get_volume_stats):
# Error test case: propety cannot override any standard capabilities
def init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"qos",
"Minimum IOPS QoS",
"Sets minimum IOPS if QoS is enabled.",
"integer")
self._set_property(
properties,
"ab::cd:compression_type",
"Compression type",
"Specifies compression type.",
"string")
return properties, 'ab::cd'
expected = {
'ab__cd:compression_type': {
'title': 'Compression type',
'description': 'Specifies compression type.',
'type': 'string'}}
discover = True
mock_get_volume_stats.return_value = {}
mock_init_standard.return_value = {}
mock_init_vendor.return_value = (
init_vendor_properties(self.volume.driver))
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities['properties'])
def test_delete_encryptied_volume(self):
self.volume_params['status'] = 'active'
volume = tests_utils.create_volume(self.context,
**self.volume_params)
vol_api = cinder.volume.api.API()
with mock.patch.object(
vol_api.key_manager,
'delete_key',
side_effect=Exception):
self.assertRaises(exception.InvalidVolume,
vol_api.delete,
self.context, volume)
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
def fake_local_path(self, volume):
return self.dst_path
def setUp(self):
super(CopyVolumeToImageTestCase, self).setUp()
self.dst_fd, self.dst_path = tempfile.mkstemp()
self.addCleanup(os.unlink, self.dst_path)
os.close(self.dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', self.fake_local_path)
self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.image_meta = {
'id': self.image_id,
'container_format': 'bare',
'disk_format': 'raw'
}
self.volume_id = 1
self.addCleanup(db.volume_destroy, self.context, self.volume_id)
self.volume_attrs = {
'id': self.volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'host': 'dummy'
}
def test_copy_volume_to_image_status_available(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_instance_deleted(self):
# During uploading volume to image if instance is deleted,
# volume should be in available status.
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# Creating volume testdata
self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
'45b1161abb02'
db.volume_create(self.context, self.volume_attrs)
# Storing unmocked db api function reference here, because we have to
# update volume status (set instance_uuid to None) before calling the
# 'volume_update_status_based_on_attached_instance_id' db api.
unmocked_db_api = db.volume_update_status_based_on_attachment
def mock_volume_update_after_upload(context, volume_id):
# First update volume and set 'instance_uuid' to None
# because after deleting instance, instance_uuid of volume is
# set to None
db.volume_update(context, volume_id, {'instance_uuid': None})
# Calling unmocked db api
unmocked_db_api(context, volume_id)
with mock.patch.object(
db,
'volume_update_status_based_on_attachment',
side_effect=mock_volume_update_after_upload) as mock_update:
# Start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
# Check 'volume_update_status_after_copy_volume_to_image'
# is called 1 time
self.assertEqual(1, mock_update.call_count)
# Check volume status has changed to available because
# instance is deleted
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_status_use(self):
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_exception(self):
self.image_meta['id'] = self.FAKE_UUID
# creating volume testdata
self.volume_attrs['status'] = 'in-use'
db.volume_create(self.context, self.volume_attrs)
# start test
self.assertRaises(exception.ImageNotFound,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_driver_not_initialized(self):
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# set initialized to False
self.volume.driver._initialized = False
# start test
self.assertRaises(exception.DriverNotInitialized,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume.status)
def test_copy_volume_to_image_driver_exception(self):
self.image_meta['id'] = self.image_id
image_service = fake_image.FakeImageService()
# create new image in queued state
queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
queued_image_meta = image_service.show(self.context, self.image_id)
queued_image_meta['id'] = queued_image_id
queued_image_meta['status'] = 'queued'
image_service.create(self.context, queued_image_meta)
# create new image in saving state
saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
saving_image_meta = image_service.show(self.context, self.image_id)
saving_image_meta['id'] = saving_image_id
saving_image_meta['status'] = 'saving'
image_service.create(self.context, saving_image_meta)
# create volume
self.volume_attrs['status'] = 'available'
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
with mock.patch.object(self.volume.driver,
'copy_volume_to_image') as driver_copy_mock:
driver_copy_mock.side_effect = exception.VolumeDriverException(
"Error")
# test with image not in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image shouldn't be deleted if it is not in queued state
image_service.show(self.context, self.image_id)
# test with image in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
queued_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# queued image should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
queued_image_id)
# test with image in saving state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
saving_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image in saving state should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
saving_image_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(vol_manager.VolumeManager, 'create_volume')
@mock.patch.object(fake_driver.FakeISCSIDriver, 'copy_volume_to_image')
def _test_copy_volume_to_image_with_image_volume(
self, mock_copy, mock_create, mock_quota_commit,
mock_quota_reserve):
self.flags(glance_api_version=2)
self.volume.driver.configuration.image_upload_use_cinder_backend = True
image_service = fake_image.FakeImageService()
image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
self.image_meta['id'] = image_id
self.image_meta['status'] = 'queued'
image_service.create(self.context, self.image_meta)
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
def fake_create(context, volume_id, **kwargs):
db.volume_update(context, volume_id, {'status': 'available'})
mock_create.side_effect = fake_create
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# return create image
image = image_service.show(self.context, image_id)
image_service.delete(self.context, image_id)
return image
def test_copy_volume_to_image_with_image_volume(self):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
def test_copy_volume_to_image_with_image_volume_qcow2(self):
self.image_meta['disk_format'] = 'qcow2'
image = self._test_copy_volume_to_image_with_image_volume()
self.assertIsNone(image.get('locations'))
@mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
@mock.patch.object(fake_image._FakeImageService, 'add_location',
side_effect=exception.Invalid)
def test_copy_volume_to_image_with_image_volume_failure(
self, mock_add_location, mock_delete):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertIsNone(image.get('locations'))
self.assertTrue(mock_delete.called)
class GetActiveByWindowTestCase(BaseVolumeTestCase):
def setUp(self):
super(GetActiveByWindowTestCase, self).setUp()
self.ctx = context.get_admin_context(read_deleted="yes")
self.db_attrs = [
{
'id': 1,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
},
{
'id': 2,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': 3,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
},
{
'id': 4,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': 5,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
}
]
def test_volume_get_active_by_window(self):
# Find all all volumes valid within a timeframe window.
# Not in window
db.volume_create(self.ctx, self.db_attrs[0])
# In - deleted in window
db.volume_create(self.ctx, self.db_attrs[1])
# In - deleted after window
db.volume_create(self.ctx, self.db_attrs[2])
# In - created in window
db.volume_create(self.context, self.db_attrs[3])
# Not of window.
db.volume_create(self.context, self.db_attrs[4])
volumes = db.volume_get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1),
project_id='p1')
self.assertEqual(3, len(volumes))
self.assertEqual(u'2', volumes[0].id)
self.assertEqual(u'3', volumes[1].id)
self.assertEqual(u'4', volumes[2].id)
def test_snapshot_get_active_by_window(self):
# Find all all snapshots valid within a timeframe window.
db.volume_create(self.context, {'id': 1})
for i in range(5):
self.db_attrs[i]['volume_id'] = 1
# Not in window
del self.db_attrs[0]['id']
snap1 = objects.Snapshot(self.ctx, **self.db_attrs[0])
snap1.create()
# In - deleted in window
del self.db_attrs[1]['id']
snap2 = objects.Snapshot(self.ctx, **self.db_attrs[1])
snap2.create()
# In - deleted after window
del self.db_attrs[2]['id']
snap3 = objects.Snapshot(self.ctx, **self.db_attrs[2])
snap3.create()
# In - created in window
del self.db_attrs[3]['id']
snap4 = objects.Snapshot(self.ctx, **self.db_attrs[3])
snap4.create()
# Not of window.
del self.db_attrs[4]['id']
snap5 = objects.Snapshot(self.ctx, **self.db_attrs[4])
snap5.create()
snapshots = objects.SnapshotList.get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1)).objects
self.assertEqual(3, len(snapshots))
self.assertEqual(snap2.id, snapshots[0].id)
self.assertEqual(u'1', snapshots[0].volume_id)
self.assertEqual(snap3.id, snapshots[1].id)
self.assertEqual(u'1', snapshots[1].volume_id)
self.assertEqual(snap4.id, snapshots[2].id)
self.assertEqual(u'1', snapshots[2].volume_id)
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "cinder.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volume_driver=self.driver_name,
volumes_dir=vol_tmpdir)
self.volume = importutils.import_object(CONF.volume_manager)
self.context = context.get_admin_context()
self.output = ""
self.configuration = conf.Configuration(None)
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
exec_patcher = mock.patch.object(self.volume.driver, '_execute',
_fake_execute)
exec_patcher.start()
self.addCleanup(exec_patcher.stop)
self.volume.driver.set_initialized()
self.addCleanup(self._cleanup)
def _cleanup(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
def _attach_volume(self):
"""Attach volumes to an instance."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class GenericVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver."""
driver_name = "cinder.tests.unit.fake_driver.LoggingVolumeDriver"
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_available(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
self.context.user_id = 'fake'
self.context.project_id = 'fake'
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.create_snapshot = mock.MagicMock()
self.volume.driver.delete_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse_temp_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
self.context.user_id = 'fake'
self.context.project_id = 'fake'
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver._create_temp_cloned_volume = mock.MagicMock()
self.volume.driver._delete_temp_volume = mock.MagicMock()
mock_volume_get.return_value = vol
self.volume.driver._create_temp_cloned_volume.return_value = temp_vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.volume.driver._create_temp_cloned_volume.assert_called_once_with(
self.context, vol)
self.volume.driver._delete_temp_volume.assert_called_once_with(
self.context, temp_vol)
@mock.patch.object(cinder.volume.driver.VolumeDriver,
'backup_use_temp_snapshot',
return_value=True)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector.LocalConnector,
'connect_volume')
@mock.patch.object(os_brick.initiator.connector.LocalConnector,
'check_valid_device',
return_value=True)
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties',
return_value={})
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse_temp_snapshot(self, mock_volume_get,
mock_get_connector_properties,
mock_check_device,
mock_connect_volume,
mock_file_open,
mock_temporary_chown,
mock_temp_snapshot):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
self.context.user_id = 'fake'
self.context.project_id = 'fake'
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
attach_info = {'device': {'path': '/dev/null'},
'driver_volume_type': 'LOCAL',
'data': {}}
backup_service = mock.Mock()
self.volume.driver.terminate_connection_snapshot = mock.MagicMock()
self.volume.driver.initialize_connection_snapshot = mock.MagicMock()
self.volume.driver.create_snapshot = mock.MagicMock()
self.volume.driver.delete_snapshot = mock.MagicMock()
self.volume.driver.create_export_snapshot = mock.MagicMock()
self.volume.driver.remove_export_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
mock_connect_volume.return_value = {'type': 'local',
'path': '/dev/null'}
f = mock_file_open.return_value = open('/dev/null', 'rb')
self.volume.driver._connect_device
backup_service.backup(backup_obj, f, None)
self.volume.driver.initialize_connection_snapshot.return_value = (
attach_info)
self.volume.driver.create_export_snapshot.return_value = (
{'provider_location': '/dev/null',
'provider_auth': 'xxxxxxxx'})
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.assertTrue(self.volume.driver.create_snapshot.called)
self.assertTrue(self.volume.driver.create_export_snapshot.called)
self.assertTrue(
self.volume.driver.initialize_connection_snapshot.called)
self.assertTrue(
self.volume.driver.terminate_connection_snapshot.called)
self.assertTrue(self.volume.driver.remove_export_snapshot.called)
self.assertTrue(self.volume.driver.delete_snapshot.called)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch('six.moves.builtins.open')
def test_restore_backup(self,
mock_open,
mock_get_connector_properties,
mock_temporary_chown):
dev_null = '/dev/null'
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id'], 'id': 'backup-for-%s' % vol['id']}
properties = {}
attach_info = {'device': {'path': dev_null}}
root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
volume_file = mock.MagicMock()
mock_open.return_value.__enter__.return_value = volume_file
mock_get_connector_properties.return_value = properties
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled.side_effect = (False,
True)
backup_service = mock.MagicMock()
for i in (1, 2):
self.volume.driver.restore_backup(self.context, backup, vol,
backup_service)
mock_get_connector_properties.assert_called_with(root_helper,
CONF.my_ip,
False, False)
self.volume.driver._attach_volume.assert_called_with(
self.context, vol, properties)
self.assertEqual(i, self.volume.driver._attach_volume.call_count)
self.volume.driver._detach_volume.assert_called_with(
self.context, attach_info, vol, properties)
self.assertEqual(i, self.volume.driver._detach_volume.call_count)
self.volume.driver.secure_file_operations_enabled.\
assert_called_with()
self.assertEqual(
i,
self.volume.driver.secure_file_operations_enabled.call_count
)
mock_temporary_chown.assert_called_once_with(dev_null)
mock_open.assert_called_with(dev_null, 'wb')
self.assertEqual(i, mock_open.call_count)
backup_service.restore.assert_called_with(backup, vol['id'],
volume_file)
self.assertEqual(i, backup_service.restore.call_count)
def test_enable_replication_invalid_state(self):
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
volume = tests_utils.create_volume(ctxt,
size=1,
host=CONF.host,
replication_status='enabled')
self.assertRaises(exception.InvalidVolume,
volume_api.enable_replication,
ctxt, volume)
def test_enable_replication_invalid_type(self):
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
volume = tests_utils.create_volume(self.context,
size=1,
host=CONF.host,
replication_status='disabled')
volume['volume_type_id'] = 'dab02f01-b50f-4ed6-8d42-2b5b9680996e'
fake_specs = {}
with mock.patch.object(volume_types,
'get_volume_type_extra_specs',
return_value = fake_specs):
self.assertRaises(exception.InvalidVolume,
volume_api.enable_replication,
ctxt,
volume)
def test_enable_replication(self):
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
volume = tests_utils.create_volume(self.context,
size=1,
host=CONF.host,
replication_status='disabled')
volume['volume_type_id'] = 'dab02f01-b50f-4ed6-8d42-2b5b9680996e'
fake_specs = {'replication_enabled': '<is> True'}
with mock.patch.object(volume_rpcapi.VolumeAPI,
'enable_replication') as mock_enable_rep,\
mock.patch.object(volume_types,
'get_volume_type_extra_specs',
return_value = fake_specs):
volume_api.enable_replication(ctxt, volume)
self.assertTrue(mock_enable_rep.called)
def test_enable_replication_driver_initialized(self):
volume = tests_utils.create_volume(self.context,
size=1,
host=CONF.host,
replication_status='enabling')
# set initialized to False
self.volume.driver._initialized = False
# start test
self.assertRaises(exception.DriverNotInitialized,
self.volume.enable_replication,
self.context,
volume)
def test_disable_replication_invalid_state(self):
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
volume = tests_utils.create_volume(ctxt,
size=1,
host=CONF.host,
replication_status='invalid-state')
self.assertRaises(exception.InvalidVolume,
volume_api.disable_replication,
ctxt, volume)
def test_disable_replication(self):
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
volume = tests_utils.create_volume(self.context,
size=1,
host=CONF.host,
replication_status='disabled')
volume['volume_type_id'] = 'dab02f01-b50f-4ed6-8d42-2b5b9680996e'
fake_specs = {'replication_enabled': '<is> True'}
with mock.patch.object(volume_rpcapi.VolumeAPI,
'disable_replication') as mock_disable_rep,\
mock.patch.object(volume_types,
'get_volume_type_extra_specs',
return_value = fake_specs):
volume_api.disable_replication(ctxt, volume)
self.assertTrue(mock_disable_rep.called)
volume['replication_status'] = 'enabled'
volume_api.disable_replication(ctxt, volume)
self.assertTrue(mock_disable_rep.called)
def test_disable_replication_driver_initialized(self):
volume = tests_utils.create_volume(self.context,
size=1,
host=CONF.host,
replication_status='disabling')
# set initialized to False
self.volume.driver._initialized = False
# start test
self.assertRaises(exception.DriverNotInitialized,
self.volume.disable_replication,
self.context,
volume)
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
@mock.patch.object(volutils, 'copy_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities')
def test_copy_volume_data(self,
mock_get_capabilities,
mock_copy,
mock_detach,
mock_attach,
mock_get_connector):
src_vol = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
dest_vol = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
mock_get_connector.return_value = {}
self.volume.driver._throttle = mock.MagicMock()
attach_expected = [
mock.call(self.context, dest_vol, {}, remote=False),
mock.call(self.context, src_vol, {}, remote=False)]
detach_expected = [
mock.call(self.context, {'device': {'path': 'bar'}},
dest_vol, {}, force=False, remote=False),
mock.call(self.context, {'device': {'path': 'foo'}},
src_vol, {}, force=False, remote=False)]
attach_volume_returns = [
({'device': {'path': 'bar'}}, dest_vol),
({'device': {'path': 'foo'}}, src_vol),
]
# Test case for sparse_copy_volume = False
mock_attach.side_effect = attach_volume_returns
mock_get_capabilities.return_value = {}
self.volume.driver.copy_volume_data(self.context,
src_vol,
dest_vol)
self.assertEqual(attach_expected, mock_attach.mock_calls)
mock_copy.assert_called_with(
'foo', 'bar', 1024, '1M',
throttle=self.volume.driver._throttle,
sparse=False)
self.assertEqual(detach_expected, mock_detach.mock_calls)
# Test case for sparse_copy_volume = True
mock_attach.reset_mock()
mock_detach.reset_mock()
mock_attach.side_effect = attach_volume_returns
mock_get_capabilities.return_value = {'sparse_copy_volume': True}
self.volume.driver.copy_volume_data(self.context,
src_vol,
dest_vol)
self.assertEqual(attach_expected, mock_attach.mock_calls)
mock_copy.assert_called_with(
'foo', 'bar', 1024, '1M',
throttle=self.volume.driver._throttle,
sparse=True)
self.assertEqual(detach_expected, mock_detach.mock_calls)
# cleanup resource
db.volume_destroy(self.context, src_vol['id'])
db.volume_destroy(self.context, dest_vol['id'])
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume')
@mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume')
@mock.patch.object(volutils, 'copy_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities')
def test_copy_volume_data_mgr(self,
mock_get_capabilities,
mock_copy,
mock_detach,
mock_attach,
mock_get_connector):
"""Test function of _copy_volume_data."""
src_vol = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
dest_vol = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
mock_get_connector.return_value = {}
self.volume.driver._throttle = mock.MagicMock()
attach_expected = [
mock.call(self.context, dest_vol, {}, remote=False),
mock.call(self.context, src_vol, {}, remote=False)]
detach_expected = [
mock.call(self.context, {'device': {'path': 'bar'}},
dest_vol, {}, force=False, remote=False),
mock.call(self.context, {'device': {'path': 'foo'}},
src_vol, {}, force=False, remote=False)]
attach_volume_returns = [
{'device': {'path': 'bar'}},
{'device': {'path': 'foo'}}
]
# Test case for sparse_copy_volume = False
mock_attach.side_effect = attach_volume_returns
mock_get_capabilities.return_value = {}
self.volume._copy_volume_data(self.context,
src_vol,
dest_vol)
self.assertEqual(attach_expected, mock_attach.mock_calls)
mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False)
self.assertEqual(detach_expected, mock_detach.mock_calls)
# Test case for sparse_copy_volume = True
mock_attach.reset_mock()
mock_detach.reset_mock()
mock_attach.side_effect = attach_volume_returns
mock_get_capabilities.return_value = {'sparse_copy_volume': True}
self.volume._copy_volume_data(self.context,
src_vol,
dest_vol)
self.assertEqual(attach_expected, mock_attach.mock_calls)
mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True)
self.assertEqual(detach_expected, mock_detach.mock_calls)
# cleanup resource
db.volume_destroy(self.context, src_vol['id'])
db.volume_destroy(self.context, dest_vol['id'])
class LVMVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver"""
driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver"
FAKE_VOLUME = {'name': 'test1',
'id': 'test1'}
@mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
def test_delete_volume_invalid_parameter(self, _mock_create_export):
self.configuration.volume_clear = 'zero'
self.configuration.volume_clear_size = 0
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
# Test volume without 'size' field and 'volume_size' field
self.assertRaises(exception.InvalidParameterValue,
lvm_driver._delete_volume,
self.FAKE_VOLUME)
@mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
def test_delete_volume_bad_path(self, _mock_create_export):
self.configuration.volume_clear = 'zero'
self.configuration.volume_clear_size = 0
self.configuration.volume_type = 'default'
volume = dict(self.FAKE_VOLUME, size=1)
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.VolumeBackendAPIException,
lvm_driver._delete_volume, volume)
@mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
def test_delete_volume_thinlvm_snap(self, _mock_create_export):
self.configuration.volume_clear = 'zero'
self.configuration.volume_clear_size = 0
self.configuration.lvm_type = 'thin'
self.configuration.iscsi_helper = 'tgtadm'
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
vg_obj=mox.MockAnything(),
db=db)
# Ensures that copy_volume is not called for ThinLVM
self.mox.StubOutWithMock(volutils, 'copy_volume')
self.mox.StubOutWithMock(volutils, 'clear_volume')
self.mox.StubOutWithMock(lvm_driver, '_execute')
self.mox.ReplayAll()
uuid = '00000000-0000-0000-0000-c3aa7ee01536'
fake_snapshot = {'name': 'volume-' + uuid,
'id': uuid,
'size': 123}
lvm_driver._delete_volume(fake_snapshot, is_snapshot=True)
def test_check_for_setup_error(self):
def get_all_volume_groups(vg):
return [{'name': 'cinder-volumes'}]
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
configuration = conf.Configuration(fake_opt, 'fake_group')
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
vg_obj=vg_obj, db=db)
lvm_driver.delete_snapshot = mock.Mock()
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
volume = tests_utils.create_volume(self.context,
host=socket.gethostname())
volume_id = volume['id']
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = 'fake'
backup['project_id'] = 'fake'
backup['host'] = socket.gethostname()
backup['availability_zone'] = '1'
backup['display_name'] = 'test_check_for_setup_error'
backup['display_description'] = 'test_check_for_setup_error'
backup['container'] = 'fake'
backup['status'] = 'creating'
backup['fail_reason'] = ''
backup['service'] = 'fake'
backup['parent_id'] = None
backup['size'] = 5 * 1024 * 1024
backup['object_count'] = 22
db.backup_create(self.context, backup)
lvm_driver.check_for_setup_error()
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
self.context.user_id = 'fake'
self.context.project_id = 'fake'
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
def test_retype_volume(self):
vol = tests_utils.create_volume(self.context)
new_type = 'fake'
diff = {}
host = 'fake_host'
retyped = self.volume.driver.retype(self.context, vol, new_type,
diff, host)
self.assertTrue(retyped)
def test_update_migrated_volume(self):
fake_volume_id = 'vol1'
fake_new_volume_id = 'vol2'
fake_provider = 'fake_provider'
original_volume_name = CONF.volume_name_template % fake_volume_id
current_name = CONF.volume_name_template % fake_new_volume_id
fake_volume = tests_utils.create_volume(self.context)
fake_volume['id'] = fake_volume_id
fake_new_volume = tests_utils.create_volume(self.context)
fake_new_volume['id'] = fake_new_volume_id
fake_new_volume['provider_location'] = fake_provider
fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
None, 'default')
with mock.patch.object(self.volume.driver, 'vg') as vg:
vg.return_value = fake_vg
vg.rename_volume.return_value = None
update = self.volume.driver.update_migrated_volume(self.context,
fake_volume,
fake_new_volume,
'available')
vg.rename_volume.assert_called_once_with(current_name,
original_volume_name)
self.assertEqual({'_name_id': None,
'provider_location': None}, update)
vg.rename_volume.reset_mock()
vg.rename_volume.side_effect = processutils.ProcessExecutionError
update = self.volume.driver.update_migrated_volume(self.context,
fake_volume,
fake_new_volume,
'available')
vg.rename_volume.assert_called_once_with(current_name,
original_volume_name)
self.assertEqual({'_name_id': fake_new_volume_id,
'provider_location': fake_provider},
update)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
self.context.user_id = 'fake'
self.context.project_id = 'fake'
mock_volume_get.return_value = vol
temp_snapshot = tests_utils.create_snapshot(self.context, vol['id'])
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver._create_temp_snapshot = mock.MagicMock()
self.volume.driver._delete_temp_snapshot = mock.MagicMock()
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info
self.volume.driver._create_temp_snapshot.return_value = temp_snapshot
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.volume.driver._create_temp_snapshot.assert_called_once_with(
self.context, vol)
self.volume.driver._delete_temp_snapshot.assert_called_once_with(
self.context, temp_snapshot)
def test_create_volume_from_snapshot_none_sparse(self):
with mock.patch.object(self.volume.driver, 'vg'), \
mock.patch.object(self.volume.driver, '_create_volume'), \
mock.patch.object(volutils, 'copy_volume') as mock_copy:
# Test case for thick LVM
src_volume = tests_utils.create_volume(self.context)
snapshot_ref = tests_utils.create_snapshot(self.context,
src_volume['id'])
dst_volume = tests_utils.create_volume(self.context)
self.volume.driver.create_volume_from_snapshot(dst_volume,
snapshot_ref)
volume_path = self.volume.driver.local_path(dst_volume)
snapshot_path = self.volume.driver.local_path(snapshot_ref)
volume_size = 1024
block_size = '1M'
mock_copy.assert_called_with(snapshot_path,
volume_path,
volume_size,
block_size,
execute=self.volume.driver._execute,
sparse=False)
def test_create_volume_from_snapshot_sparse(self):
self.configuration.lvm_type = 'thin'
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db)
with mock.patch.object(lvm_driver, 'vg'), \
mock.patch.object(lvm_driver, '_create_volume'), \
mock.patch.object(volutils, 'copy_volume') as mock_copy:
# Test case for thin LVM
lvm_driver._sparse_copy_volume = True
src_volume = tests_utils.create_volume(self.context)
snapshot_ref = tests_utils.create_snapshot(self.context,
src_volume['id'])
dst_volume = tests_utils.create_volume(self.context)
lvm_driver.create_volume_from_snapshot(dst_volume,
snapshot_ref)
volume_path = lvm_driver.local_path(dst_volume)
snapshot_path = lvm_driver.local_path(snapshot_ref)
volume_size = 1024
block_size = '1M'
mock_copy.assert_called_with(snapshot_path,
volume_path,
volume_size,
block_size,
execute=lvm_driver._execute,
sparse=True)
@mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
return_value=[{'name': 'cinder-volumes'}])
@mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
@mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
return_value=True)
def test_lvm_type_auto_thin_pool_exists(self, *_unused_mocks):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.lvm_type = 'auto'
vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
vg_obj=vg_obj)
lvm_driver.check_for_setup_error()
self.assertEqual('thin', lvm_driver.configuration.lvm_type)
@mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
return_value=[{'name': 'cinder-volumes'}])
@mock.patch.object(cinder.brick.local_dev.lvm.LVM, 'get_volumes',
return_value=[])
@mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
@mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
return_value=True)
def test_lvm_type_auto_no_lvs(self, *_unused_mocks):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.lvm_type = 'auto'
vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
vg_obj=vg_obj)
lvm_driver.check_for_setup_error()
self.assertEqual('thin', lvm_driver.configuration.lvm_type)
@mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
return_value=[{'name': 'cinder-volumes'}])
@mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
@mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
return_value=False)
def test_lvm_type_auto_no_thin_support(self, *_unused_mocks):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.lvm_type = 'auto'
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)
lvm_driver.check_for_setup_error()
self.assertEqual('default', lvm_driver.configuration.lvm_type)
@mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
return_value=[{'name': 'cinder-volumes'}])
@mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_volume')
@mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
return_value=False)
def test_lvm_type_auto_no_thin_pool(self, *_unused_mocks):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.lvm_type = 'auto'
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)
lvm_driver.check_for_setup_error()
self.assertEqual('default', lvm_driver.configuration.lvm_type)
@mock.patch.object(lvm.LVMVolumeDriver, 'extend_volume')
def test_create_cloned_volume_by_thin_snapshot(self, mock_extend):
self.configuration.lvm_type = 'thin'
fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False,
None, 'default'))
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
vg_obj=fake_vg,
db=db)
fake_volume = tests_utils.create_volume(self.context, size=1)
fake_new_volume = tests_utils.create_volume(self.context, size=2)
lvm_driver.create_cloned_volume(fake_new_volume, fake_volume)
fake_vg.create_lv_snapshot.assert_called_once_with(
fake_new_volume['name'], fake_volume['name'], 'thin')
mock_extend.assert_called_once_with(fake_new_volume, 2)
fake_vg.activate_lv.assert_called_once_with(
fake_new_volume['name'], is_snapshot=True, permanent=True)
def test_lvm_migrate_volume_no_loc_info(self):
host = {'capabilities': {}}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertFalse(moved)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_bad_loc_info(self):
capabilities = {'location_info': 'foo'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertFalse(moved)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_diff_driver(self):
capabilities = {'location_info': 'FooDriver:foo:bar:default:0'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertFalse(moved)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_diff_host(self):
capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertFalse(moved)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_in_use(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertFalse(moved)
self.assertIsNone(model_update)
@mock.patch.object(volutils, 'get_all_volume_groups',
return_value=[{'name': 'cinder-volumes'}])
def test_lvm_migrate_volume_same_volume_group(self, vgs):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.driver.migrate_volume, self.context,
vol, host)
@mock.patch.object(lvm.LVMVolumeDriver, '_create_volume')
@mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes')
@mock.patch.object(brick_lvm.LVM, 'delete')
@mock.patch.object(volutils, 'copy_volume',
side_effect=processutils.ProcessExecutionError)
@mock.patch.object(volutils, 'get_all_volume_groups',
return_value=[{'name': 'cinder-volumes'}])
def test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume,
mock_delete, mock_pvs,
mock_create):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes-old',
False, None, 'default')
self.assertRaises(processutils.ProcessExecutionError,
self.volume.driver.migrate_volume, self.context,
vol, host)
mock_delete.assert_called_once_with(vol)
def test_lvm_volume_group_missing(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes-3:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
def get_all_volume_groups():
return [{'name': 'cinder-volumes-2'}]
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertFalse(moved)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_proceed(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes-2:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'}
def fake_execute(*args, **kwargs):
pass
def get_all_volume_groups():
# NOTE(flaper87) Return just the destination
# host to test the check of dest VG existence.
return [{'name': 'cinder-volumes-2'}]
def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
return [{}]
with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes',
return_value = [{}]), \
mock.patch.object(self.volume.driver, '_execute') \
as mock_execute, \
mock.patch.object(volutils, 'copy_volume') as mock_copy, \
mock.patch.object(volutils, 'get_all_volume_groups',
side_effect = get_all_volume_groups), \
mock.patch.object(self.volume.driver, '_delete_volume'):
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
moved, model_update = \
self.volume.driver.migrate_volume(self.context, vol, host)
self.assertTrue(moved)
self.assertIsNone(model_update)
mock_copy.assert_called_once_with(
'/dev/mapper/cinder--volumes-testvol',
'/dev/mapper/cinder--volumes--2-testvol',
2048,
'1M',
execute=mock_execute,
sparse=False)
def test_lvm_migrate_volume_proceed_with_thin(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes-2:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'}
def fake_execute(*args, **kwargs):
pass
def get_all_volume_groups():
# NOTE(flaper87) Return just the destination
# host to test the check of dest VG existence.
return [{'name': 'cinder-volumes-2'}]
def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
return [{}]
self.configuration.lvm_type = 'thin'
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db)
with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes',
return_value = [{}]), \
mock.patch.object(lvm_driver, '_execute') \
as mock_execute, \
mock.patch.object(volutils, 'copy_volume') as mock_copy, \
mock.patch.object(volutils, 'get_all_volume_groups',
side_effect = get_all_volume_groups), \
mock.patch.object(lvm_driver, '_delete_volume'):
lvm_driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
lvm_driver._sparse_copy_volume = True
moved, model_update = \
lvm_driver.migrate_volume(self.context, vol, host)
self.assertTrue(moved)
self.assertIsNone(model_update)
mock_copy.assert_called_once_with(
'/dev/mapper/cinder--volumes-testvol',
'/dev/mapper/cinder--volumes--2-testvol',
2048,
'1M',
execute=mock_execute,
sparse=True)
@staticmethod
def _get_manage_existing_lvs(name):
"""Helper method used by the manage_existing tests below."""
lvs = [{'name': 'fake_lv', 'size': '1.75'},
{'name': 'fake_lv_bad_size', 'size': 'Not a float'}]
for lv in lvs:
if lv['name'] == name:
return lv
def _setup_stubs_for_manage_existing(self):
"""Helper to set up common stubs for the manage_existing tests."""
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.stubs.Set(self.volume.driver.vg, 'get_volume',
self._get_manage_existing_lvs)
@mock.patch.object(db.sqlalchemy.api, 'volume_get',
side_effect=exception.VolumeNotFound(
volume_id='d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'))
def test_lvm_manage_existing_not_found(self, mock_vol_get):
self._setup_stubs_for_manage_existing()
vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'
ref = {'source-name': 'fake_lv'}
vol = {'name': vol_name, 'id': 1, 'size': 0}
with mock.patch.object(self.volume.driver.vg, 'rename_volume'):
model_update = self.volume.driver.manage_existing(vol, ref)
self.assertIsNone(model_update)
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_lvm_manage_existing_already_managed(self, mock_conf):
self._setup_stubs_for_manage_existing()
mock_conf.volume_name_template = 'volume-%s'
vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'
ref = {'source-name': vol_name}
vol = {'name': 'test', 'id': 1, 'size': 0}
with mock.patch.object(self.volume.driver.vg, 'rename_volume'):
self.assertRaises(exception.ManageExistingAlreadyManaged,
self.volume.driver.manage_existing,
vol, ref)
def test_lvm_manage_existing(self):
"""Good pass on managing an LVM volume.
This test case ensures that, when a logical volume with the
specified name exists, and the size is as expected, no error is
returned from driver.manage_existing, and that the rename_volume
function is called in the Brick LVM code with the correct arguments.
"""
self._setup_stubs_for_manage_existing()
ref = {'source-name': 'fake_lv'}
vol = {'name': 'test', 'id': 1, 'size': 0}
def _rename_volume(old_name, new_name):
self.assertEqual(ref['source-name'], old_name)
self.assertEqual(vol['name'], new_name)
self.stubs.Set(self.volume.driver.vg, 'rename_volume',
_rename_volume)
size = self.volume.driver.manage_existing_get_size(vol, ref)
self.assertEqual(2, size)
model_update = self.volume.driver.manage_existing(vol, ref)
self.assertIsNone(model_update)
def test_lvm_manage_existing_bad_size(self):
"""Make sure correct exception on bad size returned from LVM.
This test case ensures that the correct exception is raised when
the information returned for the existing LVs is not in the format
that the manage_existing code expects.
"""
self._setup_stubs_for_manage_existing()
ref = {'source-name': 'fake_lv_bad_size'}
vol = {'name': 'test', 'id': 1, 'size': 2}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.driver.manage_existing_get_size,
vol, ref)
def test_lvm_manage_existing_bad_ref(self):
"""Error case where specified LV doesn't exist.
This test case ensures that the correct exception is raised when
the caller attempts to manage a volume that does not exist.
"""
self._setup_stubs_for_manage_existing()
ref = {'source-name': 'fake_nonexistent_lv'}
vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.volume.driver.manage_existing_get_size,
vol, ref)
def test_lvm_manage_existing_snapshot(self):
"""Good pass on managing an LVM snapshot.
This test case ensures that, when a logical volume's snapshot with the
specified name exists, and the size is as expected, no error is
returned from driver.manage_existing_snapshot, and that the
rename_volume function is called in the Brick LVM code with the correct
arguments.
"""
self._setup_stubs_for_manage_existing()
ref = {'source-name': 'fake_lv'}
snp = {'name': 'test', 'id': 1, 'size': 0}
def _rename_volume(old_name, new_name):
self.assertEqual(ref['source-name'], old_name)
self.assertEqual(snp['name'], new_name)
with mock.patch.object(self.volume.driver.vg, 'rename_volume') as \
mock_rename_volume:
mock_rename_volume.return_value = _rename_volume
size = self.volume.driver.manage_existing_snapshot_get_size(snp,
ref)
self.assertEqual(2, size)
model_update = self.volume.driver.manage_existing_snapshot(snp,
ref)
self.assertIsNone(model_update)
def test_lvm_manage_existing_snapshot_bad_ref(self):
"""Error case where specified LV snapshot doesn't exist.
This test case ensures that the correct exception is raised when
the caller attempts to manage a snapshot that does not exist.
"""
self._setup_stubs_for_manage_existing()
ref = {'source-name': 'fake_nonexistent_lv'}
snp = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.volume.driver.manage_existing_snapshot_get_size,
snp, ref)
def test_lvm_manage_existing_snapshot_bad_size(self):
"""Make sure correct exception on bad size returned from LVM.
This test case ensures that the correct exception is raised when
the information returned for the existing LVs is not in the format
that the manage_existing_snapshot code expects.
"""
self._setup_stubs_for_manage_existing()
ref = {'source-name': 'fake_lv_bad_size'}
snp = {'name': 'test', 'id': 1, 'size': 2}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.driver.manage_existing_snapshot_get_size,
snp, ref)
def test_lvm_unmanage(self):
volume = tests_utils.create_volume(self.context, status='available',
size=1, host=CONF.host)
ret = self.volume.driver.unmanage(volume)
self.assertEqual(ret, None)
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver"
def setUp(self):
super(ISCSITestCase, self).setUp()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
self.configuration.iscsi_ip_address = '0.0.0.0'
self.configuration.iscsi_port = 3260
def _attach_volume(self):
"""Attach volumes to an instance."""
volume_id_list = []
for index in range(3):
vol = {}
vol['size'] = 0
vol_ref = db.volume_create(self.context, vol)
self.volume.create_volume(self.context, vol_ref['id'])
vol_ref = db.volume_get(self.context, vol_ref['id'])
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
instance_uuid = '12345678-1234-5678-1234-567812345678'
db.volume_attached(self.context, vol_ref['id'], instance_uuid,
mountpoint)
volume_id_list.append(vol_ref['id'])
return volume_id_list
def test_do_iscsi_discovery(self):
self.configuration = conf.Configuration(None)
iscsi_driver = \
cinder.volume.targets.tgt.TgtAdm(
configuration=self.configuration)
utils.execute = lambda *a, **kw: \
("%s dummy" % CONF.iscsi_ip_address, '')
volume = {"name": "dummy",
"host": "0.0.0.0",
"id": "12345678-1234-5678-1234-567812345678"}
iscsi_driver._do_iscsi_discovery(volume)
def test_get_iscsi_properties(self):
volume = {"provider_location": '',
"id": "0",
"provider_auth": "a b c",
"attached_mode": "rw"}
iscsi_driver = \
cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration)
iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0"
result = iscsi_driver._get_iscsi_properties(volume)
self.assertEqual("0.0.0.0:0000", result["target_portal"])
self.assertEqual("iqn:iqn", result["target_iqn"])
self.assertEqual(0, result["target_lun"])
def test_get_iscsi_properties_multiple_portals(self):
volume = {"provider_location": '1.1.1.1:3260;2.2.2.2:3261,1 iqn:iqn 0',
"id": "0",
"provider_auth": "a b c",
"attached_mode": "rw"}
iscsi_driver = \
cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration)
result = iscsi_driver._get_iscsi_properties(volume)
self.assertEqual("1.1.1.1:3260", result["target_portal"])
self.assertEqual("iqn:iqn", result["target_iqn"])
self.assertEqual(0, result["target_lun"])
self.assertEqual(["1.1.1.1:3260", "2.2.2.2:3261"],
result["target_portals"])
self.assertEqual(["iqn:iqn", "iqn:iqn"], result["target_iqns"])
self.assertEqual([0, 0], result["target_luns"])
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version',
return_value=(2, 2, 100))
def test_get_volume_stats(self, _mock_get_version):
def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
return [{}]
@staticmethod
def _fake_get_all_volume_groups(root_helper, vg_name=None):
return [{'name': 'cinder-volumes',
'size': '5.52',
'available': '0.52',
'lv_count': '2',
'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
def _fake_get_volumes(obj, lv_name=None):
return [{'vg': 'fake_vg', 'name': 'fake_vol', 'size': '1000'}]
self.stubs.Set(brick_lvm.LVM,
'get_all_volume_groups',
_fake_get_all_volume_groups)
self.stubs.Set(brick_lvm.LVM,
'get_all_physical_volumes',
_fake_get_all_physical_volumes)
self.stubs.Set(brick_lvm.LVM,
'get_volumes',
_fake_get_volumes)
self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
self.volume.driver._update_volume_stats()
stats = self.volume.driver._stats
self.assertEqual(
float('5.52'), stats['pools'][0]['total_capacity_gb'])
self.assertEqual(
float('0.52'), stats['pools'][0]['free_capacity_gb'])
self.assertEqual(
float('5.0'), stats['pools'][0]['provisioned_capacity_gb'])
self.assertEqual(
int('1'), stats['pools'][0]['total_volumes'])
self.assertFalse(stats['sparse_copy_volume'])
# Check value of sparse_copy_volume for thin enabled case.
# This value is set in check_for_setup_error.
self.configuration = conf.Configuration(None)
self.configuration.lvm_type = 'thin'
vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db,
vg_obj=vg_obj)
lvm_driver.check_for_setup_error()
lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
lvm_driver._update_volume_stats()
stats = lvm_driver._stats
self.assertTrue(stats['sparse_copy_volume'])
def test_validate_connector(self):
iscsi_driver =\
cinder.volume.targets.tgt.TgtAdm(
configuration=self.configuration)
# Validate a valid connector
connector = {'ip': '10.0.0.2',
'host': 'fakehost',
'initiator': 'iqn.2012-07.org.fake:01'}
iscsi_driver.validate_connector(connector)
# Validate a connector without the initiator
connector = {'ip': '10.0.0.2', 'host': 'fakehost'}
self.assertRaises(exception.InvalidConnectorException,
iscsi_driver.validate_connector, connector)
class FibreChannelTestCase(DriverTestCase):
"""Test Case for FibreChannelDriver."""
driver_name = "cinder.volume.driver.FibreChannelDriver"
def test_initialize_connection(self):
self.assertRaises(NotImplementedError,
self.volume.driver.initialize_connection, {}, {})
def test_validate_connector(self):
"""validate_connector() successful use case.
validate_connector() does not throw an exception when
wwpns and wwnns are both set and both are not empty.
"""
connector = {'wwpns': ["not empty"],
'wwnns': ["not empty"]}
self.volume.driver.validate_connector(connector)
def test_validate_connector_no_wwpns(self):
"""validate_connector() throws exception when it has no wwpns."""
connector = {'wwnns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
def test_validate_connector_empty_wwpns(self):
"""validate_connector() throws exception when it has empty wwpns."""
connector = {'wwpns': [],
'wwnns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
def test_validate_connector_no_wwnns(self):
"""validate_connector() throws exception when it has no wwnns."""
connector = {'wwpns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
def test_validate_connector_empty_wwnns(self):
"""validate_connector() throws exception when it has empty wwnns."""
connector = {'wwnns': [],
'wwpns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
class VolumePolicyTestCase(test.TestCase):
def setUp(self):
super(VolumePolicyTestCase, self).setUp()
cinder.policy.init()
self.context = context.get_admin_context()
def test_check_policy(self):
self.mox.StubOutWithMock(cinder.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
cinder.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
cinder.volume.api.check_policy(self.context, 'attach')
def test_check_policy_with_target(self):
self.mox.StubOutWithMock(cinder.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'id': 2,
}
cinder.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
cinder.volume.api.check_policy(self.context, 'attach', {'id': 2})
class ImageVolumeCacheTestCase(BaseVolumeTestCase):
def setUp(self):
super(ImageVolumeCacheTestCase, self).setUp()
self.volume.driver.set_initialized()
@mock.patch('oslo_utils.importutils.import_object')
def test_cache_configs(self, mock_import_object):
opts = {
'image_volume_cache_enabled': True,
'image_volume_cache_max_size_gb': 100,
'image_volume_cache_max_count': 20
}
def conf_get(option):
if option in opts:
return opts[option]
else:
return None
mock_driver = mock.Mock()
mock_driver.configuration.safe_get.side_effect = conf_get
mock_driver.configuration.extra_capabilities = 'null'
def import_obj(*args, **kwargs):
return mock_driver
mock_import_object.side_effect = import_obj
manager = vol_manager.VolumeManager(volume_driver=mock_driver)
self.assertIsNotNone(manager)
self.assertIsNotNone(manager.image_volume_cache)
self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb)
self.assertEqual(20, manager.image_volume_cache.max_cache_size_count)
def test_delete_image_volume(self):
volume_params = {
'status': 'creating',
'host': 'some_host',
'size': 1
}
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **volume_params)
volume.status = 'available'
volume.save()
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
db.image_volume_cache_create(self.context,
volume['host'],
image_id,
datetime.datetime.utcnow(),
volume['id'],
volume['size'])
volume_api.delete(self.context, volume)
entry = db.image_volume_cache_get_by_volume_id(self.context,
volume['id'])
self.assertIsNone(entry)
|
{
"content_hash": "3c143c652bef4ac0b53d39a47dd7d768",
"timestamp": "",
"source": "github",
"line_count": 7610,
"max_line_length": 81,
"avg_line_length": 45.40026281208936,
"alnum_prop": 0.5468399055271262,
"repo_name": "apporc/cinder",
"id": "08c9010b7424bf6e923ad8b073678a10f13ef75d",
"size": "346227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13595277"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
}
|
"""Support for EZVIZ camera."""
import logging
from pyezviz.client import EzvizClient
from pyezviz.exceptions import HTTPError, InvalidURL, PyEzvizError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_TYPE,
CONF_URL,
CONF_USERNAME,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from .const import (
ATTR_TYPE_CAMERA,
ATTR_TYPE_CLOUD,
CONF_FFMPEG_ARGUMENTS,
DATA_COORDINATOR,
DATA_UNDO_UPDATE_LISTENER,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
)
from .coordinator import EzvizDataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.CAMERA,
Platform.SENSOR,
Platform.SWITCH,
]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up EZVIZ from a config entry."""
hass.data.setdefault(DOMAIN, {})
if not entry.options:
options = {
CONF_FFMPEG_ARGUMENTS: DEFAULT_FFMPEG_ARGUMENTS,
CONF_TIMEOUT: DEFAULT_TIMEOUT,
}
hass.config_entries.async_update_entry(entry, options=options)
if entry.data.get(CONF_TYPE) == ATTR_TYPE_CAMERA:
if hass.data.get(DOMAIN):
# Should only execute on addition of new camera entry.
# Fetch Entry id of main account and reload it.
for item in hass.config_entries.async_entries():
if item.data.get(CONF_TYPE) == ATTR_TYPE_CLOUD:
_LOGGER.info("Reload EZVIZ integration with new camera rtsp entry")
await hass.config_entries.async_reload(item.entry_id)
return True
try:
ezviz_client = await hass.async_add_executor_job(
_get_ezviz_client_instance, entry
)
except (InvalidURL, HTTPError, PyEzvizError) as error:
_LOGGER.error("Unable to connect to EZVIZ service: %s", str(error))
raise ConfigEntryNotReady from error
coordinator = EzvizDataUpdateCoordinator(
hass, api=ezviz_client, api_timeout=entry.options[CONF_TIMEOUT]
)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
DATA_COORDINATOR: coordinator,
DATA_UNDO_UPDATE_LISTENER: undo_listener,
}
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if entry.data.get(CONF_TYPE) == ATTR_TYPE_CAMERA:
return True
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id][DATA_UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
def _get_ezviz_client_instance(entry: ConfigEntry) -> EzvizClient:
"""Initialize a new instance of EzvizClientApi."""
ezviz_client = EzvizClient(
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
entry.data[CONF_URL],
entry.options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
ezviz_client.login()
return ezviz_client
|
{
"content_hash": "be6b8ea240b008c93af41cdeb744dd3c",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 87,
"avg_line_length": 30.45762711864407,
"alnum_prop": 0.6783528102392877,
"repo_name": "mezz64/home-assistant",
"id": "fbd49102f3c3875882fd70855b04f7f1bf9412fc",
"size": "3594",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ezviz/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
This example mirrors the 8-direction movement example here:
https://github.com/Mekire/meks-pygame-samples/blob/master/eight_dir_move.py
The difference is that this example uses delta time.
Delta time is a method of assuring that updates are unaffected by
changes in framerate.
"""
import os
import sys
import pygame as pg
CAPTION = "Delta Time"
SCREEN_SIZE = (500, 500)
TRANSPARENT = (0, 0, 0, 0)
BACKGROUND_COLOR = pg.Color("darkslategrey")
DIRECT_DICT = {pg.K_LEFT: (-1, 0),
pg.K_RIGHT: (1, 0),
pg.K_UP: (0, -1),
pg.K_DOWN: (0, 1)}
class Player(object):
"""This class will represent our user controlled character."""
SIZE = (100, 100)
def __init__(self, pos, speed):
"""
Aside from setting up our image and rect as seen previously,
in this example we create a new variable called true_pos.
Rects can only hold integers, so in order to preserve fractional
changes we need this new variable to hold the exact float position.
Without it, a body that moved slower than 1 pixel per frame would
never move.
"""
self.image = self.make_image()
self.rect = self.image.get_rect(center=pos)
self.true_pos = list(self.rect.center) # Exact float position.
self.speed = speed # Speed in pixels per second.
def make_image(self):
"""
Create player image. No differences from previous.
"""
image = pg.Surface(Player.SIZE).convert_alpha()
image.fill(TRANSPARENT)
rect = image.get_rect()
pg.draw.ellipse(image, pg.Color("black"), rect)
pg.draw.ellipse(image, pg.Color("tomato"), rect.inflate(-12, -12))
return image
def update(self, keys, screen_rect, dt):
"""
Update must accept a new argument dt (time delta between frames).
Adjustments to position must be multiplied by this delta.
Set the rect to true_pos once adjusted (automatically converts to int).
"""
for key in DIRECT_DICT:
if keys[key]:
self.true_pos[0] += DIRECT_DICT[key][0] * self.speed * dt
self.true_pos[1] += DIRECT_DICT[key][1] * self.speed * dt
self.rect.center = self.true_pos
self.clamp(screen_rect)
def clamp(self, screen_rect):
"""
Clamp the rect to the screen if needed and reset true_pos to the
rect position so they don't lose sync.
"""
if not screen_rect.contains(self.rect):
self.rect.clamp_ip(screen_rect)
self.true_pos = list(self.rect.center)
def draw(self, surface):
"""
Basic draw function.
"""
surface.blit(self.image, self.rect)
class App(object):
"""
Class responsible for program control flow.
"""
def __init__(self):
self.screen = pg.display.get_surface()
self.screen_rect = self.screen.get_rect()
self.clock = pg.time.Clock()
self.fps = 60
self.done = False
self.keys = pg.key.get_pressed()
self.player = Player(self.screen_rect.center, 300)
def event_loop(self):
"""
Basic event loop.
"""
for event in pg.event.get():
if event.type == pg.QUIT:
self.done = True
elif event.type in (pg.KEYDOWN, pg.KEYUP):
self.keys = pg.key.get_pressed()
def update(self, dt):
"""
Update must acccept and pass dt to all elements that need to update.
"""
self.player.update(self.keys, self.screen_rect, dt)
def render(self):
"""
Render all needed elements and update the display.
"""
self.screen.fill(BACKGROUND_COLOR)
self.player.draw(self.screen)
pg.display.update()
def main_loop(self):
"""
We now use the return value of the call to self.clock.tick to
get the time delta between frames.
"""
dt = 0
self.clock.tick(self.fps)
while not self.done:
self.event_loop()
self.update(dt)
self.render()
dt = self.clock.tick(self.fps) / 1000.0
def main():
"""
Initialize; create an App; and start the main loop.
"""
os.environ['SDL_VIDEO_CENTERED'] = '1'
pg.init()
pg.display.set_caption(CAPTION)
pg.display.set_mode(SCREEN_SIZE)
App().main_loop()
pg.quit()
sys.exit()
if __name__ == "__main__":
main()
|
{
"content_hash": "755537eb32a203df731180b78f075f08",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 30.58108108108108,
"alnum_prop": 0.58285461776403,
"repo_name": "imn00133/pythonSeminar17",
"id": "4fc4dccdb36ba4e6a7811567f2c8d24e9dfbcf34",
"size": "4548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonTeachingMaterials/python_issue_test/pygame/pygame_delta_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "249137"
}
],
"symlink_target": ""
}
|
from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField
from django.db.models.related import RelatedObject
from django.db.models.fields.related import add_lazy_relation
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
try:
all
except NameError:
# 2.4 compat
try:
from django.utils.itercompat import all
except ImportError:
# 1.1.X compat
def all(iterable):
for item in iterable:
if not item:
return False
return True
class TaggableRel(ManyToManyRel):
def __init__(self):
self.related_name = None
self.limit_choices_to = {}
self.symmetrical = True
self.multiple = True
self.through = None
class TaggableManager(RelatedField):
help_text = _("A comma-separated list of tags.")
verbose_name = _("Tags")
def __init__(self, verbose_name=None,
help_text=None,
through=None,
blank=False,
transform_on_save=False):
self.transform_on_save = transform_on_save
self.through = through or TaggedItem
self.rel = TaggableRel()
self.verbose_name = verbose_name
if help_text is not None:
self.help_text = help_text
self.blank = blank
self.editable = True
self.unique = False
self.creates_table = False
self.db_column = None
self.choices = None
self.serialize = False
self.null = True
self.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their tags." % model.__name__)
manager = _TaggableManager(
through=self.through, model=model, instance=instance
)
return manager
def contribute_to_class(self, cls, name):
self.name = self.column = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.through, basestring):
def resolve_related_class(field, model, cls):
self.through = model
self.post_through_setup(cls)
add_lazy_relation(
cls, self, self.through, resolve_related_class
)
else:
self.post_through_setup(cls)
def post_through_setup(self, cls):
self.use_gfk = (
self.through is None or issubclass(self.through, GenericTaggedItemBase)
)
self.rel.to = self.through._meta.get_field("tag").rel.to
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": self.verbose_name,
"help_text": self.help_text,
"required": not self.blank,
"transform_on_save": self.transform_on_save
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.module_name
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def m2m_reverse_name(self):
return self.through._meta.get_field_by_name("tag")[0].column
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field('content_object').column
def db_type(self, connection=None):
return None
def m2m_db_table(self):
return self.through._meta.db_table
def extra_filters(self, pieces, pos, negate):
if negate or not self.use_gfk:
return []
prefix = "__".join(["tagged_items"] + pieces[:pos-2])
cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
if len(cts) == 1:
return [("%s__content_type" % prefix, cts[0])]
return [("%s__content_type__in" % prefix, cts)]
def bulk_related_objects(self, new_objs, using):
return []
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance):
self.through = through
self.model = model
self.instance = instance
def get_query_set(self):
return self.through.tags_for(self.model, self.instance)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# If str_tags has 0 elements Django actually optimizes that to not do a
# query. Malcolm is very smart.
existing = self.through.tag_model().objects.filter(
name__in=str_tags
)
tag_objs.update(existing)
existing_names = set(t.name for t in existing)
existing_names_lower = set(t.name.lower() for t in existing)
for new_name in str_tags - existing_names:
if len(set([new_name.lower()]) - existing_names_lower) > 0:
tag_objs.add(self.through.tag_model().objects.create(name=new_name))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
@require_instance_manager
def set(self, *tags):
self.clear()
self.add(*tags)
@require_instance_manager
def remove(self, *tags):
self.through.objects.filter(**self._lookup_kwargs()).filter(
tag__name__in=tags).delete()
@require_instance_manager
def clear(self):
self.through.objects.filter(**self._lookup_kwargs()).delete()
def most_common(self):
return self.get_query_set().annotate(
num_times=models.Count(self.through.tag_relname())
).order_by('-num_times')
@require_instance_manager
def similar_objects(self, num=None, **filters):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
qs = self.through.objects.values(*lookup_kwargs.keys())
qs = qs.annotate(n=models.Count('pk'))
qs = qs.exclude(**lookup_kwargs)
subq = self.all()
qs = qs.filter(tag__in=list(subq))
qs = qs.order_by('-n')
if filters is not None:
qs = qs.filter(**filters)
if num is not None:
qs = qs[:num]
# TODO: This all feels like a bit of a hack.
items = {}
if len(lookup_keys) == 1:
# Can we do this without a second query by using a select_related()
# somehow?
f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
objs = f.rel.to._default_manager.filter(**{
"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
})
for obj in objs:
items[(getattr(obj, f.rel.field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result['content_type'], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.iteritems():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[
tuple(result[k] for k in lookup_keys)
]
obj.similar_tags = result["n"]
results.append(obj)
return results
def _get_subclasses(model):
subclasses = [model]
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if (isinstance(field, RelatedObject) and
getattr(field.field.rel, "parent_link", None)):
subclasses.extend(_get_subclasses(field.model))
return subclasses
|
{
"content_hash": "90b8c97defdd08e09bdefe8becf6b01d",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 86,
"avg_line_length": 34.52471482889734,
"alnum_prop": 0.5851321585903083,
"repo_name": "theatlantic/django-taggit2",
"id": "0dfd58423b84945133037ad1ea928e74498d1ba9",
"size": "9080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taggit/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "28303"
},
{
"name": "Python",
"bytes": "73639"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
def get_context(context):
return { "obj": frappe.get_doc("About Us Settings", "About Us Settings") }
|
{
"content_hash": "8baf8910528e8574383f4d82c35190a3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 75,
"avg_line_length": 31.4,
"alnum_prop": 0.7261146496815286,
"repo_name": "gangadhar-kadam/hrfrappe",
"id": "13554fe53ba7ef36848e82549b6a560fda0c6cd0",
"size": "262",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/templates/pages/about.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105798"
},
{
"name": "JavaScript",
"bytes": "1458963"
},
{
"name": "Python",
"bytes": "714974"
}
],
"symlink_target": ""
}
|
"""
Tools for managing a federated Carbon cluster.
"""
__version_info__ = ('1', '1', '10')
__version__ = '.'.join(__version_info__)
class CarbonateException(Exception):
pass
|
{
"content_hash": "f85fe4b39f292d6f477de0523b27b99a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 18.1,
"alnum_prop": 0.6187845303867403,
"repo_name": "jssjr/carbonate",
"id": "56e8fa8b17d1dec738220127b79a52632b0ab211",
"size": "181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "carbonate/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34760"
},
{
"name": "Ruby",
"bytes": "121"
},
{
"name": "Shell",
"bytes": "1262"
}
],
"symlink_target": ""
}
|
import pickle
import os
def read_pickle_ex(filepath):
with open(filepath, "rb") as fin:
return pickle.load(fin)
def read_pickle_withdefaults(filepath, default):
if os.path.exists(filepath):
return read_pickle_ex(filepath)
return default
def write_pickle_ex(obj, filepath):
with open(filepath, "wb") as fout:
pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)
def makedirs_ex(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def escape_quote(s):
return s.replace("'", "''")
|
{
"content_hash": "6dc97c7598825a2bac7d23f966de7cf0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.6721014492753623,
"repo_name": "5hun/imgov",
"id": "a68324a86954cfcc3a1697dab00769fcf7034cff",
"size": "576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11001"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from celery.utils.log import get_task_logger
from sentry.models import Organization, OrganizationMember
from sentry.tasks.base import instrumented_task
logger = get_task_logger(__name__)
@instrumented_task(name='sentry.tasks.send_sso_link_emails', queue='auth')
def email_missing_links(organization_id, **kwargs):
try:
org = Organization.objects.get(id=organization_id)
except Organization.DoesNotExist:
logger.warning(
'Organization(id=%s) does not exist',
organization_id,
)
return
member_list = OrganizationMember.objects.filter(
organization=org,
flags=~getattr(OrganizationMember.flags, 'sso:linked'),
)
for member in member_list:
member.send_sso_link_email()
|
{
"content_hash": "de418eb7a29f79dcc295f5dd281ad15f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 30.59259259259259,
"alnum_prop": 0.6900726392251816,
"repo_name": "daevaorn/sentry",
"id": "6e708a13b931eb8d7a410532ee12090ca633a356",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/tasks/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174905"
},
{
"name": "HTML",
"bytes": "200247"
},
{
"name": "JavaScript",
"bytes": "618375"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "8680827"
},
{
"name": "Shell",
"bytes": "746"
}
],
"symlink_target": ""
}
|
import bitcoin
from bitcoin import *
from bitcoin import hash160_to_p2sh, hash160_to_p2pkh
from util import print_error, profiler
import time
import sys
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import StringIO
import random
from keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False;
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
return map(lambda x: None if x == NO_SIGNATURE else x, x_sig)
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except Exception:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bytes.encode('hex'))
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
if item[0] == chr(0):
redeemScript = item.encode('hex')
d['address'] = bitcoin.hash160_to_p2sh(bitcoin.hash_160(redeemScript.decode('hex')))
d['type'] = 'p2wpkh-p2sh'
d['redeemScript'] = redeemScript
d['x_pubkeys'] = ["(witness)"]
d['pubkeys'] = ["(witness)"]
d['signatures'] = ['(witness)']
d['num_sig'] = 1
else:
# payto_pubkey
d['type'] = 'p2pk'
d['address'] = "(pubkey)"
d['signatures'] = [item.encode('hex')]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
x_pubkey = decoded[1][1].encode('hex')
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
import traceback
traceback.print_exc(file=sys.stdout)
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_sig = [x[1].encode('hex') for x in decoded[1:-1]]
dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_pubkeys = map(lambda x: x[1].encode('hex'), dec2[1:-2])
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = multisig_script(pubkeys, m)
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = hash160_to_p2sh(hash_160(redeemScript.decode('hex')))
def get_address_from_output_script(bytes):
decoded = [ x for x in script_GetOp(bytes) ]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_PUBKEY, decoded[0][1].encode('hex')
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1])
return TYPE_SCRIPT, bytes
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['scriptSig'] = scriptSig.encode('hex')
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
else:
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
if scriptSig:
parse_scriptSig(d, scriptSig)
return d
def parse_witness(vds):
n = vds.read_compact_size()
for i in range(n):
x = vds.read_bytes(vds.read_compact_size())
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = scriptPubKey.encode('hex')
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(raw.decode('hex'))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
is_segwit = (n_vin == 0)
if is_segwit:
marker = vds.read_bytes(1)
assert marker == chr(1)
n_vin = vds.read_compact_size()
d['inputs'] = list(parse_input(vds) for i in xrange(n_vin))
n_vout = vds.read_compact_size()
d['outputs'] = list(parse_output(vds,i) for i in xrange(n_vout))
if is_segwit:
d['witness'] = list(parse_witness(vds) for i in xrange(n_vin))
d['lockTime'] = vds.read_uint32()
return d
# pay & redeem scripts
def push_script(x):
return op_push(len(x)/2) + x
def get_scriptPubKey(addr):
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == bitcoin.ADDRTYPE_P2PKH:
script = '76a9' # op_dup, op_hash_160
script += push_script(hash_160.encode('hex'))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == bitcoin.ADDRTYPE_P2SH:
script = 'a9' # op_hash_160
script += push_script(hash_160.encode('hex'))
script += '87' # op_equal
else:
raise BaseException('unknown address type')
return script
def segwit_script(pubkey):
pubkey = safe_parse_pubkey(pubkey)
pkh = hash_160(pubkey.decode('hex')).encode('hex')
return '00' + push_script(pkh)
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)/2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif type(raw) in [str, unicode]:
self.raw = raw.strip() if raw else None
elif type(raw) is dict:
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, raw):
"""Add new signatures to a transaction"""
d = deserialize(raw)
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sigs1 = txin.get('signatures')
sigs2 = d['inputs'][i].get('signatures')
for sig in sigs2:
if sig in sigs1:
continue
pre_hash = Hash(self.serialize_preimage(i).decode('hex'))
# der to string
order = ecdsa.ecdsa.generator_secp256k1.order()
r, s = ecdsa.util.sigdecode_der(sig.decode('hex')[:-1], order)
sig_string = ecdsa.util.sigencode_string(r, s, order)
compressed = True
for recid in range(4):
public_key = MyVerifyingKey.from_signature(sig_string, recid, pre_hash, curve = SECP256k1)
pubkey = point_to_ser(public_key.pubkey.point, compressed).encode('hex')
if pubkey in pubkeys:
public_key.verify_digest(sig_string, pre_hash, sigdecode = ecdsa.util.sigdecode_string)
j = pubkeys.index(pubkey)
print_error("adding sig", i, j, pubkey, sig)
self._inputs[i]['signatures'][j] = sig
self._inputs[i]['x_pubkeys'][j] = pubkey
break
# redo raw
self.raw = self.serialize()
def deserialize(self):
if self.raw is None:
return
#self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw)
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
return self
@classmethod
def pay_script(self, output_type, addr):
if output_type == TYPE_SCRIPT:
return addr.encode('hex')
elif output_type == TYPE_ADDRESS:
return get_scriptPubKey(addr)
else:
raise TypeError('Unknown output type')
return script
@classmethod
def get_siglist(self, txin, estimate_size=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
# we assume that signature will be 0x48 bytes long
pk_list = [ "00" * 0x21 ] * num_sig
sig_list = [ "00" * 0x48 ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def serialize_witness(self, txin):
pubkeys, sig_list = self.get_siglist(txin)
n = len(pubkeys) + len(sig_list)
return var_int(n) + ''.join(push_script(x) for x in sig_list) + ''.join(push_script(x) for x in pubkeys)
@classmethod
def is_segwit_input(self, txin):
return txin['type'] in ['p2wpkh-p2sh']
@classmethod
def input_script(self, txin, estimate_size=False):
_type = txin['type']
if _type == 'coinbase':
return txin['scriptSig']
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'p2wpkh-p2sh':
redeem_script = txin.get('redeemScript') or segwit_script(pubkeys[0])
return push_script(redeem_script)
elif _type == 'address':
script += push_script(pubkeys[0])
else:
raise TypeError('Unknown txin type', _type)
return script
@classmethod
def get_preimage_script(self, txin):
# only for non-segwit
if txin['type'] == 'p2pkh':
return get_scriptPubKey(txin['address'])
elif txin['type'] == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif txin['type'] == 'p2wpkh-p2sh':
pubkey = txin['pubkeys'][0]
pkh = bitcoin.hash_160(pubkey.decode('hex')).encode('hex')
return '76a9' + push_script(pkh) + '88ac'
else:
raise TypeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return txin['prevout_hash'].decode('hex')[::-1].encode('hex') + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)/2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff), 4)
return s
def set_rbf(self, rbf):
nSequence = 0xffffffff - (2 if rbf else 0)
for txin in self.inputs():
txin['sequence'] = nSequence
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(output_type, addr)
s += var_int(len(script)/2)
s += script
return s
def serialize_preimage(self, i):
nVersion = int_to_hex(1, 4)
nHashType = int_to_hex(1, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txin = inputs[i]
if self.is_segwit_input(txin):
hashPrevouts = Hash(''.join(self.serialize_outpoint(txin) for txin in inputs).decode('hex')).encode('hex')
hashSequence = Hash(''.join(int_to_hex(txin.get('sequence', 0xffffffff), 4) for txin in inputs).decode('hex')).encode('hex')
hashOutputs = Hash(''.join(self.serialize_output(o) for o in outputs).decode('hex')).encode('hex')
outpoint = self.serialize_outpoint(txin)
scriptCode = push_script(self.get_preimage_script(txin))
amount = int_to_hex(txin['value'], 8)
nSequence = int_to_hex(txin.get('sequence', 0xffffffff), 4)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
else:
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if i==k else '') for k, txin in enumerate(inputs))
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
preimage = nVersion + txins + txouts + nLocktime + nHashType
return preimage
def is_segwit(self):
return any(self.is_segwit_input(x) for x in self.inputs())
def serialize(self, estimate_size=False, witness=True):
nVersion = int_to_hex(1, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
if witness and self.is_segwit():
marker = '00'
flag = '01'
witness = ''.join(self.serialize_witness(x) for x in inputs)
return nVersion + marker + flag + txins + txouts + witness + nLocktime
else:
return nVersion + txins + txouts + nLocktime
def hash(self):
print "warning: deprecated tx.hash()"
return self.txid()
def txid(self):
all_segwit = all(self.is_segwit_input(x) for x in self.inputs())
if not all_segwit and not self.is_complete():
return None
ser = self.serialize(witness=False)
return Hash(ser.decode('hex'))[::-1].encode('hex')
def wtxid(self):
ser = self.serialize(witness=True)
return Hash(ser.decode('hex'))[::-1].encode('hex')
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
return not any([x.get('sequence', 0xffffffff) < 0xffffffff - 1 for x in self.inputs()])
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return len(self.serialize(True)) / 2 if not self.is_complete() or self.raw is None else len(self.raw) / 2 # ASCII hex string
@classmethod
def estimated_input_size(self, txin):
'''Return an estimated of serialized input size in bytes.'''
script = self.input_script(txin, True)
return len(self.serialize_input(txin, script)) / 2
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = filter(None, txin.get('signatures',[]))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def sign(self, keypairs):
for i, txin in enumerate(self.inputs()):
num = txin['num_sig']
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, x_pubkey in enumerate(x_pubkeys):
signatures = filter(None, txin['signatures'])
if len(signatures) == num:
# txin is complete
break
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey)
sec = keypairs.get(x_pubkey)
pubkey = public_key_from_private_key(sec)
# add signature
pre_hash = Hash(self.serialize_preimage(i).decode('hex'))
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = bitcoin.MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
txin['signatures'][j] = sig.encode('hex') + '01'
txin['x_pubkeys'][j] = pubkey
self._inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs():
if type == TYPE_ADDRESS:
addr = x
elif type == TYPE_PUBKEY:
addr = bitcoin.public_key_to_p2pkh(x.decode('hex'))
else:
addr = 'SCRIPT ' + x.encode('hex')
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
def requires_fee(self, wallet):
# see https://en.bitcoin.it/wiki/Transaction_fees
#
# size must be smaller than 1 kbyte for free tx
size = len(self.serialize(-1))/2
if size >= 49000:
return True
# all outputs must be 0.001 BTC or larger for free tx
for addr, value in self.get_outputs():
if value < 100000:
return True
# priority must be large enough for free tx
threshold = 768000000
weight = 0
for txin in self.inputs():
height, conf, timestamp = wallet.get_tx_height(txin["prevout_hash"])
weight += txin["value"] * conf
priority = weight / size
print_error(priority, threshold)
return priority < threshold
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
try:
txt.decode('hex')
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
|
{
"content_hash": "817d3c197e3b65925f2d05946b8142ec",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 159,
"avg_line_length": 36.114285714285714,
"alnum_prop": 0.5643037974683545,
"repo_name": "argentumproject/electrum-arg",
"id": "a7721148452e3577ada221cee1fb18255d893600",
"size": "32827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/transaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3869"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "NSIS",
"bytes": "7179"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Python",
"bytes": "1244527"
},
{
"name": "Shell",
"bytes": "7098"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 9011))
sock.listen(5)
while True:
connection,address = sock.accept()
try:
connection.settimeout(5)
buf = connection.recv(1024)
print(buf)
if buf == '1':
connection.send('welcome to server!')
else:
connection.send('please go out!')
except socket.timeout:
print 'time out'
connection.close()
|
{
"content_hash": "b9db805caee16be87c732ff6c4404259",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 60,
"avg_line_length": 31,
"alnum_prop": 0.5161290322580645,
"repo_name": "buaawp/pums",
"id": "a797e6f77a9a1dc9df89735d64f8f743d0f499cd",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_deprecated/demo_1/myserver2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18014"
},
{
"name": "HTML",
"bytes": "13186"
},
{
"name": "JavaScript",
"bytes": "6154"
},
{
"name": "Python",
"bytes": "22365"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
import re
import time
import pandas as pd
import pdb
chromedriver = "./chromedriver"
link = "https://www.tradingview.com/chart/?symbol={}".format("VIX")
opts = ChromeOptions()
opts.add_experimental_option("detach", True)
driver = webdriver.Chrome(chromedriver, chrome_options=opts)
driver.get(link)
driver.set_window_size(2500, 1500)
time.sleep(2)
def get_candles():
l = []
v = "group-2JyOhh7Z-"
q = driver.find_element_by_class_name(v)
for i in range(90, 2090, 2):
action = ActionChains(driver)
action.move_to_element_with_offset(q, i, 500)
action.perform()
olhc_legend = driver.find_element_by_xpath(
"//*[@class='pane-legend-item-value-container']"
)
vol = driver.find_element_by_xpath(
"//*[@class='pane-legend-item-value pane-legend-line']"
)
o = driver.execute_script(
"return window.TradingView.Linking._activeChartWidget._model"
".m_model.m_panes[0]._model.m_crossHairSource._timeAxisView"
".renderer()._data.text"
)
print(olhc_legend.text, vol.text, o)
l.append((olhc_legend.text, vol.text, o))
return l
def shift_candles(k):
print("iteration", k)
action = ActionChains(driver)
for _ in range(20):
action.send_keys(Keys.LEFT).perform()
shift_candles(1)
shift_candles(1)
shift_candles(1)
shift_candles(1)
def main():
ee = []
for k in range(7, 60):
l = get_candles()
e = [
{
"O": re.split("[A-Z]", r[0])[1],
"H": re.split("[A-Z]", r[0])[2],
"L": re.split("[A-Z]", r[0])[3],
"C": re.split(r"(C[0-9]+.[0-9]+)", r[0])[1].strip("C"),
"vol": r[1],
"date": r[2],
}
for r in l
]
df = pd.DataFrame(e)
df.drop_duplicates(subset="date", inplace=True)
shift_candles(k)
time.sleep(2)
df.to_csv("VIX_temp_{}.csv".format(k))
if __name__ == "__main__":
main()
|
{
"content_hash": "db3275c61be85aefa9e5bfbef6092372",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 25.955555555555556,
"alnum_prop": 0.5787671232876712,
"repo_name": "jamiemori/robinhood_technical_analysis",
"id": "b3fcfed4d460b568c2b5ac6df8ae50b1b1efec5d",
"size": "2336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1769186"
},
{
"name": "Python",
"bytes": "5322"
}
],
"symlink_target": ""
}
|
from telemetry.core import video
from telemetry.core import web_contents
DEFAULT_TAB_TIMEOUT = 60
class Tab(web_contents.WebContents):
"""Represents a tab in the browser
The important parts of the Tab object are in the runtime and page objects.
E.g.:
# Navigates the tab to a given url.
tab.Navigate('http://www.google.com/')
# Evaluates 1+1 in the tab's JavaScript context.
tab.Evaluate('1+1')
"""
def __init__(self, inspector_backend, tab_list_backend, browser):
super(Tab, self).__init__(inspector_backend)
self._tab_list_backend = tab_list_backend
self._browser = browser
@property
def browser(self):
"""The browser in which this tab resides."""
return self._browser
@property
def url(self):
return self._inspector_backend.url
@property
def dom_stats(self):
"""A dictionary populated with measured DOM statistics.
Currently this dictionary contains:
{
'document_count': integer,
'node_count': integer,
'event_listener_count': integer
}
"""
dom_counters = self._inspector_backend.GetDOMStats(
timeout=DEFAULT_TAB_TIMEOUT)
assert (len(dom_counters) == 3 and
all([x in dom_counters for x in ['document_count', 'node_count',
'event_listener_count']]))
return dom_counters
def Activate(self):
"""Brings this tab to the foreground asynchronously.
Not all browsers or browser versions support this method.
Be sure to check browser.supports_tab_control.
Please note: this is asynchronous. There is a delay between this call
and the page's documentVisibilityState becoming 'visible', and yet more
delay until the actual tab is visible to the user. None of these delays
are included in this call.
Raises:
devtools_http.DevToolsClientConnectionError
devtools_client_backend.TabNotFoundError
tab_list_backend.TabUnexpectedResponseException
"""
self._tab_list_backend.ActivateTab(self.id)
def Close(self):
"""Closes this tab.
Not all browsers or browser versions support this method.
Be sure to check browser.supports_tab_control.
Raises:
devtools_http.DevToolsClientConnectionError
devtools_client_backend.TabNotFoundError
tab_list_backend.TabUnexpectedResponseException
exceptions.TimeoutException
"""
self._tab_list_backend.CloseTab(self.id)
@property
def screenshot_supported(self):
"""True if the browser instance is capable of capturing screenshots."""
return self._inspector_backend.screenshot_supported
def Screenshot(self, timeout=DEFAULT_TAB_TIMEOUT):
"""Capture a screenshot of the tab's contents.
Returns:
A telemetry.core.Bitmap.
"""
return self._inspector_backend.Screenshot(timeout)
@property
def video_capture_supported(self):
"""True if the browser instance is capable of capturing video."""
return self.browser.platform.CanCaptureVideo()
def Highlight(self, color):
"""Synchronously highlights entire tab contents with the given RgbaColor.
TODO(tonyg): It is possible that the z-index hack here might not work for
all pages. If this happens, DevTools also provides a method for this.
"""
self.ExecuteJavaScript("""
(function() {
var screen = document.createElement('div');
screen.style.background = 'rgba(%d, %d, %d, %d)';
screen.style.position = 'fixed';
screen.style.top = '0';
screen.style.left = '0';
screen.style.width = '100%%';
screen.style.height = '100%%';
screen.style.zIndex = '2147483638';
document.body.appendChild(screen);
requestAnimationFrame(function() {
requestAnimationFrame(function() {
window.__telemetry_screen_%d = screen;
});
});
})();
""" % (color.r, color.g, color.b, color.a, int(color)))
self.WaitForJavaScriptExpression(
'!!window.__telemetry_screen_%d' % int(color), 5)
def ClearHighlight(self, color):
"""Clears a highlight of the given bitmap.RgbaColor."""
self.ExecuteJavaScript("""
(function() {
document.body.removeChild(window.__telemetry_screen_%d);
requestAnimationFrame(function() {
requestAnimationFrame(function() {
window.__telemetry_screen_%d = null;
console.time('__ClearHighlight.video_capture_start');
console.timeEnd('__ClearHighlight.video_capture_start');
});
});
})();
""" % (int(color), int(color)))
self.WaitForJavaScriptExpression(
'!window.__telemetry_screen_%d' % int(color), 5)
def StartVideoCapture(self, min_bitrate_mbps,
highlight_bitmap=video.HIGHLIGHT_ORANGE_FRAME):
"""Starts capturing video of the tab's contents.
This works by flashing the entire tab contents to a arbitrary color and then
starting video recording. When the frames are processed, we can look for
that flash as the content bounds.
Args:
min_bitrate_mbps: The minimum caputre bitrate in MegaBits Per Second.
The platform is free to deliver a higher bitrate if it can do so
without increasing overhead.
"""
self.Highlight(highlight_bitmap)
self.browser.platform.StartVideoCapture(min_bitrate_mbps)
self.ClearHighlight(highlight_bitmap)
@property
def is_video_capture_running(self):
return self.browser.platform.is_video_capture_running
def StopVideoCapture(self):
"""Stops recording video of the tab's contents.
This looks for the initial color flash in the first frame to establish the
tab content boundaries and then omits all frames displaying the flash.
Returns:
video: A video object which is a telemetry.core.Video
"""
return self.browser.platform.StopVideoCapture()
def GetCookieByName(self, name, timeout=DEFAULT_TAB_TIMEOUT):
"""Returns the value of the cookie by the given |name|."""
return self._inspector_backend.GetCookieByName(name, timeout)
def CollectGarbage(self):
self._inspector_backend.CollectGarbage()
def ClearCache(self, force):
"""Clears the browser's networking related disk, memory and other caches.
Args:
force: Iff true, navigates to about:blank which destroys the previous
renderer, ensuring that even "live" resources in the memory cache are
cleared.
"""
self.browser.platform.FlushDnsCache()
self.ExecuteJavaScript("""
if (window.chrome && chrome.benchmarking &&
chrome.benchmarking.clearCache) {
chrome.benchmarking.clearCache();
chrome.benchmarking.clearPredictorCache();
chrome.benchmarking.clearHostResolverCache();
}
""")
if force:
self.Navigate('about:blank')
|
{
"content_hash": "02e5895b98d64f4f89a228c4ba46fdf6",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 80,
"avg_line_length": 34.14427860696517,
"alnum_prop": 0.6698236922628588,
"repo_name": "sgraham/nope",
"id": "4dc251f7c1f418eb4a0d3502dbbe95f5bc56d43e",
"size": "7026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/tab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "39967"
},
{
"name": "C",
"bytes": "4061434"
},
{
"name": "C++",
"bytes": "279546186"
},
{
"name": "CMake",
"bytes": "27212"
},
{
"name": "CSS",
"bytes": "919339"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "15989749"
},
{
"name": "Java",
"bytes": "7541683"
},
{
"name": "JavaScript",
"bytes": "32372588"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "40513"
},
{
"name": "Objective-C",
"bytes": "1584184"
},
{
"name": "Objective-C++",
"bytes": "8249988"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "169060"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427339"
},
{
"name": "Python",
"bytes": "8346306"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "844553"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import os
from pathlib import Path
from novelsCrawler.settings import DOWNLOADS
def main(novel_dir=DOWNLOADS, gbk_dir=None):
if gbk_dir is None or os.path.isdir(gbk_dir) is False:
gbk_dir = os.path.join(novel_dir, "gbk_novel")
if os.path.isdir(gbk_dir) is False:
os.mkdir(gbk_dir)
novel_dir = Path(novel_dir)
novel_list = novel_dir.glob("*.txt")
# novel_list = [str(novel) for novel in novel_list]
for novel in list(novel_list)[:1]:
new_novel_path = os.path.join(gbk_dir, novel.name)
with open(str(novel), 'r', encoding="utf-8") as fs, \
open(new_novel_path, 'w', encoding="gbk") as fd:
s = fs.read()
fd.write(s)
if __name__ == '__main__':
main()
|
{
"content_hash": "526f3a6084c68945549d5a2ab19d6752",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 64,
"avg_line_length": 31.625,
"alnum_prop": 0.5915678524374176,
"repo_name": "yytang2012/novels-crawler",
"id": "b99a4d602fa882a987d016ab76b776a007987870",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert_to_gbk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165709"
}
],
"symlink_target": ""
}
|
import math
"""
This prediction models need a minimum range of input values of 15-20 to be able to make future predictions.
"""
class Prediction_Models:
def __init__(self, logger):
self.logger = logger
def average(self, x):
assert len(x) > 0
return float(sum(x)) / len(x)
def correlation(self, x, y):
assert len(x) == len(y)
n = len(x)
assert n > 0
avg_x = self.average(x)
avg_y = self.average(y)
diffprod = 0
xdiff2 = 0
ydiff2 = 0
for idx in range(n):
xdiff = x[idx] - avg_x
ydiff = y[idx] - avg_y
diffprod += xdiff * ydiff
xdiff2 += xdiff * xdiff
ydiff2 += ydiff * ydiff
return diffprod / math.sqrt(xdiff2 * ydiff2)
def linear_regression(self, php_resp_time, num_predictions):
forecast = []
return forecast
def vector_auto_regression(self, php_resp_time, cpu_load, num_predictions):
#year = 1921 # year to be mapped with the values
forecast = [] # predict array
return forecast
def linear_regression_ols(self, php_resp_time, num_predictions):
forecast = []
return forecast
def multiple_regression(self, php_resp_time, req_rate, cpu_usage, num_predictions):
forecast = []
return forecast
def auto_regression(self, php_resp_time, num_predictions):
forecast = []
return forecast
def arma (self,php_resp_time, num_predictions):
forecast = []
return forecast
def holtwinters(self, y, alpha, beta, gamma, c, debug=False):
"""
y - time series data.
alpha , beta, gamma - exponential smoothing coefficients
for level, trend, seasonal components.
c - extrapolated future data points.
4 quarterly
7 weekly.
12 monthly
The length of y must be a an integer multiple (> 2) of c.
"""
#Compute initial b and intercept using the first two complete c periods.
forecast = []
return forecast
def exponential_smoothing (self, php_resp_time, num_predictions ):
forecast = []
return forecast
|
{
"content_hash": "3012b77a23c5c8281ebb39ace5fcfee9",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 112,
"avg_line_length": 27.065934065934066,
"alnum_prop": 0.5245635403978888,
"repo_name": "ema/conpaas",
"id": "57c7e7fe9dc8f6a4a7f617f1c9d40b63e65d8d7c",
"size": "2464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conpaas-services/src/conpaas/services/webservers/manager/autoscaling/prediction/prediction_models_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59192"
},
{
"name": "Java",
"bytes": "399657"
},
{
"name": "JavaScript",
"bytes": "108113"
},
{
"name": "PHP",
"bytes": "1824901"
},
{
"name": "Python",
"bytes": "2405080"
},
{
"name": "Shell",
"bytes": "157790"
}
],
"symlink_target": ""
}
|
import unittest, random, sys, time, json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import h2o_kmeans, h2o_import as h2i, h2o_jobs as h2j
def define_params(SEED):
paramDict = {
# always do grid (see default below)..no destination key should be specified if grid?
# comma separated or range from:to:step
'k': ['2,3,4', '2,4'],
'initialization': ['None', 'PlusPlus', 'Furthest'],
# not used in Grid?
# 'cols': [None, "0", "3", "0,1,2,3,4,5,6"],
'max_iter': [1, 5, 10, 20], # FIX! comma separated or range from:to:step.
'seed': [None, 12345678, SEED],
'normalize': [None, 0, 1],
# 'destination_key:': "junk",
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=4)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeansGrid_params_rand2_fvec(self):
h2o.beta_features = True
if localhost:
csvFilenameList = [
# ('covtype.data', 60),
('covtype.data', 800),
]
else:
csvFilenameList = [
('covtype.data', 800),
]
importFolderPath = "standard"
for csvFilename, timeoutSecs in csvFilenameList:
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname,
timeoutSecs=2000, pollTimeoutSecs=60)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
paramDict = define_params(SEED)
for trial in range(3):
# default
destinationKey = csvFilename + "_" + str(trial) + '.hex'
params = {'k': '2,3', 'destination_key': destinationKey}
h2o_kmeans.pickRandKMeansParams(paramDict, params)
kwargs = params.copy()
start = time.time()
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=60, noPoll=True, **kwargs)
h2j.pollWaitJobs(timeoutSecs=timeoutSecs, pollTimeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "FIX! how do we get results..need redirect_url"
print "Have to inspect different models? (grid)"
print "kmeans grid end on ", csvPathname, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
# h2o_kmeans.simpleCheckKMeans(self, kmeans, **kwargs)
### print h2o.dump_json(kmeans)
# destination_key is ignored by kmeans...what are the keys for the results
# inspect = h2o_cmd.runInspect(None,key=destinationKey)
# print h2o.dump_json(inspect)
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "1c8c6478d8b6cb1ba5093ca4cb40f0b1",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 105,
"avg_line_length": 39.05494505494506,
"alnum_prop": 0.5469893078221723,
"repo_name": "woobe/h2o",
"id": "a464baf9e64f93f96da26a35eb23ddd826c47abb",
"size": "3554",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/testdir_multi_jvm/test_KMeansGrid_params_rand2_fvec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import httplib
import json
import logging
import socket
import urlparse
import uuid
LOG = logging.getLogger(__name__)
def log_curl_request(conn, base_url, url, method, kwargs):
curl = ['curl -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % (key, value)
curl.append(header)
if 'body' in kwargs:
curl.append('-d \'%s\'' % kwargs['body'])
curl.append('http://%s:%d%s%s' % (conn.host, conn.port, base_url, url))
LOG.debug(' '.join(curl))
def log_http_response(resp, body=None):
status = (resp.version / 10.0, resp.status, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.getheaders()])
dump.append('')
if body:
dump.extend([body, ''])
LOG.debug('\n'.join(dump))
def make_connection_url(base_url, url):
return '%s/%s' % (base_url.rstrip('/'), url.lstrip('/'))
def http_request(conn, base_url, url, method, **kwargs):
log_curl_request(conn, base_url, url, method, kwargs)
try:
conn_url = make_connection_url(base_url, url)
conn.request(method, conn_url, **kwargs)
resp = conn.getresponse()
except socket.gaierror as e:
message = ('Error finding address for %(url)s: %(e)s' %
{'url': url, 'e': e})
raise RuntimeError(message)
except (socket.error, socket.timeout) as e:
message = ('Error communicating with %(endpoint)s %(e)s' %
{'endpoint': 'http://%s:%d' % (conn.host, conn.port),
'e': e})
raise RuntimeError(message)
if 300 <= resp.status < 600:
LOG.warn('Request returned failure/redirect status.')
raise RuntimeError('Status code %d returned' % resp.status)
body = resp.read()
log_http_response(resp, body)
return resp, body
def json_request(conn, base_url, url, method, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'body' in kwargs:
kwargs['body'] = json.dumps(kwargs['body'])
resp, body = http_request(conn, base_url, url, method, **kwargs)
content_type = resp.getheader('content-type', None)
if resp.status == 204 or resp.status == 205 or content_type is None:
body = None
elif 'application/json' in content_type:
try:
body = json.loads(body)
except ValueError:
LOG.error('Could not decode response body as JSON')
else:
body = None
return resp, body
def create_resource_class(conn, base_url, name, service_type, racks, flavors):
return json_request(conn, base_url, '/resource_classes', 'POST',
body=dict(name=name, service_type=service_type,
racks=racks, flavors=flavors))
def create_rack(conn, base_url, name, slots, location,
subnet, capacities, nodes):
return json_request(conn, base_url, '/racks', 'POST',
body=dict(name=name, slots=slots,
subnet=subnet, location=location,
capacities=capacities,nodes=nodes))
def set_nodes_on_rack(conn, base_url, rack_url, nodes):
return json_request(conn, base_url, rack_url, 'PUT',
body=dict(nodes=nodes))
def create_flavor(conn, base_url, resource_class_url, flavor):
return json_request(conn, base_url + resource_class_url, '/flavors', 'PUT', body=flavor)
def get_location(base_url, resp):
return urlparse.urlparse(resp.getheader('location')).path[len(base_url):]
def generate_uuid():
return str(uuid.uuid4())
def generate_data():
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
conn = httplib.HTTPConnection('localhost', 6385)
base_url = '/v1'
capacities = [
dict(name='total_cpu', value='64', unit='count'),
dict(name='total_memory', value='16384', unit='MB'),
]
nodes = []
while len(nodes) < 30:
nodes.append(dict(id=generate_uuid()))
rack_resp1, rack_body1 = create_rack(conn, base_url,
name='compute_1', slots=3,
subnet='192.168.1.0/24',
location='room d2, row 1',
capacities=capacities,
nodes=nodes[0:3])
rack_resp2, rack_body2 = create_rack(conn, base_url,
name='compute_2', slots=3,
subnet='192.168.2.0/24',
location='room d2, row 2',
capacities=capacities,
nodes=nodes[3:6])
compute_racks = [
dict(id=rack_body1.get('id'),links=rack_body1.get('links')),
dict(id=rack_body2.get('id'),links=rack_body2.get('links')),
]
rack_resp3, rack_body3 = create_rack(conn, base_url,
name='not_compute', slots=3,
subnet='192.168.3.0/24',
location='room d2, row 3',
capacities=capacities,
nodes=[nodes[7]])
non_compute_racks = [
dict(id=rack_body3.get('id'),links=rack_body3.get('links')),
]
flavors = [
dict(name='m1.small',
capacities=[
dict(name='cpu', value='1', unit='count'),
dict(name='memory', value='1.7', unit='GB'),
dict(name='storage', value='160', unit='GB'),
]),
dict(name='m1.medium',
capacities=[
dict(name='cpu', value='1', unit='count'),
dict(name='memory', value='3.75', unit='GB'),
dict(name='storage', value='410', unit='GB'),
]),
dict(name='m1.large',
capacities=[
dict(name='cpu', value='2', unit='count'),
dict(name='memory', value='7.5', unit='GB'),
dict(name='storage', value='840', unit='GB'),
]),
dict(name='m1.xlarge',
capacities=[
dict(name='cpu', value='4', unit='count'),
dict(name='memory', value='15', unit='GB'),
dict(name='storage', value='1680', unit='GB'),
]),
]
rc_resp1, rc_body1 = create_resource_class(conn, base_url,
name='compute-rc',
service_type='compute',
racks=compute_racks,
flavors=[flavors[3]])
rc_resp1, rc_body1 = create_resource_class(conn, base_url,
name='non-compute-rc',
service_type='not_compute',
racks=non_compute_racks,
flavors=[])
if __name__ == '__main__':
generate_data()
|
{
"content_hash": "fccaedbaf2bcacbf07061d3256c93c34",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 92,
"avg_line_length": 35.291866028708135,
"alnum_prop": 0.49783080260303686,
"repo_name": "ccrouch/tuskar",
"id": "6298ca5ae66bc3dd2fbf73c00678412c9e35e1b7",
"size": "8189",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/sample_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Functions and classes for managing collision tests between multiple objects.
This module defines the :class:`WorldCollider` class, which makes it easy to
ignore various collision pairs in a WorldModel.
For groups of objects, the :meth:`self_collision_iter` and
:meth:`group_collision_iter` functions perform broad-phase collision detection
to speed up collision testing.
The :meth:`ray_cast` function is a convenient way to return the first point of
intersection for a ray and a group of objects.
"""
from ..robotsim import *
from ..math import vectorops,se3
from typing import Union,Optional,List,Tuple,Sequence,Callable,Iterator
from .typing import Vector,Vector3,Matrix3,RigidTransform
BBType = Tuple[Vector3,Vector3]
CollidableType = Union[RobotModel,RobotModelLink,RigidObjectModel,TerrainModel]
WorldBodyType = Union[RobotModelLink,RigidObjectModel,TerrainModel]
def bb_create(*ptlist: Vector3) -> BBType:
"""Creates a bounding box from an optional set of points. If no points
are provided, creates an empty bounding box."""
if len(ptlist) == 0:
return [float('inf')]*3,[float('-inf')]*3
else:
bmin,bmax = list(ptlist[0]),list(ptlist[0])
for i in range(1,len(ptlist)):
x = ptlist[i]
bmin = [min(a,b) for (a,b) in zip(bmin,x)]
bmax = [max(a,b) for (a,b) in zip(bmax,x)]
return bmin,bmax
def bb_empty(bb: BBType) -> bool:
"""Returns True if the bounding box is empty"""
return any((a > b) for (a,b) in zip(bb[0],bb[1]))
def bb_intersect(a: BBType, b: BBType) -> bool:
"""Returns true if the bounding boxes (a[0]->a[1]) and (b[0]->b[1]) intersect"""
amin,amax=a
bmin,bmax=b
return not any(q < u or v < p for (p,q,u,v) in zip(amin,amax,bmin,bmax))
def bb_contains(bb: BBType, x: Vector3) -> bool:
"""Returns true if x is inside the bounding box bb"""
return not any(v < p or v > q for (p,q,v) in zip(bb[0],bb[1],x))
def bb_intersection(*bbs: BBType):
"""Returns the bounding box representing the intersection the given bboxes.
The result may be empty."""
return [max(x) for x in zip(*[b[0] for b in bbs])],[min(x) for x in zip(*[b[1] for b in bbs])]
def bb_union(*bbs: BBType):
"""Returns the smallest bounding box containing the given bboxes"""
return [min(x) for x in zip(*[b[0] for b in bbs])],[max(x) for x in zip(*[b[1] for b in bbs])]
def self_collision_iter(
geomlist: Sequence[Geometry3D],
pairs: Union[str,Callable[[int,int],bool],List[Tuple[int,int]]] = 'all'
) -> Iterator[Tuple[int,int]]:
"""Performs efficient self collision testing for a list of geometries.
Args:
geomlist (list of Geometry3D): the list of geometries
pairs: can be:
* 'all': all pairs are tested.
* a function test(i,j) -> bool taking geometry indices and
returning true if they should be tested
* list of pairs (i,j) of collision indices.
Uses a quick bounding box reject test.
Returns:
Iterator over colliding pairs (i,j) where i and
j are indices into geomlist.
"""
#bblist = [g.getBB() for g in geomlist]
if pairs=='all':
for i,g in enumerate(geomlist):
for j in range(i+1,len(geomlist)):
#if not bb_intersect(bblist[i],bblist[j]): continue
g2 = geomlist[j]
if g.collides(g2):
yield (i,j)
elif callable(pairs):
for i,g in enumerate(geomlist):
for j in range(i+1,len(geomlist)):
if not pairs(i,j): continue
#if not bb_intersect(bblist[i],bblist[j]): continue
g2 = geomlist[j]
if g.collides(g2):
yield (i,j)
else:
for (i,j) in pairs:
#if not bb_intersect(bblist[i],bblist[j]): continue
g =geomlist[i]
g2 = geomlist[j]
if g.collides(g2):
yield (i,j)
return
def group_collision_iter(
geomlist1: Sequence[Geometry3D],
geomlist2: Sequence[Geometry3D],
pairs: Union[str,Callable[[int,int],bool],List[Tuple[int,int]]] = 'all'
) -> Iterator[Tuple[int,int]]:
"""Tests whether two sets of geometries collide.
Args:
geomlist1 (list of Geometry3D): set 1
geomlist2 (list of Geometry3D): set 2
pairs: can be:
* 'all': all pairs are tested.
* a function test(i,j) -> bool, taking geomlist1 index i and
geomlist2 index j, and returning true if they should be tested
* list of pairs (i,j) of collision indices.
Uses a quick bounding box reject test.
Returns:
Iterator over colliding pairs (i,j) where i is
an index into geomlist1 and j is an index into geomlist.
"""
if len(geomlist1) == 0 or len(geomlist2) == 0: return
bblist1 = [g.getBB() for g in geomlist1]
bblist2 = [g.getBB() for g in geomlist2]
bb1 = bb_union(*bblist1)
bb2 = bb_union(*bblist2)
geoms1 = [(i,g) for (i,g) in enumerate(geomlist1) if bb_intersect(bblist1[i],bb2)]
geoms2 = [(i,g) for (i,g) in enumerate(geomlist2) if bb_intersect(bblist2[i],bb1)]
if pairs=='all':
for i,g in geoms1:
for j,g2 in geoms2:
#if not bb_intersect(bblist1[i],bblist2[j]): continue
if g.collides(g2):
yield (i,j)
elif callable(pairs):
for i,g in geoms1:
for j,g2 in geoms2:
if not pairs(i,j): continue
#if not bb_intersect(bblist1[i],bblist2[j]): continue
if g.collides(g2):
yield (i,j)
else:
for (i,j) in pairs:
#if not bb_intersect(bblist1[i],bblist2[j]): continue
g = geomlist1[i]
g2 = geomlist2[j]
if g.collides(g2):
yield (i,j)
def group_subset_collision_iter(
geomlist: Sequence[Geometry3D],
alist: Sequence[int],
blist: Sequence[int],
pairs='all'
) -> Iterator[Tuple[int,int]]:
"""Tests whether two subsets of geometries collide. Can be slightly faster
than `group_collision_iter` if `alist` and `blist` overlap.
Args:
geomlist (list of Geometry3D): a list of all possible geometries
alist (list of int): collision set 1, containing indices into geomlist
blist (list of int): collision set 2, containing indices into geomlist
pairs: can be:
* 'all': all pairs are tested.
* a function test(i,j) -> bool, taking geomlist1 index i and
geomlist2 index j, and returning true if they should be tested
* list of pairs (i,j) of collision indices. In this case, `alist`
and `blist` are ignored and can be set to None.
Uses a quick bounding box reject test.
Returns:
Iterator over colliding pairs (i,j) where i is
an index into alist and j is an index into glist.
"""
if len(alist) == 0 or len(blist) == 0: return
bblist = [None]*len(geomlist)
for id in alist:
bblist[id] = geomlist[id].getBB()
for id in blist:
if bblist[id] is not None:
bblist[id] = geomlist[id].getBB()
bb1 = bb_union(*[bblist[i] for i in alist])
bb2 = bb_union(*[bblist[i] for i in blist])
geoms1 = [(i,geomlist[i]) for i in alist if bb_intersect(bblist[i],bb2)]
geoms2 = [(i,geomlist[i]) for i in blist if bb_intersect(bblist[i],bb1)]
if pairs=='all':
for i,g in geoms1:
for j,g2 in geoms2:
#if not bb_intersect(bblist[i],bblist[j]): continue
if g.collides(g2):
yield (i,j)
elif callable(pairs):
for i,g in geoms1:
for j,g2 in geoms2:
if not pairs(i,j): continue
#if not bb_intersect(bblist[i],bblist[j]): continue
if g.collides(g2):
yield (i,j)
else:
for (i,j) in pairs:
#if not bb_intersect(bblist[i],bblist[j]): continue
g =geomlist[i]
g2 = geomlist[j]
if g.collides(g2):
yield (i,j)
def ray_cast(
geomlist: Sequence[Geometry3D],
s: Vector3,
d: Vector3
) -> Tuple[int,Vector3]:
"""Finds the first collision among the geometries in geomlist with the
ray at source s and direction d.
Returns:
A pair (index,point) if a collision is found, where:
* index is the index of the geometry in geomlist
* point is the collision point in world coordinates.
Returns None if no collision is found.
"""
res = None
dmin = 1e300
for i,g in enumerate(geomlist):
(coll,pt) = g.rayCast(s,d)
if coll:
dist = vectorops.dot(d,vectorops.sub(pt,s))
if dist < dmin:
dmin,res = dist,(i,pt)
return res
class WorldCollider:
"""
Used in planning routines to mask out objects in the world to check /
ignore when doing collision detection.
You should not need to interact directly with this object's attributes.
Instead, use the methods provided.
Attributes:
geomList (list): a list of (object,geom) pairs for all non-empty objects
in the world.
mask (list of sets of ints): indicating which items are activated for
collision detection. Basically, a sparse, symmetric boolean matrix
over len(geomList)*len(geomList) possible collision pairs.
terrains (list of ints): contains the geomList indices of each terrain
in the world.
rigidObjects (list of ints): contains the geomList indices of each
object in the world
robots (list of list of ints): contains the geomList indices of each
robot in the world.
"""
def __init__(self, world: WorldModel, ignore=[]):
"""Args:
world (WorldModel): the world to use
ignore (list, optional): a list of items to pass to ignoreCollision
"""
world.enableInitCollisions(True)
self.world = world
#a list of (object,geom) pairs
self.geomList = []
#self collision mask (2-D array)
self.mask = []
#indexing lists
self.terrains = []
self.rigidObjects = []
self.robots = []
for i in range(world.numTerrains()):
t = world.terrain(i)
g = t.geometry()
if g != None and g.type()!="":
self.terrains.append(len(self.geomList))
self.geomList.append((t,g))
else:
self.terrains.append(-1)
for i in range(world.numRigidObjects()):
o = world.rigidObject(i)
g = o.geometry()
if g != None and g.type()!="":
self.rigidObjects.append(len(self.geomList))
self.geomList.append((o,g))
else:
self.rigidObjects.append(-1)
for i in range(world.numRobots()):
r = world.robot(i)
self.robots.append([])
for j in range(r.numLinks()):
l = r.link(j)
g = l.geometry()
if g != None and g.type()!="":
self.robots[-1].append(len(self.geomList))
self.geomList.append((l,g))
else:
self.robots[-1].append(-1)
#construct the collision mask
for i in range(len(self.geomList)):
self.mask.append(set())
for t in self.terrains:
if t < 0: continue
for o in self.rigidObjects:
if o < 0: continue
self.mask[t].add(o)
self.mask[o].add(t)
for r in self.robots:
for l in r:
if l < 0: continue
#test for fixed links
if self.geomList[l][0].getParent() >= 0:
self.mask[l].add(t)
self.mask[t].add(l)
else:
#print("Ignoring fixed link...")
pass
for o in self.rigidObjects:
if o < 0: continue
for o2 in self.rigidObjects[:o]:
if o2 < 0: continue
self.mask[o].add(o2)
self.mask[o2].add(o)
for r in self.robots:
for l in r:
if l < 0: continue
self.mask[l].add(o)
self.mask[o].add(l)
for i,r in enumerate(self.robots):
#robot - robot collision
for r2 in self.robots[0:i]:
for l1 in r:
for l2 in r2:
if l1 < 0 or l2 < 0: continue
self.mask[l1].add(l2)
self.mask[l2].add(l1)
#robot self-collision
rob = self.geomList[r[0]][0].robot()
nl = rob.numLinks()
for i in range(nl):
for j in range(i):
if rob.selfCollisionEnabled(i,j):
self.mask[r[i]].add(r[j])
self.mask[r[j]].add(r[i])
for i in ignore:
self.ignoreCollision(i)
def _getGeomIndex(self,object) -> int:
"""Finds the geomList index corresponding to an object
Returns:
The index into self.geomList describing the object
"""
assert isinstance(object,(RobotModel,RobotModelLink,RigidObjectModel,TerrainModel))
for i,(o,g) in enumerate(self.geomList):
if o.world==object.world and o.getID()==object.getID():
assert o.getName()==object.getName()
assert type(o) == type(object)
return i
return None
def ignoreCollision(self, ign: Union[WorldBodyType,Tuple[WorldBodyType,WorldBodyType]] ) -> None:
"""Permanently removes an object or a pair of objects from
consideration.
Args:
ign: either a single body (RobotModelLink, RigidObjectModel,
TerrainModel) in the world, or a pair of bodies. In the former
case all collisions with that body will be ignored.
"""
if hasattr(ign,'__iter__'):
(a,b) = ign
ageom = self._getGeomIndex(a)
bgeom = self._getGeomIndex(b)
if ageom is None or bgeom is None:
raise ValueError("Invalid ignore collision item, must be a pair of bodies in the world")
self.mask[ageom].discard(bgeom)
self.mask[bgeom].discard(ageom)
else:
#ignore all collisions with the given geometry
geom = self._getGeomIndex(ign)
if geom is None:
raise ValueError("Invalid ignore collision item, must be a body in the world")
for i in self.mask[geom]:
#remove it from the list
self.mask[i].discard(geom)
self.mask[geom]=set()
def isCollisionEnabled(self, obj_or_pair: Union[WorldBodyType,Tuple[CollidableType,WorldBodyType]] ) -> bool:
"""Returns true if the object or pair of objects are considered for
collision.
Args:
obj_or_pair: either a single body (RobotModelLink,
RigidObjectModel, TerrainModel) in the world, or a pair of
bodies. In the former case, True is returned if the body
is checked with anything.
"""
if hasattr(obj_or_pair,'__iter__'):
(a,b) = obj_or_pair
ageom = self._getGeomIndex(a)
bgeom = self._getGeomIndex(b)
if ageom is None or bgeom is None:
return False
return ageom in self.mask[bgeom] or bgeom in self.mask[ageom]
else:
geom = self._getGeomIndex(obj_or_pair)
if geom is None:
return False
return len(self.mask[geom]) > 0
def collisionTests(self,
filter1: Optional[Callable[[WorldBodyType],bool]] = None,
filter2: Optional[Callable[[WorldBodyType],bool]] = None,
bb_reject: bool = True
) -> Iterator[Tuple[Tuple[WorldBodyType,Geometry3D],Tuple[WorldBodyType,Geometry3D]]]:
"""Returns an iterator over potential colliding pairs, which
should be tested for collisions.
Usage:
To test collisions, you call::
for i,j in worldCollider.collisionTests():
if i[1].collides(j[1]):
print("Object",i[0].getName(),"collides with",j[0].getName())
(Note that for this purpose is easier to just call :meth:`collisions`;
however you may want to use `collisionTests` to perform other queries
like proximity detection.)
Args:
filter1 (function, optional): has form f(object) -> bool
filter2 (function, optional): has form f(object) -> bool
bb_reject (bool, optional): True if we should quick reject objects
whose bounding boxes are not overlapping (broad phase collision
detection). If false, all non-ignored collision pairs are
returned.
See :meth:`collisions` for an explanation of how filter1 and filter2
are interpreted
Returns:
Iterates over ``((object1,geom1),(object2,geom2))`` pairs indicating
which objects should be tested for collision. They have type:
- object1, object2: a RobotModelLink, RigidObjectModel, or
TerrainModel
- geom1, geom2: Geometry3D corresponding to those objects.
"""
res = []
if filter1 is None: #all pairs
if bb_reject: bblist = [g[1].getBB() for g in self.geomList]
for (i,(g,objs)) in enumerate(zip(self.geomList,self.mask)):
for objIndex in objs:
#already checked
if objIndex < i: continue
if bb_reject and not bb_intersect(bblist[i],bblist[objIndex]): continue
yield (g,self.geomList[objIndex])
elif filter2 is None: #self collision with objects passing filter1
if bb_reject:
#TODO: bounding box rejection, if requested
pass
for (i,(g,objs)) in enumerate(zip(self.geomList,self.mask)):
if not filter1(g[0]): continue
for objIndex in objs:
#already checked
if objIndex < i: continue
if not filter1(self.geomList[objIndex][0]): continue
yield (g,self.geomList[objIndex])
else: #checks everything
for (i,(g,objs)) in enumerate(zip(self.geomList,self.mask)):
f1 = filter1(g[0])
f2 = filter2(g[0])
for objIndex in objs:
#already checked
if self.geomList[objIndex][0]==g[0]:
continue
if f1 and filter2(self.geomList[objIndex][0]):
yield (g,self.geomList[objIndex])
elif f2 and filter1(self.geomList[objIndex][0]):
yield (self.geomList[objIndex],g)
def collisions(self,
filter1: Optional[Callable[[WorldBodyType],bool]] = None,
filter2: Optional[Callable[[WorldBodyType],bool]] = None,
) -> Iterator[Tuple[Tuple[WorldBodyType,Geometry3D],Tuple[WorldBodyType,Geometry3D]]]:
"""Returns an iterator over the colliding pairs of objects,
optionally that satisfies the filter(s).
Args:
filter1 (function, optional): has form f(object) -> bool
filter2 (function, optional): has form f(object) -> bool
filter1 and filter2 are predicates to allow subsets of objects
to collide. The argument can be a RobotModelLink, RigidObjectModel
or TerrainModel.
If neither filter1 nor filter2 are provided, then all pairs are
checked.
If filter1 is provided but filter2 is not, then objects in the set
filter1 will be collided against each other.
If filter1 and filter2 are provided, then objects that
satisfy filter1 will be collided against objects that satisfy
filter2. (Note: in this case there is no checking of duplicates,
i.e., the sets should be disjoint to avoid duplicating work).
"""
for (g0,g1) in self.collisionTests(filter1,filter2):
if g0[1].collides(g1[1]):
yield (g0[0],g1[0])
def robotSelfCollisions(self,
robot: Union[RobotModel,int,None] = None
) -> Iterator[Tuple[RobotModelLink,RobotModelLink]]:
"""Yields an iterator over robot self collisions.
Args:
robot (RobotModel or int, optional): If None (default), all
robots are tested. If an index or a RobotModel object only
collisions for that robot are tested
Returns:
Iterates over colliding
``(RobotModelLink,RobotModelLink)`` pairs.
"""
if isinstance(robot,RobotModel):
robot = robot.index
if robot is None:
#test all robots
for r in range(len(self.robots)):
for c in self.robotSelfCollisions(r):
yield c
return
rindices = self.robots[robot]
for i in rindices:
if i < 0: continue
for j in rindices:
if i < j: break
if j not in self.mask[i]: continue
if self.geomList[i][1].collides(self.geomList[j][1]):
yield (self.geomList[i][0],self.geomList[j][0])
def robotObjectCollisions(self,
robot: Union[RobotModel,int],
object: Union[RigidObjectModel,int,None] = None
) -> Iterator[Tuple[RobotModelLink,RigidObjectModel]]:
"""Yields an iterator over robot-object collision pairs.
Args:
robot (RobotModel or int): the robot to test
object (RigidObjectModel or int, optional): the object to
test, or None to all objects.
Returns:
Iterates over colliding
(RobotModelLink,RigidObjectModel) pairs.
"""
if isinstance(robot,RobotModel):
robot = robot.index
if isinstance(object,RigidObjectModel):
object = object.index
if object is None:
#test all objects
for o in range(len(self.rigidObjects)):
for c in self.robotObjectCollisions(robot,o):
yield c
return
rindices = self.robots[robot]
oindex = self.rigidObjects[object]
if oindex < 0: return
for i in rindices:
if i < 0: continue
if oindex not in self.mask[i]: continue
if self.geomList[oindex][1].collides(self.geomList[i][1]):
yield (self.geomList[i][0],self.geomList[oindex][0])
def robotTerrainCollisions(self,
robot: Union[RobotModel,int],
terrain: Union[TerrainModel,int,None] = None
) -> Iterator[Tuple[RobotModelLink,TerrainModel]]:
"""Yields an iterator over robot-terrain collision pairs.
Args:
robot (RobotModel or int): the robot to test
terrain (TerrainModel or int, optional): the terrain to
test, or None to all terrains.
Returns:
Iterates over colliding
(RobotModelLink,TerrainModel) pairs.
"""
if isinstance(robot,RobotModel):
robot = robot.index
if isinstance(terrain,TerrainModel):
terrain = terrain.index
if terrain is None:
#test all terrains
for t in range(len(self.terrains)):
for c in self.robotTerrainCollisions(robot,t):
yield c
return
rindices = self.robots[robot]
tindex = self.terrains[terrain]
if tindex < 0: return
for i in rindices:
if i < 0: continue
if tindex not in self.mask[i]: continue
if self.geomList[tindex][1].collides(self.geomList[i][1]):
yield (self.geomList[i][0],self.geomList[tindex][0])
def objectTerrainCollisions(self,
object: Union[RigidObjectModel,int],
terrain: Union[TerrainModel,int,None] = None
) -> Iterator[Tuple[RigidObjectModel,TerrainModel]]:
"""Yields an iterator over object-terrain collision pairs.
Args:
object (RigidObjectModel or int): the object to test
terrain (TerrainModel or int, optional): the terrain to
test, or None to all terrains.
Returns:
Iterates over colliding
(RigidObjectModel,TerrainModel) pairs.
"""
if isinstance(object,RigidObjectModel):
object = object.index
if isinstance(terrain,TerrainModel):
terrain = terrain.index
if terrain is None:
#test all terrains
for t in range(len(self.terrains)):
for c in self.objectTerrainCollisions(object,t):
yield c
return
oindex = self.rigidObjects[object]
tindex = self.terrains[terrain]
if oindex < 0: return
if tindex < 0: return
if tindex not in self.mask[oindex]: return
if self.geomList[oindex][1].collides(self.geomList[tindex][1]):
yield (self.geomList[oindex][0],self.geomList[tindex][0])
return
def objectObjectCollisions(self,
object: Union[RigidObjectModel,int],
object2: Union[RigidObjectModel,int,None]
) -> Iterator[Tuple[RigidObjectModel,RigidObjectModel]]:
"""Yields an iterator over object-terrain collision pairs.
Args:
object (RigidObjectModel or int): the object to test
object2 (RigidObjectModel or int, optional): the terrain to
test, or None to all objects.
Returns:
Iterates over colliding
(RigidObjectModel,TerrainModel) pairs.
"""
if isinstance(object,RigidObjectModel):
object = object.index
if isinstance(object2,RigidObjectModel):
object2 = object2.index
if object2 is None:
#test all terrains
for o in range(len(self.rigidObjects)):
for c in self.objectObjectCollisions(object):
yield c
return
oindex = self.rigidObjects[object]
oindex2 = self.rigidObjects[object2]
if oindex < 0: return
if oindex2 < 0: return
if oindex not in self.mask[oindex2]: return
if self.geomList[oindex][1].collides(self.geomList[oindex2][1]):
yield (self.geomList[oindex][0],self.geomList[oindex2][0])
return
def rayCast(self,
s: Vector3,
d: Vector3,
indices: Optional[List[int]]=None
) -> Union[None,Tuple[WorldBodyType,Vector3]]:
"""Finds the first collision between a ray and objects in the world.
Args:
s (list of 3 floats): the ray source
d (list of 3 floats): the ray direction
indices (list of ints, optional): if given, the indices of
geometries in geomList to test.
Returns:
The (object,point) pair or None if no collision is found.
"""
res = None
dmin = 1e300
geoms = (self.geomList if indices==None else [self.geomList[i] for i in indices])
for g in geoms:
(coll,pt) = g[1].rayCast(s,d)
if coll:
dist = vectorops.dot(d,vectorops.sub(pt,s))
if dist < dmin:
dmin,res = dist,(g[0],pt)
return res
def rayCastRobot(self,
robot: Union[RobotModel,int],
s: Vector3,
d: Vector3
) -> Union[None,Tuple[RobotModelLink,Vector3]]:
"""Finds the first collision between a ray and a robot.
Args:
robot (RobotModel or int): the robot
s (list of 3 floats): the ray source
d (list of 3 floats): the ray direction
Returns:
The (object,point) pair or None if no collision is found.
"""
if isinstance(robot,RobotModel):
try:
robot = [r for r in range(self.world.numRobots()) if self.world.robot(r).getID()==robot.getID()][0]
except IndexError:
raise RuntimeError("Robot "+robot.getName()+" is not found in the world!")
rindices = self.robots[robot]
return self.rayCast(s,d,rindices)
|
{
"content_hash": "b824b5995cb6d277ff3576893b85b9d6",
"timestamp": "",
"source": "github",
"line_count": 748,
"max_line_length": 115,
"avg_line_length": 39.01470588235294,
"alnum_prop": 0.5624507418702669,
"repo_name": "krishauser/Klampt",
"id": "d762fda0f2ff046c156cd6eae88764fbabe07383",
"size": "29183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/klampt/model/collide.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1578"
},
{
"name": "C",
"bytes": "114047"
},
{
"name": "C++",
"bytes": "9529503"
},
{
"name": "CMake",
"bytes": "53132"
},
{
"name": "CSS",
"bytes": "365"
},
{
"name": "Dockerfile",
"bytes": "514"
},
{
"name": "HTML",
"bytes": "39706"
},
{
"name": "JavaScript",
"bytes": "62209"
},
{
"name": "Makefile",
"bytes": "6997"
},
{
"name": "Python",
"bytes": "6060576"
},
{
"name": "QMake",
"bytes": "3587"
},
{
"name": "SWIG",
"bytes": "327289"
},
{
"name": "Shell",
"bytes": "279"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
from version import get_git_version
VERSION, SOURCE_LABEL = get_git_version()
PROJECT = 'dossier.web'
AUTHOR = 'Diffeo, Inc.'
AUTHOR_EMAIL = 'support@diffeo.com'
URL = 'http://github.com/dossier/dossier.web'
DESC = 'DossierStack web services'
def read_file(file_name):
file_path = os.path.join(
os.path.dirname(__file__),
file_name
)
return open(file_path).read()
setup(
name=PROJECT,
version=VERSION,
description=DESC,
license='MIT',
long_description=read_file('README.rst'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
packages=find_packages(),
namespace_packages=['dossier'],
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
install_requires=[
'bottle >= 0.12',
'dblogger',
'dossier.fc >= 0.2.0',
'dossier.label >= 0.4.0',
'dossier.store >= 0.3.1',
'kvlayer >= 0.5.7',
'pytest',
'pytest-diffeo',
'nilsimsa >= 0.3.4',
'regex',
'uwsgi >= 2',
'yakonfig >= 0.7.2',
],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'dossier.web = dossier.web.run:main',
],
},
)
|
{
"content_hash": "16662aa6a15f833a8c7cf98400a19cf7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 49,
"avg_line_length": 23.35593220338983,
"alnum_prop": 0.5638606676342526,
"repo_name": "dossier/dossier.web",
"id": "c0c54ad22cf8f880d862140b146e3d3823fd5c66",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1691"
},
{
"name": "JavaScript",
"bytes": "51566"
},
{
"name": "Python",
"bytes": "143399"
},
{
"name": "Shell",
"bytes": "334"
}
],
"symlink_target": ""
}
|
from ._globals import *
|
{
"content_hash": "98c63bebe3b212e68bcb977d85dbc82e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.7083333333333334,
"repo_name": "nak/pyllars",
"id": "66c5a35d75225f252c1a93a9e2045bba10339acb",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pyllars/globals/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "162"
},
{
"name": "C++",
"bytes": "556054"
},
{
"name": "CMake",
"bytes": "6213"
},
{
"name": "Python",
"bytes": "137840"
}
],
"symlink_target": ""
}
|
from pretend import call_recorder, call
from static_precompiler.compilers import SCSS, SASS
from static_precompiler.exceptions import StaticCompilationError
from static_precompiler.utils import normalize_path, fix_line_breaks
import os
import pytest
def test_compile_file():
compiler = SCSS()
assert (
fix_line_breaks(compiler.compile_file("styles/test.scss")) ==
"p {\n font-size: 15px; }\n p a {\n color: red; }\n"
)
def test_compile_source():
compiler = SCSS(executable="scss")
assert (
fix_line_breaks(compiler.compile_source("p {font-size: 15px; a {color: red;}}")) ==
"p {\n font-size: 15px; }\n p a {\n color: red; }\n"
)
compiler = SCSS(executable="sass")
assert (
fix_line_breaks(compiler.compile_source("p {font-size: 15px; a {color: red;}}")) ==
"p {\n font-size: 15px; }\n p a {\n color: red; }\n"
)
with pytest.raises(StaticCompilationError):
compiler.compile_source('invalid syntax')
with pytest.raises(StaticCompilationError):
compiler.compile_source('invalid syntax')
# Test non-ascii
NON_ASCII = """@charset "UTF-8";
.external_link:first-child:before {
content: "Zobacz także:";
background: url(картинка.png); }
"""
assert fix_line_breaks(compiler.compile_source(NON_ASCII)) == NON_ASCII
compiler = SASS(executable="sass")
assert (
fix_line_breaks(compiler.compile_source("p\n font-size: 15px\n a\n color: red")) ==
"p {\n font-size: 15px; }\n p a {\n color: red; }\n"
)
compiler = SASS(executable="scss")
assert (
fix_line_breaks(compiler.compile_source("p\n font-size: 15px\n a\n color: red")) ==
"p {\n font-size: 15px; }\n p a {\n color: red; }\n"
)
def test_postprocesss(monkeypatch):
compiler = SCSS()
convert_urls = call_recorder(lambda *args: "spam")
monkeypatch.setattr("static_precompiler.compilers.scss.convert_urls", convert_urls)
assert compiler.postprocess("ham", "eggs") == "spam"
assert convert_urls.calls == [call("ham", "eggs")]
def test_parse_import_string():
compiler = SCSS()
import_string = """"foo, bar" , "foo", url(bar,baz),
'bar,foo',bar screen, projection"""
assert compiler.parse_import_string(import_string) == [
"bar",
"bar,foo",
"foo",
"foo, bar",
]
import_string = """"foo,bar", url(bar,baz), 'bar,foo',bar screen, projection"""
assert compiler.parse_import_string(import_string) == [
"bar",
"bar,foo",
"foo,bar",
]
import_string = """"foo" screen"""
assert compiler.parse_import_string(import_string) == ["foo"]
def test_find_imports():
source = """
@import "foo.css", ;
@import " ";
@import "foo.scss";
@import "foo";
@import "foo.css";
@import "foo" screen;
@import "http://foo.com/bar";
@import url(foo);
@import "rounded-corners",
"text-shadow";
@import "compass";
@import "compass.scss";
@import "compass/css3";
@import url(http://fonts.googleapis.com/css?family=Arvo:400,700,400italic,700italic);
@import url("http://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,400,700,600,300");
@import "foo,bar", url(bar,baz), 'bar,foo';
"""
expected = [
"bar,foo",
"compass",
"compass.scss",
"compass/css3",
"foo",
"foo,bar",
"foo.scss",
"rounded-corners",
"text-shadow",
]
compiler = SCSS(compass_enabled=False)
assert compiler.find_imports(source) == expected
compiler = SCSS(compass_enabled=True)
expected = [
"bar,foo",
"foo",
"foo,bar",
"foo.scss",
"rounded-corners",
"text-shadow",
]
assert compiler.find_imports(source) == expected
def test_locate_imported_file(monkeypatch):
compiler = SCSS()
root = os.path.dirname(__file__)
existing_files = set()
for f in ("A/B.scss", "A/_C.scss", "A/S.sass", "D.scss"):
existing_files.add(os.path.join(root, "static", normalize_path(f)))
monkeypatch.setattr("os.path.exists", lambda x: x in existing_files)
assert compiler.locate_imported_file("A", "B.scss") == "A/B.scss"
assert compiler.locate_imported_file("A", "C") == "A/_C.scss"
assert compiler.locate_imported_file("E", "../D") == "D.scss"
assert compiler.locate_imported_file("E", "../A/B.scss") == "A/B.scss"
assert compiler.locate_imported_file("", "D.scss") == "D.scss"
assert compiler.locate_imported_file("A", "S.sass") == "A/S.sass"
assert compiler.locate_imported_file("A", "S") == "A/S.sass"
with pytest.raises(StaticCompilationError):
compiler.locate_imported_file("", "Z.scss")
def test_find_dependencies(monkeypatch):
compiler = SCSS()
files = {
"A.scss": "@import 'B/C.scss';",
"B/C.scss": "@import '../E';",
"_E.scss": "p {color: red;}",
"compass-import.scss": '@import "compass"',
}
monkeypatch.setattr("static_precompiler.compilers.scss.SCSS.get_source", lambda self, x: files[x])
root = os.path.dirname(__file__)
existing_files = set()
for f in files:
existing_files.add(os.path.join(root, "static", normalize_path(f)))
monkeypatch.setattr("os.path.exists", lambda x: x in existing_files)
assert compiler.find_dependencies("A.scss") == ["B/C.scss", "_E.scss"]
assert compiler.find_dependencies("B/C.scss") == ["_E.scss"]
assert compiler.find_dependencies("_E.scss") == []
def test_compass():
compiler = SCSS(compass_enabled=True)
assert (
fix_line_breaks(compiler.compile_file("test-compass.scss")) ==
"p {\n background: url('/static/images/test.png'); }\n"
)
def test_compass_import():
compiler = SCSS(compass_enabled=True)
assert (
fix_line_breaks(compiler.compile_file("styles/test-compass-import.scss")) ==
".round-corners {\n"
" -moz-border-radius: 4px / 4px;\n"
" -webkit-border-radius: 4px 4px;\n"
" border-radius: 4px / 4px; }\n"
)
compiler = SCSS(compass_enabled=False)
with pytest.raises(StaticCompilationError):
compiler.compile_file("styles/test-compass-import.scss")
|
{
"content_hash": "bfce3fad39f05f55dd5621bdd08b4658",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 120,
"avg_line_length": 31.18407960199005,
"alnum_prop": 0.6094447989789407,
"repo_name": "paera/django-static-precompiler",
"id": "849b1953bcd31c676fc308eb0313b2b749fd32ec",
"size": "6293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "static_precompiler/tests/test_scss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "483"
},
{
"name": "CoffeeScript",
"bytes": "139"
},
{
"name": "JavaScript",
"bytes": "30"
},
{
"name": "Python",
"bytes": "94164"
},
{
"name": "Ruby",
"bytes": "147"
},
{
"name": "Shell",
"bytes": "1037"
}
],
"symlink_target": ""
}
|
'''Support code for reading the instruction database in insns.yml'''
import itertools
import os
import re
from typing import Dict, List, Optional, Tuple, cast
from serialize.parse_helpers import (check_keys, check_str, check_bool,
check_list, index_list, get_optional_str,
load_yaml)
from .encoding import Encoding
from .encoding_scheme import EncSchemes
from .information_flow import InsnInformationFlow
from .lsu_desc import LSUDesc
from .operand import Operand
from .syntax import InsnSyntax
class Insn:
def __init__(self,
yml: object,
encoding_schemes: Optional[EncSchemes]) -> None:
yd = check_keys(yml, 'instruction',
['mnemonic', 'operands'],
['group', 'rv32i', 'synopsis',
'syntax', 'doc', 'errs', 'note',
'encoding', 'glued-ops',
'literal-pseudo-op', 'python-pseudo-op', 'lsu',
'straight-line', 'iflow'])
self.mnemonic = check_str(yd['mnemonic'], 'mnemonic for instruction')
what = 'instruction with mnemonic {!r}'.format(self.mnemonic)
encoding_yml = yd.get('encoding')
self.encoding = None
if encoding_yml is not None:
if encoding_schemes is None:
raise ValueError('{} specifies an encoding, but the file '
'didn\'t specify any encoding schemes.'
.format(what))
self.encoding = Encoding(encoding_yml,
encoding_schemes, self.mnemonic)
self.operands = [Operand(y, self.mnemonic, self.encoding)
for y in check_list(yd['operands'],
'operands for ' + what)]
self.name_to_operand = index_list('operands for ' + what,
self.operands,
lambda op: op.name)
# The call to index_list has checked that operand names are distinct.
# We also need to check that no operand abbreviation clashes with
# anything else.
operand_names = set(self.name_to_operand.keys())
for op in self.operands:
if op.abbrev is not None:
if op.abbrev in operand_names:
raise ValueError('The name {!r} appears as an operand or '
'abbreviation more than once for '
'instruction {!r}.'
.format(op.abbrev, self.mnemonic))
operand_names.add(op.abbrev)
if self.encoding is not None:
# If we have an encoding, we passed it to the Operand constructors
# above. This ensured that each operand has a field. However, it's
# possible that there are some operand names the encoding mentions
# that don't actually have an operand. Check for that here.
missing_ops = (set(self.encoding.op_to_field_name.keys()) -
set(self.name_to_operand.keys()))
if missing_ops:
raise ValueError('Encoding scheme for {} specifies '
'some non-existent operands: {}.'
.format(what, ', '.join(list(missing_ops))))
self.rv32i = check_bool(yd.get('rv32i', False),
'rv32i flag for ' + what)
self.glued_ops = check_bool(yd.get('glued-ops', False),
'glued-ops flag for ' + what)
self.synopsis = get_optional_str(yd, 'synopsis', what)
self.doc = get_optional_str(yd, 'doc', what)
self.note = get_optional_str(yd, 'note', what)
self.errs = None
if 'errs' in yd:
errs_what = 'errs field for ' + what
y_errs = check_list(yd.get('errs'), errs_what)
self.errs = []
for idx, err_desc in enumerate(y_errs):
self.errs.append(check_str(err_desc,
'element {} of the {}'
.format(idx, errs_what)))
raw_syntax = get_optional_str(yd, 'syntax', what)
if raw_syntax is not None:
self.syntax = InsnSyntax.from_yaml(self.mnemonic,
raw_syntax.strip())
else:
self.syntax = InsnSyntax.from_list([op.name
for op in self.operands])
pattern, op_to_grp = self.syntax.asm_pattern()
self.asm_pattern = re.compile(pattern)
self.pattern_op_to_grp = op_to_grp
# Make sure we have exactly the operands we expect.
if set(self.name_to_operand.keys()) != self.syntax.op_set:
raise ValueError("Operand syntax for {!r} doesn't have the "
"same list of operands as given in the "
"operand list. The syntax uses {}, "
"but the list of operands gives {}."
.format(self.mnemonic,
list(sorted(self.syntax.op_set)),
list(sorted(self.name_to_operand))))
self.python_pseudo_op = check_bool(yd.get('python-pseudo-op', False),
'python-pseudo-op flag for ' + what)
if self.python_pseudo_op and self.encoding is not None:
raise ValueError('{} specifies an encoding and also sets '
'python-pseudo-op.'.format(what))
lpo = yd.get('literal-pseudo-op')
if lpo is None:
self.literal_pseudo_op = None
else:
lpo_lst = check_list(lpo, 'literal-pseudo-op flag for ' + what)
for idx, item in enumerate(lpo_lst):
if not isinstance(item, str):
raise ValueError('Item {} of literal-pseudo-op list for '
'{} is {!r}, which is not a string.'
.format(idx, what, item))
self.literal_pseudo_op = cast(Optional[List[str]], lpo_lst)
if self.python_pseudo_op:
raise ValueError('{} specifies both python-pseudo-op and '
'literal-pseudo-op.'
.format(what))
if self.encoding is not None:
raise ValueError('{} specifies both an encoding and '
'literal-pseudo-op.'
.format(what))
lsu_yaml = yd.get('lsu', None)
if lsu_yaml is None:
self.lsu = None
else:
self.lsu = LSUDesc.from_yaml(lsu_yaml,
'lsu field for {}'.format(what))
for idx, op_name in enumerate(self.lsu.target):
if op_name not in self.name_to_operand:
raise ValueError('element {} of the target for the lsu '
'field for {} is {!r}, which is not a '
'operand name of the instruction.'
.format(idx, what, op_name))
self.straight_line = yd.get('straight-line', True)
self.iflow = InsnInformationFlow.from_yaml(yd.get('iflow', None),
'iflow field for {}'.format(what), self.operands)
def enc_vals_to_op_vals(self,
cur_pc: int,
enc_vals: Dict[str, int]) -> Dict[str, int]:
'''Convert values extracted from an encoding to their logical values
This converts between "encoded values" and "operand values" (as defined
in the OperandType class).
The enc_vals dictionary should be keyed by the instruction's operand
names (guaranteed by Encoding.extract_operands). This function should
only be called when every operand has a width (which will definitely be
the case if we just decoded these values from an instruction word).
'''
op_vals = {}
for op_name, enc_val in enc_vals.items():
op_type = self.name_to_operand[op_name].op_type
op_val = op_type.enc_val_to_op_val(enc_val, cur_pc)
# This assertion should hold because OperandType.enc_val_to_op_val
# doesn't return None if the operand type has a width and the
# function is given a PC.
assert op_val is not None
op_vals[op_name] = op_val
return op_vals
def disassemble(self,
cur_pc: int,
op_vals: Dict[str, int]) -> str:
'''Return disassembly for this instruction
op_vals should be a dictionary mapping operand names to operand values
(not encoded values). mnem_width is the width to pad the mnemonic to.
'''
hunks = self.syntax.render(cur_pc, op_vals, self.name_to_operand)
mnem = self.mnemonic
if hunks and self.glued_ops:
mnem += hunks[0] + ' '
hunks = hunks[1:]
else:
mnem += ' '
if len(mnem) < 15:
mnem += ' ' * (15 - len(mnem))
# The lstrip here deals with a tricky corner case for instructions like
# bn.mulqacc if the .z option isn't supplied. In that case, the syntax
# for the operands starts with a space (following the optional .z that
# isn't there) and would mess up our alignment.
return mnem + ''.join(hunks).lstrip()
class DummyInsn(Insn):
'''A dummy instruction that will never be decoded.
This shouldn't appear in an InsnGroup or InsnsFile, but can be handy when
you have an object that wraps an instruction but need to easily handle the
case of a bogus encoding.
'''
def __init__(self) -> None:
fake_yml = {
'mnemonic': 'dummy-insn',
'operands': []
}
super().__init__(fake_yml, None)
class InsnGroup:
def __init__(self,
path: str,
encoding_schemes: Optional[EncSchemes],
yml: object) -> None:
yd = check_keys(yml, 'insn-group',
['key', 'title', 'doc', 'insns'], [])
self.key = check_str(yd['key'], 'insn-group key')
self.title = check_str(yd['title'], 'insn-group title')
self.doc = check_str(yd['doc'], 'insn-group doc')
insns_what = 'insns field for {!r} instruction group'.format(self.key)
insns_rel_path = check_str(yd['insns'], insns_what)
insns_path = os.path.normpath(os.path.join(os.path.dirname(path),
insns_rel_path))
insns_yaml = load_yaml(insns_path, insns_what)
try:
self.insns = [Insn(i, encoding_schemes)
for i in check_list(insns_yaml, insns_what)]
except ValueError as err:
raise RuntimeError('Invalid schema in YAML file at {!r}: {}'
.format(insns_path, err)) from None
class InsnGroups:
def __init__(self,
path: str,
encoding_schemes: Optional[EncSchemes],
yml: object) -> None:
self.groups = [InsnGroup(path, encoding_schemes, y)
for y in check_list(yml, 'insn-groups')]
if not self.groups:
raise ValueError('Empty list of instruction groups: '
'we need at least one as a base group.')
self.key_to_group = index_list('insn-groups',
self.groups, lambda ig: ig.key)
class InsnsFile:
def __init__(self, path: str, yml: object) -> None:
yd = check_keys(yml, 'top-level',
['insn-groups'],
['encoding-schemes'])
enc_scheme_path = get_optional_str(yd, 'encoding-schemes', 'top-level')
if enc_scheme_path is None:
self.encoding_schemes = None
else:
src_dir = os.path.dirname(path)
es_path = os.path.normpath(os.path.join(src_dir, enc_scheme_path))
es_yaml = load_yaml(es_path, 'encoding schemes')
try:
self.encoding_schemes = EncSchemes(es_yaml)
except ValueError as err:
raise RuntimeError('Invalid schema in YAML file at {!r}: {}'
.format(es_path, err)) from None
self.groups = InsnGroups(path,
self.encoding_schemes,
yd['insn-groups'])
# The instructions are grouped by instruction group and stored in
# self.groups. Most of the time, however, we just want "an OTBN
# instruction" and don't care about the group. Retrieve them here.
self.insns = []
for grp in self.groups.groups:
self.insns += grp.insns
self.mnemonic_to_insn = index_list('insns', self.insns,
lambda insn: insn.mnemonic.lower())
masks_exc, ambiguities = self._get_masks()
if ambiguities:
raise ValueError('Ambiguous instruction encodings: ' +
', '.join(ambiguities))
self._masks = masks_exc
def grouped_insns(self) -> List[Tuple[InsnGroup, List[Insn]]]:
'''Return the instructions in groups'''
return [(grp, grp.insns) for grp in self.groups.groups]
def _get_masks(self) -> Tuple[Dict[str, Tuple[int, int]], List[str]]:
'''Generate a list of zeros/ones masks and do ambiguity checks
Returns a pair (masks, ambiguities). Masks is keyed by instruction
mnemonic. Its elements are pairs (m0, m1) where m0 is the bits that are
always zero for this instruction's in the encoding and m1 is the bits
that are always one. (Bits that can be either are not set in m0 or m1).
ambiguities is a list of error messages describing ambiguities in the
encoding. Unless something has gone wrong, it should be empty.
'''
masks_inc = {}
masks_exc = {}
for insn in self.insns:
if insn.encoding is not None:
m0, m1 = insn.encoding.get_masks()
masks_inc[insn.mnemonic] = (m0, m1)
masks_exc[insn.mnemonic] = (m0 & ~m1, m1 & ~m0)
ambiguities = []
for mnem0, mnem1 in itertools.combinations(masks_inc.keys(), 2):
m00, m01 = masks_inc[mnem0]
m10, m11 = masks_inc[mnem1]
# The pair of instructions is ambiguous if a bit pattern might be
# either instruction. That happens if each bit index is either
# allowed to be a 0 in both or allowed to be a 1 in both.
# ambiguous_mask is the set of bits that don't distinguish the
# instructions from each other.
m0 = m00 & m10
m1 = m01 & m11
ambiguous_mask = m0 | m1
if ambiguous_mask == (1 << 32) - 1:
ambiguities.append('{!r} and {!r} '
'both match bit pattern {:#010x}'
.format(mnem0, mnem1, m1 & ~m0))
return (masks_exc, ambiguities)
def mnem_for_word(self, word: int) -> Optional[str]:
'''Find the instruction that could be encoded as word
If there is no such instruction, return None.
'''
ret = None
for mnem, (m0, m1) in self._masks.items():
# If any bit is set that should be zero or if any bit is clear that
# should be one, ignore this instruction.
if word & m0 or (~ word) & m1:
continue
# Belt-and-braces ambiguity check
assert ret is None
ret = mnem
return ret
def load_file(path: str) -> InsnsFile:
'''Load the YAML file at path.
Raises a RuntimeError on syntax or schema error.
'''
try:
return InsnsFile(path, load_yaml(path, None))
except ValueError as err:
raise RuntimeError('Invalid schema in YAML file at {!r}: {}'
.format(path, err)) from None
_DEFAULT_INSNS_FILE = None # type: Optional[InsnsFile]
def load_insns_yaml() -> InsnsFile:
'''Load the insns.yml file from its default location.
Caches its result. Raises a RuntimeError on syntax or schema error.
'''
global _DEFAULT_INSNS_FILE
if _DEFAULT_INSNS_FILE is None:
dirname = os.path.dirname(__file__)
rel_path = os.path.join('..', '..', 'data', 'insns.yml')
insns_yml = os.path.normpath(os.path.join(dirname, rel_path))
_DEFAULT_INSNS_FILE = load_file(insns_yml)
return _DEFAULT_INSNS_FILE
|
{
"content_hash": "0baed03b9265d6c1552ac39cb109c87b",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 79,
"avg_line_length": 42.15308641975309,
"alnum_prop": 0.5279990627928772,
"repo_name": "lowRISC/opentitan",
"id": "0645887c098492ddb8ea1b9592905412e4e21ef6",
"size": "17220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw/ip/otbn/util/shared/insn_yaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "516881"
},
{
"name": "C",
"bytes": "4864968"
},
{
"name": "C++",
"bytes": "1629214"
},
{
"name": "CSS",
"bytes": "3281"
},
{
"name": "Dockerfile",
"bytes": "6732"
},
{
"name": "Emacs Lisp",
"bytes": "411542"
},
{
"name": "HTML",
"bytes": "149270"
},
{
"name": "Makefile",
"bytes": "20646"
},
{
"name": "Python",
"bytes": "2576872"
},
{
"name": "Rust",
"bytes": "856480"
},
{
"name": "SCSS",
"bytes": "54700"
},
{
"name": "Shell",
"bytes": "119163"
},
{
"name": "Smarty",
"bytes": "771102"
},
{
"name": "Starlark",
"bytes": "688003"
},
{
"name": "Stata",
"bytes": "3676"
},
{
"name": "SystemVerilog",
"bytes": "14853322"
},
{
"name": "Tcl",
"bytes": "361936"
},
{
"name": "Verilog",
"bytes": "3296"
}
],
"symlink_target": ""
}
|
"""Tests for ceilometer/publish.py
"""
import datetime
from oslo.config import cfg
import eventlet
import mock
from ceilometer import sample
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import network_utils
from ceilometer.openstack.common import rpc as oslo_rpc
from ceilometer.publisher import rpc
from ceilometer.tests import base
class TestSignature(base.TestCase):
def test_compute_signature_change_key(self):
sig1 = rpc.compute_signature({'a': 'A', 'b': 'B'},
'not-so-secret')
sig2 = rpc.compute_signature({'A': 'A', 'b': 'B'},
'not-so-secret')
self.assertNotEqual(sig1, sig2)
def test_compute_signature_change_value(self):
sig1 = rpc.compute_signature({'a': 'A', 'b': 'B'},
'not-so-secret')
sig2 = rpc.compute_signature({'a': 'a', 'b': 'B'},
'not-so-secret')
self.assertNotEqual(sig1, sig2)
def test_compute_signature_same(self):
sig1 = rpc.compute_signature({'a': 'A', 'b': 'B'},
'not-so-secret')
sig2 = rpc.compute_signature({'a': 'A', 'b': 'B'},
'not-so-secret')
self.assertEqual(sig1, sig2)
def test_compute_signature_signed(self):
data = {'a': 'A', 'b': 'B'}
sig1 = rpc.compute_signature(data, 'not-so-secret')
data['message_signature'] = sig1
sig2 = rpc.compute_signature(data, 'not-so-secret')
self.assertEqual(sig1, sig2)
def test_compute_signature_use_configured_secret(self):
data = {'a': 'A', 'b': 'B'}
sig1 = rpc.compute_signature(data, 'not-so-secret')
sig2 = rpc.compute_signature(data, 'different-value')
self.assertNotEqual(sig1, sig2)
def test_verify_signature_signed(self):
data = {'a': 'A', 'b': 'B'}
sig1 = rpc.compute_signature(data, 'not-so-secret')
data['message_signature'] = sig1
self.assertTrue(rpc.verify_signature(data, 'not-so-secret'))
def test_verify_signature_unsigned(self):
data = {'a': 'A', 'b': 'B'}
self.assertFalse(rpc.verify_signature(data, 'not-so-secret'))
def test_verify_signature_incorrect(self):
data = {'a': 'A', 'b': 'B',
'message_signature': 'Not the same'}
self.assertFalse(rpc.verify_signature(data, 'not-so-secret'))
def test_verify_signature_nested(self):
data = {'a': 'A',
'b': 'B',
'nested': {'a': 'A',
'b': 'B',
},
}
data['message_signature'] = rpc.compute_signature(
data,
'not-so-secret')
self.assertTrue(rpc.verify_signature(data, 'not-so-secret'))
def test_verify_signature_nested_list_of_dict(self):
small = 1
big = 1 << 64
nested = {small: 99, big: 42}
data = {'a': 'A',
'b': 'B',
'nested': {'list': [nested]}}
data['message_signature'] = rpc.compute_signature(
data,
'not-so-secret')
# the keys 1 and 1<<64 cause a hash collision on 64bit platforms
data['nested']['list'] = [{big: 42, small: 99}]
self.assertTrue(rpc.verify_signature(data, 'not-so-secret'))
def test_verify_signature_nested_json(self):
data = {'a': 'A',
'b': 'B',
'nested': {'a': 'A',
'b': 'B',
'c': ('c',),
'd': ['d']
},
}
data['message_signature'] = rpc.compute_signature(
data,
'not-so-secret')
jsondata = jsonutils.loads(jsonutils.dumps(data))
self.assertTrue(rpc.verify_signature(jsondata, 'not-so-secret'))
class TestCounter(base.TestCase):
TEST_COUNTER = sample.Sample(name='name',
type='typ',
unit='',
volume=1,
user_id='user',
project_id='project',
resource_id=2,
timestamp='today',
resource_metadata={'key': 'value'},
source='rpc')
def test_meter_message_from_counter_signed(self):
msg = rpc.meter_message_from_counter(self.TEST_COUNTER,
'not-so-secret')
self.assertIn('message_signature', msg)
def test_meter_message_from_counter_field(self):
def compare(f, c, msg_f, msg):
self.assertEqual(msg, c)
msg = rpc.meter_message_from_counter(self.TEST_COUNTER,
'not-so-secret')
name_map = {'name': 'counter_name',
'type': 'counter_type',
'unit': 'counter_unit',
'volume': 'counter_volume'}
for f in self.TEST_COUNTER._fields:
msg_f = name_map.get(f, f)
yield compare, f, getattr(self.TEST_COUNTER, f), msg_f, msg[msg_f]
class TestPublish(base.TestCase):
test_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test3',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def faux_cast(self, context, topic, msg):
if self.rpc_unreachable:
#note(sileht): Ugly, but when rabbitmq is unreachable
# and rabbitmq_max_retries is not 0
# oslo.rpc do a sys.exit(1), so we do the same
# things here until this is fixed in oslo
raise SystemExit(1)
else:
self.published.append((topic, msg))
def setUp(self):
super(TestPublish, self).setUp()
self.published = []
self.rpc_unreachable = False
self.stubs.Set(oslo_rpc, 'cast', self.faux_cast)
def test_published(self):
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 1)
self.assertEqual(self.published[0][0],
cfg.CONF.publisher_rpc.metering_topic)
self.assertIsInstance(self.published[0][1]['args']['data'], list)
self.assertEqual(self.published[0][1]['method'],
'record_metering_data')
def test_publish_target(self):
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?target=custom_procedure_call'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 1)
self.assertEqual(self.published[0][0],
cfg.CONF.publisher_rpc.metering_topic)
self.assertIsInstance(self.published[0][1]['args']['data'], list)
self.assertEqual(self.published[0][1]['method'],
'custom_procedure_call')
def test_published_with_per_meter_topic(self):
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?per_meter_topic=1'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 4)
for topic, rpc_call in self.published:
meters = rpc_call['args']['data']
self.assertIsInstance(meters, list)
if topic != cfg.CONF.publisher_rpc.metering_topic:
self.assertEqual(len(set(meter['counter_name']
for meter in meters)),
1,
"Meter are published grouped by name")
topics = [topic for topic, meter in self.published]
self.assertIn(cfg.CONF.publisher_rpc.metering_topic, topics)
self.assertIn(
cfg.CONF.publisher_rpc.metering_topic + '.' + 'test', topics)
self.assertIn(
cfg.CONF.publisher_rpc.metering_topic + '.' + 'test2', topics)
self.assertIn(
cfg.CONF.publisher_rpc.metering_topic + '.' + 'test3', topics)
def test_published_concurrency(self):
"""This test the concurrent access to the local queue
of the rpc publisher
"""
def faux_cast_go(context, topic, msg):
self.published.append((topic, msg))
def faux_cast_wait(context, topic, msg):
self.stubs.Set(oslo_rpc, 'cast', faux_cast_go)
# Sleep to simulate concurrency and allow other threads to work
eventlet.sleep(0)
self.published.append((topic, msg))
self.stubs.Set(oslo_rpc, 'cast', faux_cast_wait)
publisher = rpc.RPCPublisher(network_utils.urlsplit('rpc://'))
job1 = eventlet.spawn(publisher.publish_samples, None, self.test_data)
job2 = eventlet.spawn(publisher.publish_samples, None, self.test_data)
job1.wait()
job2.wait()
self.assertEqual(publisher.policy, 'default')
self.assertEqual(len(self.published), 2)
self.assertEqual(len(publisher.local_queue), 0)
@mock.patch('ceilometer.publisher.rpc.LOG')
def test_published_with_no_policy(self, mylog):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://'))
self.assertTrue(mylog.info.called)
self.assertRaises(
SystemExit,
publisher.publish_samples,
None, self.test_data)
self.assertEqual(publisher.policy, 'default')
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 0)
@mock.patch('ceilometer.publisher.rpc.LOG')
def test_published_with_policy_block(self, mylog):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=default'))
self.assertTrue(mylog.info.called)
self.assertRaises(
SystemExit,
publisher.publish_samples,
None, self.test_data)
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 0)
@mock.patch('ceilometer.publisher.rpc.LOG')
def test_published_with_policy_incorrect(self, mylog):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=notexist'))
self.assertRaises(
SystemExit,
publisher.publish_samples,
None, self.test_data)
self.assertTrue(mylog.warn.called)
self.assertEqual(publisher.policy, 'default')
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 0)
def test_published_with_policy_drop_and_rpc_down(self):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=drop'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 0)
def test_published_with_policy_queue_and_rpc_down(self):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=queue'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 1)
def test_published_with_policy_queue_and_rpc_down_up(self):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=queue'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 1)
self.rpc_unreachable = False
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 2)
self.assertEqual(len(publisher.local_queue), 0)
def test_published_with_policy_sized_queue_and_rpc_down(self):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=queue&max_queue_length=3'))
for i in range(0, 5):
for s in self.test_data:
s.source = 'test-%d' % i
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 3)
self.assertEqual(
publisher.local_queue[0][2]['args']['data'][0]['source'],
'test-2'
)
self.assertEqual(
publisher.local_queue[1][2]['args']['data'][0]['source'],
'test-3'
)
self.assertEqual(
publisher.local_queue[2][2]['args']['data'][0]['source'],
'test-4'
)
def test_published_with_policy_default_sized_queue_and_rpc_down(self):
self.rpc_unreachable = True
publisher = rpc.RPCPublisher(
network_utils.urlsplit('rpc://?policy=queue'))
for i in range(0, 2000):
for s in self.test_data:
s.source = 'test-%d' % i
publisher.publish_samples(None,
self.test_data)
self.assertEqual(len(self.published), 0)
self.assertEqual(len(publisher.local_queue), 1024)
self.assertEqual(
publisher.local_queue[0][2]['args']['data'][0]['source'],
'test-976'
)
self.assertEqual(
publisher.local_queue[1023][2]['args']['data'][0]['source'],
'test-1999'
)
|
{
"content_hash": "762b3e2501441b20b3f6cb2db8048085",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 78,
"avg_line_length": 38.34466019417476,
"alnum_prop": 0.5369034054943663,
"repo_name": "rickerc/ceilometer_audit",
"id": "febd947f9e9201a179e0c36939ee050678339504",
"size": "16529",
"binary": false,
"copies": "2",
"ref": "refs/heads/cis-havana-staging",
"path": "tests/publisher/test_rpc_publisher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "64962"
},
{
"name": "Python",
"bytes": "1810243"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
}
|
import re
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.forms import TextInput, Select, NumberInput
from django.forms.models import modelform_factory
from django_ajax.decorators import ajax
from manage.actions import delete_torrent
from manage.models import LocalStorage, RemoteStorage, StorageMap, Setting, Torrent
def _text_input_widget(placeholder=''):
return TextInput(attrs={'placeholder': placeholder,
'class': 'form-control'})
def _select_widget():
return Select(attrs={'class': 'selectpicker form-control'})
def _number_widget(min_value, max_value, step):
return NumberInput(attrs={'min': min_value,
'max': max_value,
'step': step,
'class': 'form-control'})
class MetaModel(object):
def __init__(self, model):
self.model = model
self._re1 = re.compile('(.)([A-Z][a-z]+)')
self._re2 = re.compile('([a-z0-9])([A-Z])')
def header(self):
tmp = self._re1.sub(r'\1 \2', self.model.__name__)
return self._re2.sub(r'\1 \2', tmp)
def header_list(self):
return self.header() + ":"
def header_add(self):
return "Add " + self.header().lower()
def header_edit(self):
return "Edit " + self.header().lower()
def header_delete(self):
return "Delete " + self.header().lower()
def url(self):
tmp = self._re1.sub(r'\1_\2', self.model.__name__)
return "manage:" + self._re2.sub(r'\1 \2', tmp).lower()
def url_add(self):
return self.url() + "_add"
def url_edit(self):
return self.url() + "_edit"
def url_delete(self):
return self.url() + "_delete"
def tab_id(self):
tmp = self._re1.sub(r'\1_\2', self.model.__name__)
return "id_" + self._re2.sub(r'\1_\2', tmp).lower()
def _view(request, model, tForm):
class ViewItem:
def __init__(self, url_edit, url_delete, data):
self.url_edit = url_edit
self.url_delete = url_delete
self.data = data
form = tForm()
meta_model = MetaModel(model)
items = []
for it in model.objects.all():
url_edit = reverse(meta_model.url_edit(), args=[it.id])
url_delete = reverse(meta_model.url_delete(), args=[it.id])
data = tuple(getattr(it, field) for field in form.fields.keys())
items.append(ViewItem(url_edit, url_delete, data))
request.session['tabid'] = meta_model.tab_id()
params = {'items': items,
'action_add': reverse(meta_model.url_add()),
'labels': [it.label for it in form.fields.values()],
'header': meta_model.header_list()}
return render(request, 'manage/setting_view.html', params)
def _process(request, action, id, model, widgets):
tForm = modelform_factory(model, widgets=widgets)
meta_model = MetaModel(model)
if action == 'list':
return _view(request, model, tForm)
if action == 'edit':
inst = get_object_or_404(model, pk=id)
action_url = reverse(meta_model.url_edit(), args=[id])
header = meta_model.header_edit()
action_btn = "Save"
elif action == 'delete':
inst = get_object_or_404(model, pk=id)
action_url = reverse(meta_model.url_delete(), args=[id])
header = meta_model.header_delete()
action_btn = "Delete"
else:
inst = model()
action_url = reverse(meta_model.url_add())
header = meta_model.header_add()
action_btn = "Save"
if request.method == 'POST':
url = reverse('manage:index')
if action == 'delete':
inst.delete()
return HttpResponseRedirect(url)
form = tForm(request.POST, instance=inst)
if form.is_valid():
form.save()
return HttpResponseRedirect(url)
else:
form = tForm(instance=inst)
if action == 'delete':
for it in form.fields.values():
it.widget.attrs['disabled'] = 'disabled'
params = {'form': form,
'action': action_url,
'header': header,
'action_btn': action_btn}
return render(request, 'manage/setting_form.html', params)
@ajax
def local_storage(request, action, id=None):
widgets = {
'name': _text_input_widget('Films'),
'path': _text_input_widget('Films')}
return _process(request, action, id, LocalStorage, widgets)
@ajax
def remote_storage(request, action, id=None):
widgets = {
'path': _text_input_widget('Media/Films')}
return _process(request, action, id, RemoteStorage, widgets)
@ajax
def storage_map(request, action, id=None):
widgets = {'local_ptr': _select_widget(),
'remote_ptr': _select_widget(),
'min_ratio': _number_widget(0.0, 2.0, 0.1)}
return _process(request, action, id, StorageMap, widgets)
@ajax
def setting(request, action, id=None):
widgets = {'name': _text_input_widget('ip'),
'value': _text_input_widget('192.168.1.1')}
return _process(request, action, id, Setting, widgets)
@ajax
def torrent(request, action, id):
widgets = {'storage_map_ptr': _select_widget(),
'name': _text_input_widget(),
'idhash': _text_input_widget()}
tForm = modelform_factory(Torrent, widgets=widgets)
inst = get_object_or_404(Torrent, pk=id)
if action == 'delete':
action_url = reverse('manage:torrent_delete', args=[id])
header = "Delete torrent and files"
action_btn = "Delete torrent and files"
else:
action_url = reverse('manage:torrent_delete_files', args=[id])
header = "Delete files"
action_btn = "Delete files"
if request.method == 'POST':
file_only = (action != 'delete')
delete_torrent(inst, file_only)
return HttpResponseRedirect(reverse('manage:index'))
else:
form = tForm(instance=inst)
for it in form.fields.values():
it.widget.attrs['disabled'] = 'disabled'
params = {'form': form,
'action': action_url,
'header': header,
'action_btn': action_btn}
return render(request, 'manage/setting_form.html', params)
|
{
"content_hash": "2f6f261a0bc56a6cd09f749335ba8349",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 83,
"avg_line_length": 32.03,
"alnum_prop": 0.5838276615672807,
"repo_name": "ReanGD/web-home-manage",
"id": "9bf83616c7b9f8f9a6af25808363223074a52472",
"size": "6431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "85370"
},
{
"name": "HTML",
"bytes": "79565"
},
{
"name": "JavaScript",
"bytes": "35100"
},
{
"name": "Python",
"bytes": "40034"
},
{
"name": "Shell",
"bytes": "153"
},
{
"name": "TypeScript",
"bytes": "84436"
}
],
"symlink_target": ""
}
|
from .controllers import candidates_import_from_master_server, candidates_import_from_sample_file, \
candidate_politician_match, retrieve_candidate_photos, retrieve_candidate_politician_match_options
from .models import CandidateCampaign, CandidateCampaignListManager, CandidateCampaignManager
from .serializers import CandidateCampaignSerializer
from admin_tools.views import redirect_to_sign_in_page
from office.models import ContestOffice, ContestOfficeManager
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import Election, ElectionManager
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception, print_to_log
from import_export_vote_smart.models import VoteSmartRatingOneCandidate
from import_export_vote_smart.votesmart_local import VotesmartApiError
from politician.models import PoliticianManager
from position.models import PositionEntered, PositionListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, \
positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
class CandidatesSyncOutView(APIView):
def get(self, request, format=None):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
candidate_list = CandidateCampaign.objects.all()
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
serializer = CandidateCampaignSerializer(candidate_list, many=True)
return Response(serializer.data)
@login_required
def candidates_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = candidates_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Candidates import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def candidates_import_from_sample_file_view(request):
"""
This gives us sample organizations, candidate campaigns, and positions for testing
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# We are importing candidate_campaigns data (and not politician data) because all we are doing is making sure we
# sync to the same We Vote ID. This is critical so we can link Positions to Organization & Candidate Campaign.
# At this point (June 2015) we assume the politicians have been imported from Google Civic. We aren't assigning
# the politicians a We Vote id, but instead use their full name as the identifier
candidates_import_from_sample_file(request, False)
messages.add_message(request, messages.INFO, 'Candidates imported.')
return HttpResponseRedirect(reverse('import_export:import_export_index', args=()))
@login_required
def candidate_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
candidate_search = request.GET.get('candidate_search', '')
candidate_list = []
try:
candidate_list = CandidateCampaign.objects.all()
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
filters = []
if positive_value_exists(candidate_search):
new_filter = Q(candidate_name__icontains=candidate_search)
filters.append(new_filter)
new_filter = Q(candidate_twitter_handle__icontains=candidate_search)
filters.append(new_filter)
new_filter = Q(candidate_url__icontains=candidate_search)
filters.append(new_filter)
new_filter = Q(party__icontains=candidate_search)
filters.append(new_filter)
new_filter = Q(we_vote_id__icontains=candidate_search)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
candidate_list = candidate_list.filter(final_filters)
candidate_list = candidate_list.order_by('candidate_name')[:200]
except CandidateCampaign.DoesNotExist:
# This is fine, create new
pass
election_list = Election.objects.order_by('-election_day_text')
template_values = {
'messages_on_stage': messages_on_stage,
'candidate_list': candidate_list,
'candidate_search': candidate_search,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'candidate/candidate_list.html', template_values)
@login_required
def candidate_new_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
contest_office_id = request.GET.get('contest_office_id', 0)
# These variables are here because there was an error on the edit_process_view and the voter needs to try again
candidate_name = request.GET.get('candidate_name', "")
google_civic_candidate_name = request.GET.get('google_civic_candidate_name', "")
state_code = request.GET.get('state_code', "")
candidate_twitter_handle = request.GET.get('candidate_twitter_handle', "")
candidate_url = request.GET.get('candidate_url', "")
party = request.GET.get('party', "")
ballot_guide_official_statement = request.GET.get('ballot_guide_official_statement', "")
vote_smart_id = request.GET.get('vote_smart_id', "")
maplight_id = request.GET.get('maplight_id', "")
politician_we_vote_id = request.GET.get('politician_we_vote_id', "")
# These are the Offices already entered for this election
try:
contest_office_list = ContestOffice.objects.order_by('office_name')
contest_office_list = contest_office_list.filter(google_civic_election_id=google_civic_election_id)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
contest_office_list = []
# Its helpful to see existing candidates when entering a new candidate
candidate_list = []
try:
candidate_list = CandidateCampaign.objects.all()
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(contest_office_id):
candidate_list = candidate_list.filter(contest_office_id=contest_office_id)
candidate_list = candidate_list.order_by('candidate_name')[:500]
except CandidateCampaign.DoesNotExist:
# This is fine, create new
pass
election_manager = ElectionManager()
election_results = election_manager.retrieve_election(google_civic_election_id)
state_code_from_election = ""
if election_results['election_found']:
election = election_results['election']
election_found = election_results['election_found']
state_code_from_election = election.get_election_state()
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'office_list': contest_office_list,
'contest_office_id': contest_office_id, # We need to always pass in separately for the template to work
'google_civic_election_id': google_civic_election_id,
'candidate_list': candidate_list,
'state_code_from_election': state_code_from_election,
# Incoming variables, not saved yet
'candidate_name': candidate_name,
'google_civic_candidate_name': google_civic_candidate_name,
'state_code': state_code,
'candidate_twitter_handle': candidate_twitter_handle,
'candidate_url': candidate_url,
'party': party,
'ballot_guide_official_statement': ballot_guide_official_statement,
'vote_smart_id': vote_smart_id,
'maplight_id': maplight_id,
'politician_we_vote_id': politician_we_vote_id,
}
return render(request, 'candidate/candidate_edit.html', template_values)
@login_required
def candidate_edit_view(request, candidate_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# These variables are here because there was an error on the edit_process_view and the voter needs to try again
candidate_name = request.GET.get('candidate_name', False)
google_civic_candidate_name = request.GET.get('google_civic_candidate_name', False)
candidate_twitter_handle = request.GET.get('candidate_twitter_handle', False)
candidate_url = request.GET.get('candidate_url', False)
party = request.GET.get('party', False)
ballot_guide_official_statement = request.GET.get('ballot_guide_official_statement', False)
vote_smart_id = request.GET.get('vote_smart_id', False)
maplight_id = request.GET.get('maplight_id', False)
messages_on_stage = get_messages(request)
candidate_id = convert_to_int(candidate_id)
candidate_on_stage_found = False
candidate_on_stage = CandidateCampaign()
contest_office_id = 0
google_civic_election_id = 0
try:
candidate_on_stage = CandidateCampaign.objects.get(id=candidate_id)
candidate_on_stage_found = True
contest_office_id = candidate_on_stage.contest_office_id
google_civic_election_id = candidate_on_stage.google_civic_election_id
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except CandidateCampaign.DoesNotExist:
# This is fine, create new below
pass
if candidate_on_stage_found:
# Working with Vote Smart data
try:
vote_smart_candidate_id = candidate_on_stage.vote_smart_id
rating_list_query = VoteSmartRatingOneCandidate.objects.order_by('-timeSpan') # Desc order
rating_list = rating_list_query.filter(candidateId=vote_smart_candidate_id)
except VotesmartApiError as error_instance:
# Catch the error message coming back from Vote Smart and pass it in the status
error_message = error_instance.args
status = "EXCEPTION_RAISED: {error_message}".format(error_message=error_message)
print_to_log(logger=logger, exception_message_optional=status)
rating_list = []
# Working with We Vote Positions
try:
candidate_position_list = PositionEntered.objects.order_by('stance')
candidate_position_list = candidate_position_list.filter(candidate_campaign_id=candidate_id)
# if positive_value_exists(google_civic_election_id):
# organization_position_list = candidate_position_list.filter(
# google_civic_election_id=google_civic_election_id)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
candidate_position_list = []
# Working with Offices for this election
try:
contest_office_list = ContestOffice.objects.order_by('office_name')
contest_office_list = contest_office_list.filter(
google_civic_election_id=candidate_on_stage.google_civic_election_id)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
contest_office_list = []
template_values = {
'messages_on_stage': messages_on_stage,
'candidate': candidate_on_stage,
'rating_list': rating_list,
'candidate_position_list': candidate_position_list,
'office_list': contest_office_list,
'contest_office_id': contest_office_id,
'google_civic_election_id': google_civic_election_id,
# Incoming variables, not saved yet
'candidate_name': candidate_name,
'google_civic_candidate_name': google_civic_candidate_name,
'candidate_twitter_handle': candidate_twitter_handle,
'candidate_url': candidate_url,
'party': party,
'ballot_guide_official_statement': ballot_guide_official_statement,
'vote_smart_id': vote_smart_id,
'maplight_id': maplight_id,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
# Incoming variables
'vote_smart_id': vote_smart_id,
}
return render(request, 'candidate/candidate_edit.html', template_values)
@login_required
def candidate_edit_process_view(request):
"""
Process the new or edit candidate forms
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
look_for_politician = request.POST.get('look_for_politician', False) # If this comes in with value, don't save
remove_duplicate_process = request.POST.get('remove_duplicate_process', False)
candidate_id = convert_to_int(request.POST['candidate_id'])
candidate_name = request.POST.get('candidate_name', False)
google_civic_candidate_name = request.POST.get('google_civic_candidate_name', False)
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
candidate_twitter_handle = request.POST.get('candidate_twitter_handle', False)
if positive_value_exists(candidate_twitter_handle):
candidate_twitter_handle = extract_twitter_handle_from_text_string(candidate_twitter_handle)
candidate_url = request.POST.get('candidate_url', False)
contest_office_id = request.POST.get('contest_office_id', False)
ballot_guide_official_statement = request.POST.get('ballot_guide_official_statement', False)
party = request.POST.get('party', False)
vote_smart_id = request.POST.get('vote_smart_id', False)
maplight_id = request.POST.get('maplight_id', False)
state_code = request.POST.get('state_code', False)
politician_we_vote_id = request.POST.get('politician_we_vote_id', False)
# Check to see if this candidate is already being used anywhere
candidate_on_stage_found = False
candidate_on_stage = CandidateCampaign()
if positive_value_exists(candidate_id):
try:
candidate_query = CandidateCampaign.objects.filter(id=candidate_id)
if len(candidate_query):
candidate_on_stage = candidate_query[0]
candidate_on_stage_found = True
except Exception as e:
pass
# If linked to a Politician, make sure that both politician_id and politician_we_vote_id exist
if candidate_on_stage_found:
if positive_value_exists(candidate_on_stage.politician_we_vote_id) \
and not positive_value_exists(candidate_on_stage.politician_id):
try:
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(0, candidate_on_stage.politician_we_vote_id)
if results['politician_found']:
politician = results['politician']
candidate_on_stage.politician_id = politician.id
candidate_on_stage.save()
pass
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not save candidate.')
contest_office_we_vote_id = ''
contest_office_name = ''
if positive_value_exists(contest_office_id):
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office_from_id(contest_office_id)
if results['contest_office_found']:
contest_office = results['contest_office']
contest_office_we_vote_id = contest_office.we_vote_id
contest_office_name = contest_office.office_name
election_manager = ElectionManager()
election_results = election_manager.retrieve_election(google_civic_election_id)
state_code_from_election = ""
if election_results['election_found']:
election = election_results['election']
election_found = election_results['election_found']
state_code_from_election = election.get_election_state()
best_state_code = state_code_from_election if positive_value_exists(state_code_from_election) \
else state_code
if positive_value_exists(look_for_politician):
# If here, we specifically want to see if a politician exists, given the information submitted
match_results = retrieve_candidate_politician_match_options(vote_smart_id, maplight_id,
candidate_twitter_handle,
candidate_name, best_state_code)
if match_results['politician_found']:
messages.add_message(request, messages.INFO, 'Politician found! Information filled into this form.')
matching_politician = match_results['politician']
politician_we_vote_id = matching_politician.we_vote_id
politician_twitter_handle = matching_politician.politician_twitter_handle \
if positive_value_exists(matching_politician.politician_twitter_handle) else ""
# If Twitter handle was entered in the Add new form, leave in place. Otherwise, pull from Politician entry.
candidate_twitter_handle = candidate_twitter_handle if candidate_twitter_handle \
else politician_twitter_handle
vote_smart_id = matching_politician.vote_smart_id
maplight_id = matching_politician.maplight_id if positive_value_exists(matching_politician.maplight_id) \
else ""
party = matching_politician.political_party
google_civic_candidate_name = matching_politician.google_civic_candidate_name
candidate_name = candidate_name if positive_value_exists(candidate_name) \
else matching_politician.politician_name
else:
messages.add_message(request, messages.INFO, 'No politician found. Please make sure you have entered '
'1) Candidate Name & State Code, '
'2) Twitter Handle, or '
'3) Vote Smart Id')
url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
"&candidate_name=" + str(candidate_name) + \
"&state_code=" + str(state_code) + \
"&google_civic_candidate_name=" + str(google_civic_candidate_name) + \
"&contest_office_id=" + str(contest_office_id) + \
"&candidate_twitter_handle=" + str(candidate_twitter_handle) + \
"&candidate_url=" + str(candidate_url) + \
"&party=" + str(party) + \
"&ballot_guide_official_statement=" + str(ballot_guide_official_statement) + \
"&vote_smart_id=" + str(vote_smart_id) + \
"&politician_we_vote_id=" + str(politician_we_vote_id) + \
"&maplight_id=" + str(maplight_id)
if positive_value_exists(candidate_id):
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)) + url_variables)
else:
return HttpResponseRedirect(reverse('candidate:candidate_new', args=()) + url_variables)
# Check to see if there is a duplicate candidate already saved for this election
existing_candidate_found = False
if not positive_value_exists(candidate_id):
try:
filter_list = Q()
at_least_one_filter = False
if positive_value_exists(vote_smart_id):
at_least_one_filter = True
filter_list |= Q(vote_smart_id=vote_smart_id)
if positive_value_exists(maplight_id):
at_least_one_filter = True
filter_list |= Q(maplight_id=maplight_id)
if at_least_one_filter:
candidate_duplicates_query = CandidateCampaign.objects.filter(filter_list)
candidate_duplicates_query = candidate_duplicates_query.filter(
google_civic_election_id=google_civic_election_id)
if len(candidate_duplicates_query):
existing_candidate_found = True
except Exception as e:
pass
try:
if existing_candidate_found:
# We have found a duplicate for this election
messages.add_message(request, messages.ERROR, 'This candidate is already saved for this election.')
url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
"&candidate_name=" + str(candidate_name) + \
"&state_code=" + str(state_code) + \
"&google_civic_candidate_name=" + str(google_civic_candidate_name) + \
"&contest_office_id=" + str(contest_office_id) + \
"&candidate_twitter_handle=" + str(candidate_twitter_handle) + \
"&candidate_url=" + str(candidate_url) + \
"&party=" + str(party) + \
"&ballot_guide_official_statement=" + str(ballot_guide_official_statement) + \
"&vote_smart_id=" + str(vote_smart_id) + \
"&politician_we_vote_id=" + str(politician_we_vote_id) + \
"&maplight_id=" + str(maplight_id)
return HttpResponseRedirect(reverse('candidate:candidate_new', args=()) + url_variables)
elif candidate_on_stage_found:
# Update
if candidate_twitter_handle is not False:
candidate_on_stage.candidate_twitter_handle = candidate_twitter_handle
if candidate_url is not False:
candidate_on_stage.candidate_url = candidate_url
if ballot_guide_official_statement is not False:
candidate_on_stage.ballot_guide_official_statement = ballot_guide_official_statement
if party is not False:
candidate_on_stage.party = party
# Check to see if this is a We Vote-created election
# is_we_vote_google_civic_election_id = True \
# if convert_to_int(candidate_on_stage.google_civic_election_id) >= 1000000 \
# else False
if contest_office_id is not False:
# We only allow updating of candidates within the We Vote Admin in
candidate_on_stage.contest_office_id = contest_office_id
candidate_on_stage.contest_office_we_vote_id = contest_office_we_vote_id
candidate_on_stage.contest_office_name = contest_office_name
candidate_on_stage.save()
# Now refresh the cache entries for this candidate
messages.add_message(request, messages.INFO, 'Candidate Campaign updated.')
else:
# Create new
# election must be found
if not election_found:
messages.add_message(request, messages.ERROR, 'Could not find election -- required to save candidate.')
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
required_candidate_variables = True \
if positive_value_exists(candidate_name) and positive_value_exists(contest_office_id) \
else False
if required_candidate_variables:
candidate_on_stage = CandidateCampaign(
candidate_name=candidate_name,
google_civic_election_id=google_civic_election_id,
contest_office_id=contest_office_id,
contest_office_we_vote_id=contest_office_we_vote_id,
state_code=best_state_code,
)
if google_civic_candidate_name is not False:
candidate_on_stage.google_civic_candidate_name = google_civic_candidate_name
if candidate_twitter_handle is not False:
candidate_on_stage.candidate_twitter_handle = candidate_twitter_handle
if candidate_url is not False:
candidate_on_stage.candidate_url = candidate_url
if party is not False:
candidate_on_stage.party = party
if ballot_guide_official_statement is not False:
candidate_on_stage.ballot_guide_official_statement = ballot_guide_official_statement
if vote_smart_id is not False:
candidate_on_stage.vote_smart_id = vote_smart_id
if maplight_id is not False:
candidate_on_stage.maplight_id = maplight_id
if politician_we_vote_id is not False:
candidate_on_stage.politician_we_vote_id = politician_we_vote_id
candidate_on_stage.save()
candidate_id = candidate_on_stage.id
messages.add_message(request, messages.INFO, 'New candidate saved.')
else:
# messages.add_message(request, messages.INFO, 'Could not save -- missing required variables.')
url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
"&candidate_name=" + str(candidate_name) + \
"&state_code=" + str(state_code) + \
"&google_civic_candidate_name=" + str(google_civic_candidate_name) + \
"&contest_office_id=" + str(contest_office_id) + \
"&candidate_twitter_handle=" + str(candidate_twitter_handle) + \
"&candidate_url=" + str(candidate_url) + \
"&party=" + str(party) + \
"&ballot_guide_official_statement=" + str(ballot_guide_official_statement) + \
"&vote_smart_id=" + str(vote_smart_id) + \
"&politician_we_vote_id=" + str(politician_we_vote_id) + \
"&maplight_id=" + str(maplight_id)
if positive_value_exists(candidate_id):
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)) +
url_variables)
else:
return HttpResponseRedirect(reverse('candidate:candidate_new', args=()) +
url_variables)
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not save candidate.')
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
if remove_duplicate_process:
return HttpResponseRedirect(reverse('candidate:find_and_remove_duplicate_candidates', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
else:
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
@login_required
def candidate_politician_match_view(request):
"""
Try to match the current candidate to an existing politician entry. If a politician entry isn't found,
create an entry.
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_id = request.GET.get('candidate_id', 0)
candidate_id = convert_to_int(candidate_id)
# google_civic_election_id is included for interface usability reasons and isn't used in the processing
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
if not positive_value_exists(candidate_id):
messages.add_message(request, messages.ERROR, "The candidate_id variable was not passed in.")
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_id(candidate_id)
if not positive_value_exists(results['candidate_campaign_found']):
messages.add_message(request, messages.ERROR,
"Candidate '{candidate_id}' not found.".format(candidate_id=candidate_id))
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
we_vote_candidate = results['candidate_campaign']
# Make sure we have a politician for this candidate. If we don't, create a politician entry, and save the
# politician_we_vote_id in the candidate
results = candidate_politician_match(we_vote_candidate)
display_messages = True
if results['status'] and display_messages:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def candidate_politician_match_for_this_election_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_list = []
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
# We only want to process if a google_civic_election_id comes in
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR, "Google Civic Election ID required.")
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()))
try:
candidate_list = CandidateCampaign.objects.order_by('candidate_name')
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
except CandidateCampaign.DoesNotExist:
messages.add_message(request, messages.INFO, "No candidates found for this election: {id}.".format(
id=google_civic_election_id))
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) + "?google_civic_election_id={var}"
"".format(
var=google_civic_election_id))
num_candidates_reviewed = 0
num_that_already_have_politician_we_vote_id = 0
new_politician_created = 0
existing_politician_found = 0
multiple_politicians_found = 0
other_results = 0
message = "About to loop through all of the candidates in this election to make sure we have a politician record."
print_to_log(logger, exception_message_optional=message)
# Loop through all of the candidates in this election
for we_vote_candidate in candidate_list:
num_candidates_reviewed += 1
match_results = candidate_politician_match(we_vote_candidate)
if we_vote_candidate.politician_we_vote_id:
num_that_already_have_politician_we_vote_id += 1
elif match_results['politician_created']:
new_politician_created += 1
elif match_results['politician_found']:
existing_politician_found += 1
elif match_results['politician_list_found']:
multiple_politicians_found += 1
else:
other_results += 1
message = "Google Civic Election ID: {election_id}, " \
"{num_candidates_reviewed} candidates reviewed, " \
"{num_that_already_have_politician_we_vote_id} Candidates that already have Politician Ids, " \
"{new_politician_created} politicians just created, " \
"{existing_politician_found} politicians found that already exist, " \
"{multiple_politicians_found} times we found multiple politicians and could not link, " \
"{other_results} other results". \
format(election_id=google_civic_election_id,
num_candidates_reviewed=num_candidates_reviewed,
num_that_already_have_politician_we_vote_id=num_that_already_have_politician_we_vote_id,
new_politician_created=new_politician_created,
existing_politician_found=existing_politician_found,
multiple_politicians_found=multiple_politicians_found,
other_results=other_results)
print_to_log(logger, exception_message_optional=message)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) + "?google_civic_election_id={var}".format(
var=google_civic_election_id))
@login_required
def candidate_retrieve_photos_view(request, candidate_id):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_id = convert_to_int(candidate_id)
force_retrieve = request.GET.get('force_retrieve', 0)
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_id(candidate_id)
if not positive_value_exists(results['candidate_campaign_found']):
messages.add_message(request, messages.ERROR,
"Candidate '{candidate_id}' not found.".format(candidate_id=candidate_id))
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
we_vote_candidate = results['candidate_campaign']
display_messages = True
retrieve_candidate_results = retrieve_candidate_photos(we_vote_candidate, force_retrieve)
if retrieve_candidate_results['status'] and display_messages:
messages.add_message(request, messages.INFO, retrieve_candidate_results['status'])
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
@login_required
def find_and_remove_duplicate_candidates_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_list = []
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
number_of_duplicate_candidates_processed = 0
number_of_duplicate_candidates_failed = 0
number_of_duplicates_could_not_process = 0
# TODO DALE Consider moving to candidate_campaign_list.find_and_remove_duplicate_candidates ?
# We only want to process if a google_civic_election_id comes in
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR, "Google Civic Election ID required.")
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()))
try:
# We sort by ID so that the entry which was saved first becomes the "master"
candidate_list = CandidateCampaign.objects.order_by('id')
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
except CandidateCampaign.DoesNotExist:
pass
# Loop through all of the candidates in this election
candidate_campaign_list_manager = CandidateCampaignListManager()
for we_vote_candidate in candidate_list:
# Search for other candidates within this election that match name and election
try:
candidate_duplicates_query = CandidateCampaign.objects.order_by('candidate_name')
candidate_duplicates_query = candidate_duplicates_query.filter(
google_civic_election_id=google_civic_election_id)
candidate_duplicates_query = candidate_duplicates_query.filter(
candidate_name=we_vote_candidate.candidate_name)
candidate_duplicates_query = candidate_duplicates_query.exclude(id=we_vote_candidate.id)
number_of_duplicates = candidate_duplicates_query.count()
if number_of_duplicates > 1:
# Our system can't deal with this yet
number_of_duplicates_could_not_process += 1
pass
elif number_of_duplicates == 1:
candidate_duplicate_list = candidate_duplicates_query
# If we can automatically merge, we should do it
is_automatic_merge_ok_results = candidate_campaign_list_manager.is_automatic_merge_ok(
we_vote_candidate, candidate_duplicate_list[0])
if is_automatic_merge_ok_results['automatic_merge_ok']:
automatic_merge_results = candidate_campaign_list_manager.do_automatic_merge(
we_vote_candidate, candidate_duplicate_list[0])
if automatic_merge_results['success']:
number_of_duplicate_candidates_processed += 1
else:
number_of_duplicate_candidates_failed += 1
else:
# If we cannot automatically merge, direct to a page where we can look at the two side-by-side
message = "Google Civic Election ID: {election_id}, " \
"{num_of_duplicate_candidates_processed} duplicates processed, " \
"{number_of_duplicate_candidates_failed} duplicate merges failed, " \
"{number_of_duplicates_could_not_process} could not be processed because 3 exist " \
"".format(election_id=google_civic_election_id,
num_of_duplicate_candidates_processed=number_of_duplicate_candidates_processed,
number_of_duplicate_candidates_failed=number_of_duplicate_candidates_failed,
number_of_duplicates_could_not_process=number_of_duplicates_could_not_process)
messages.add_message(request, messages.INFO, message)
message = "{is_automatic_merge_ok_results_status} " \
"".format(is_automatic_merge_ok_results_status=is_automatic_merge_ok_results['status'])
messages.add_message(request, messages.ERROR, message)
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'candidate_option1': we_vote_candidate,
'candidate_option2': candidate_duplicate_list[0],
}
return render(request, 'candidate/candidate_merge.html', template_values)
except CandidateCampaign.DoesNotExist:
pass
message = "Google Civic Election ID: {election_id}, " \
"{number_of_duplicate_candidates_processed} duplicates processed, " \
"{number_of_duplicate_candidates_failed} duplicate merges failed, " \
"{number_of_duplicates_could_not_process} could not be processed because 3 exist " \
"".format(election_id=google_civic_election_id,
number_of_duplicate_candidates_processed=number_of_duplicate_candidates_processed,
number_of_duplicate_candidates_failed=number_of_duplicate_candidates_failed,
number_of_duplicates_could_not_process=number_of_duplicates_could_not_process)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) + "?google_civic_election_id={var}".format(
var=google_civic_election_id))
@login_required
def remove_duplicate_candidate_view(request):
"""
We use this view to semi-automate the process of finding candidate duplicates. Exact
copies can be deleted automatically, and similar entries can be manually reviewed and deleted.
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
candidate_id = request.GET.get('candidate_id', 0)
remove_duplicate_process = request.GET.get('remove_duplicate_process', False)
missing_variables = False
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR, "Google Civic Election ID required.")
missing_variables = True
if not positive_value_exists(candidate_id):
messages.add_message(request, messages.ERROR, "Candidate ID required.")
missing_variables = True
if missing_variables:
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) + "?google_civic_election_id={var}"
"".format(
var=google_civic_election_id))
candidate_campaign_list_manager = CandidateCampaignListManager()
results = candidate_campaign_list_manager.remove_duplicate_candidate(candidate_id, google_civic_election_id)
if results['success']:
if remove_duplicate_process:
# Continue this process
return HttpResponseRedirect(reverse('candidate:find_and_remove_duplicate_candidates', args=()) +
"?google_civic_election_id=" + google_civic_election_id)
else:
messages.add_message(request, messages.ERROR, results['status'])
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
else:
messages.add_message(request, messages.ERROR, "Could not remove candidate {candidate_id} '{candidate_name}'."
"".format(candidate_id=candidate_id,
candidate_name=candidate_id)) # TODO Add candidate_name
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) + "?google_civic_election_id={var}"
"".format(
var=google_civic_election_id))
@login_required
def retrieve_candidate_photos_for_election_view(request, election_id):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_list = []
google_civic_election_id = convert_to_int(election_id)
# We only want to process if a google_civic_election_id comes in
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR, "Google Civic Election ID required.")
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()))
try:
candidate_list = CandidateCampaign.objects.order_by('candidate_name')
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
except CandidateCampaign.DoesNotExist:
pass
display_messages_per_candidate = False
force_retrieve = False
num_candidates_reviewed = 0
num_with_vote_smart_ids = 0
num_candidates_just_retrieved = 0
num_with_vote_smart_photos = 0
num_candidate_photos_just_retrieved = 0
message = "About to loop through all of the candidates in this election and retrieve photos."
print_to_log(logger, exception_message_optional=message)
# Loop through all of the candidates in this election
for we_vote_candidate in candidate_list:
num_candidates_reviewed += 1
retrieve_candidate_results = retrieve_candidate_photos(we_vote_candidate, force_retrieve)
if retrieve_candidate_results['vote_smart_candidate_exists']:
num_with_vote_smart_ids += 1
if retrieve_candidate_results['vote_smart_candidate_just_retrieved']:
num_candidates_just_retrieved += 1
if retrieve_candidate_results['vote_smart_candidate_photo_exists']:
num_with_vote_smart_photos += 1
if retrieve_candidate_results['vote_smart_candidate_photo_just_retrieved']:
num_candidate_photos_just_retrieved += 1
if retrieve_candidate_results['status'] and display_messages_per_candidate:
messages.add_message(request, messages.INFO, retrieve_candidate_results['status'])
message = "Google Civic Election ID: {election_id}, " \
"{num_candidates_reviewed} candidates reviewed, " \
"{num_with_vote_smart_ids} with Vote Smart Ids, " \
"{num_candidates_just_retrieved} candidates just retrieved, " \
"{num_with_vote_smart_photos} with Vote Smart Photos, and " \
"{num_candidate_photos_just_retrieved} photos just retrieved.".\
format(election_id=google_civic_election_id,
num_candidates_reviewed=num_candidates_reviewed,
num_with_vote_smart_ids=num_with_vote_smart_ids,
num_candidates_just_retrieved=num_candidates_just_retrieved,
num_with_vote_smart_photos=num_with_vote_smart_photos,
num_candidate_photos_just_retrieved=num_candidate_photos_just_retrieved)
print_to_log(logger, exception_message_optional=message)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) + "?google_civic_election_id={var}".format(
var=google_civic_election_id))
@login_required
def candidate_summary_view(request, candidate_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
candidate_id = convert_to_int(candidate_id)
candidate_on_stage_found = False
candidate_on_stage = CandidateCampaign()
try:
candidate_on_stage = CandidateCampaign.objects.get(id=candidate_id)
candidate_on_stage_found = True
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except CandidateCampaign.DoesNotExist:
# This is fine, create new
pass
if candidate_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'candidate': candidate_on_stage,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'candidate/candidate_summary.html', template_values)
@login_required
def candidate_delete_process_view(request):
"""
Delete this candidate
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_id = convert_to_int(request.GET.get('candidate_id', 0))
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
# Retrieve this candidate
candidate_on_stage_found = False
candidate_on_stage = CandidateCampaign()
if positive_value_exists(candidate_id):
try:
candidate_query = CandidateCampaign.objects.filter(id=candidate_id)
if len(candidate_query):
candidate_on_stage = candidate_query[0]
candidate_on_stage_found = True
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not find candidate -- exception.')
if not candidate_on_stage_found:
messages.add_message(request, messages.ERROR, 'Could not find candidate.')
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
# Are there any positions attached to this candidate that should be moved to another
# instance of this candidate?
position_list_manager = PositionListManager()
retrieve_public_positions = True # The alternate is positions for friends-only
position_list = position_list_manager.retrieve_all_positions_for_candidate_campaign(retrieve_public_positions, candidate_id)
if positive_value_exists(len(position_list)):
positions_found_for_this_candidate = True
else:
positions_found_for_this_candidate = False
try:
if not positions_found_for_this_candidate:
# Delete the candidate
candidate_on_stage.delete()
messages.add_message(request, messages.INFO, 'Candidate Campaign deleted.')
else:
messages.add_message(request, messages.ERROR, 'Could not delete -- '
'positions still attached to this candidate.')
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not delete candidate -- exception.')
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
|
{
"content_hash": "f5f31341ca3243573cecb98129136bc7",
"timestamp": "",
"source": "github",
"line_count": 1024,
"max_line_length": 128,
"avg_line_length": 52.3212890625,
"alnum_prop": 0.6365791291039066,
"repo_name": "wevote/WebAppPublic",
"id": "95d912b2b2d9d36f0c440507f8e2550d08e2d0f3",
"size": "53667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "candidate/views_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8022"
},
{
"name": "HTML",
"bytes": "131153"
},
{
"name": "JavaScript",
"bytes": "296860"
},
{
"name": "Python",
"bytes": "1700558"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
}
|
import json
import time
from tempest.common.rest_client import RestClient
from tempest import exceptions
class InterfacesV3ClientJSON(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(InterfacesV3ClientJSON, self).__init__(config, username,
password, auth_url,
tenant_name)
self.service = self.config.compute.catalog_v3_type
def list_interfaces(self, server):
resp, body = self.get('servers/%s/os-attach-interfaces' % server)
body = json.loads(body)
return resp, body['interface_attachments']
def create_interface(self, server, port_id=None, network_id=None,
fixed_ip=None):
post_body = dict(interface_attachment=dict())
if port_id:
post_body['port_id'] = port_id
if network_id:
post_body['net_id'] = network_id
if fixed_ip:
post_body['fixed_ips'] = [dict(ip_address=fixed_ip)]
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/os-attach-interfaces' % server,
headers=self.headers,
body=post_body)
body = json.loads(body)
return resp, body['interface_attachment']
def show_interface(self, server, port_id):
resp, body =\
self.get('servers/%s/os-attach-interfaces/%s' % (server, port_id))
body = json.loads(body)
return resp, body['interface_attachment']
def delete_interface(self, server, port_id):
resp, body =\
self.delete('servers/%s/os-attach-interfaces/%s' % (server,
port_id))
return resp, body
def wait_for_interface_status(self, server, port_id, status):
"""Waits for a interface to reach a given status."""
resp, body = self.show_interface(server, port_id)
interface_status = body['port_state']
start = int(time.time())
while(interface_status != status):
time.sleep(self.build_interval)
resp, body = self.show_interface(server, port_id)
interface_status = body['port_state']
timed_out = int(time.time()) - start >= self.build_timeout
if interface_status != status and timed_out:
message = ('Interface %s failed to reach %s status within '
'the required time (%s s).' %
(port_id, status, self.build_timeout))
raise exceptions.TimeoutException(message)
return resp, body
|
{
"content_hash": "c30a5f2445680959194506e018883480",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 40.205882352941174,
"alnum_prop": 0.5537673738112655,
"repo_name": "eltonkevani/tempest_el_env",
"id": "7fb0fa97ba40af0339af596ca0b12b79997b73d0",
"size": "3359",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/services/compute/v3/json/interfaces_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1871339"
},
{
"name": "Shell",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
"""
<Author>
Justin Cappos
<Start Date>
March 14th, 2013
<Description>
A basic library that demonstrates PolyHash when applied to passwords (see
https://polypasswordhasher.poly.edu/ for details). This includes thresholdless
password support via AES 256.
<Usage>
import polypasswordhasher
# require knowledge of 10 shares to decode others. Create a blank, new
# password file...
pph = polypasswordhasher.PolyPasswordHasher(threshold = 10, passwordfile = None)
# create three admins so that any two have the appropriate threshold
pph.create_account('admin','correct horse',5)
pph.create_account('root','battery staple',5)
pph.create_account('superuser','purple monkey dishwasher',5)
# make some normal user accounts...
pph.create_account('alice','kitten',1)
pph.create_account('bob','puppy',1)
pph.create_account('charlie','velociraptor',1)
pph.create_account('dennis','menace',0)
pph.create_account('eve','iamevil',0)
# try some logins and make sure we see what we expect...
assert(pph.is_valid_login('alice','kitten') == True)
assert(pph.is_valid_login('admin','correct horse') == True)
assert(pph.is_valid_login('alice','nyancat!') == False)
assert(pph.is_valid_login('dennis','menace') == True)
assert(pph.is_valid_login('dennis','password') == False)
# persist the password file to disk
pph.write_password_data('securepasswords')
# If I remove this from memory, I can't use the data on disk to check
# passwords without a threshold
pph = None
# let's load it back in
pph = polypasswordhasher.PolyPasswordHasher(threshold = 10,passwordfile = 'securepasswords')
# The password information is essentially useless alone. You cannot know
# if a password is valid without threshold or more other passwords!!!
try:
pph.is_valid_login('alice','kitten')
except ValueError:
pass
else:
print "Can't get here! It's still locked!!!"
# with a threshold (or more) of correct passwords, it decodes and is usable.
pph.unlock_password_data([('admin','correct horse'), ('root','battery staple'), ('bob','puppy'),('dennis','menace'])
# now, I can do the usual operations with it...
assert(pph.is_valid_login('alice','kitten') == True)
pph.create_account('moe','tadpole',1)
pph.create_account('larry','fish',0)
...
"""
__author__ = 'Justin Cappos (jcappos@poly.edu)'
__version__ = '0.1'
__license__ = 'MIT'
from .pph import PolyPasswordHasher
|
{
"content_hash": "c0d5dd5747e883272d2732864e8c50d9",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 118,
"avg_line_length": 32.68,
"alnum_prop": 0.6997144022847818,
"repo_name": "PolyPasswordHasher/PolyPasswordHasher-Python",
"id": "cdd60920e4a19987da1280fcf716b691c5ce8a0e",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polypasswordhasher/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12071"
},
{
"name": "Python",
"bytes": "37635"
}
],
"symlink_target": ""
}
|
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import time
import IPCThread
IPCThread = IPCThread.Class
# put your imports here
class Class (IPCThread):
def __init__(self, name, API):
IPCThread.__init__(self, name, API)
# put your self.registerOutput here
def run(self):
# put your init and global variab.les here - global variables need 'self.' in front of it
while 1:
# put your logic here
# you can use: output, getInputs, message, flags
# if you want to limit framerate, put it at the end
time.sleep(0.2)
|
{
"content_hash": "011c45d900470bb958c53a7944ab9af0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 97,
"avg_line_length": 25.25925925925926,
"alnum_prop": 0.624633431085044,
"repo_name": "kamil-mech/tvcs",
"id": "18bf0d0e2ed841737308e9d23e89e6c2a435dddc",
"size": "732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/legacy/ClassTemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11554"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import re
from unittest import TestCase
from uuid_upload_path.uuid import uuid
from uuid_upload_path.storage import upload_to_factory, upload_to
class TestCaseBase(TestCase):
# HACK: Backport for Python 2.6.
def assertRegexpMatches(self, value, regexp):
self.assertTrue(re.match(regexp, value))
# HACK: Backport for Python 2.6.
def assertNotIn(self, value, container):
self.assertFalse(value in container)
class UuidTest(TestCaseBase):
# It's hard to test random data, but more iterations makes the tests
# more robust.
TEST_ITERATIONS = 1000
def testUuidFormat(self):
for _ in range(self.TEST_ITERATIONS):
self.assertRegexpMatches(uuid(), r"^[a-zA-Z0-9\-_]{22}$")
def testUuidUnique(self):
generated_uuids = set()
for _ in range(self.TEST_ITERATIONS):
new_uuid = uuid()
self.assertNotIn(new_uuid, generated_uuids)
generated_uuids.add(new_uuid)
class TestModel(object):
class _meta:
app_label = "test"
class StorageTest(TestCaseBase):
def testUploadToFactory(self):
self.assertRegexpMatches(
upload_to_factory("test")(object(), "test.txt"),
r"^test/[a-zA-Z0-9\-_]{22}\.txt$"
)
def testUploadTo(self):
self.assertRegexpMatches(
upload_to(TestModel(), "test.txt"),
r"^test/testmodel/[a-zA-Z0-9\-_]{22}\.txt$"
)
|
{
"content_hash": "fad01da26efa0dd6ba54741431ac68b4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 26,
"alnum_prop": 0.6319628647214854,
"repo_name": "etianen/django-uuid-upload-path",
"id": "6f905f41f803192a94afe623e7280b616acaee5a",
"size": "1508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uuid_upload_path/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3560"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name = 'meerkat',
packages = ['meerkat'],
version = '0.3.7',
description = 'A program for reciprocal space reconstruction',
author = 'Arkadiy Simonov, Dmitry Logvinovich',
author_email = 'aglietto@gmail.com',
url = 'https://github.com/aglie/meerkat.git',
# download_url =
keywords = ['crystallography', 'single crystal', 'reciprocal space reconstruction'],
classifiers = [],
install_requires = ['fabio','h5py','numpy'],
)
|
{
"content_hash": "a71f55157038b0eec70e990a3b986de2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.6854166666666667,
"repo_name": "aglie/meerkat",
"id": "1c932b90fc3a2ec9199b3c3b402bb8598951f076",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31591"
}
],
"symlink_target": ""
}
|
from rbuild import errors
from rbuild import pluginapi
from rbuild.pluginapi import command
from rbuild_plugins.build import packages
from rbuild_plugins.build import refresh
class BuildPackagesCommand(command.BaseCommand):
"""
Builds or rebuilds specified packages, or all checked-out packages
if none are specified.
Additionally, rebuilds any other packages in the product group that
depend on the built packages.
"""
help = 'Build edited packages for this stage'
paramHelp = '[package]*'
docs = {'refresh' : 'refreshes the source of specified packages, or all '
'checked-out packages if none are specified',
'message' : 'message describing why the commit was performed',
'no-watch' : 'do not watch the job after starting the build',
'no-commit' : 'do not automatically commit successful builds',
'no-recurse' : 'default behavior left for backwards compatibility',
'recurse' : 'build every package listed on the '
'command line plus all of its dependencies',
}
def addLocalParameters(self, argDef):
argDef['no-watch'] = command.NO_PARAM
argDef['no-commit'] = command.NO_PARAM
argDef['no-recurse'] = command.NO_PARAM
argDef['recurse'] = command.NO_PARAM
argDef['refresh'] = command.NO_PARAM
argDef['message'] = '-m', command.ONE_PARAM
#pylint: disable-msg=R0201,R0903
# could be a function, and too few public methods
def runCommand(self, handle, argSet, args):
watch = not argSet.pop('no-watch', False)
commit = not argSet.pop('no-commit', False)
recurse = argSet.pop('recurse', False)
argSet.pop('no-recurse', False) # ignored, now the default
refreshArg = argSet.pop('refresh', False)
message = argSet.pop('message', None)
success = True
_, packageList, = self.requireParameters(args, allowExtra=True)
if not packageList:
if refreshArg:
handle.BuildPackages.refreshAllPackages()
jobId = handle.BuildPackages.buildAllPackages()
else:
if refreshArg:
handle.BuildPackages.refreshPackages(packageList)
jobId = handle.BuildPackages.buildPackages(packageList, recurse)
if watch and commit:
success = handle.Build.watchAndCommitJob(jobId, message)
elif watch:
success = handle.Build.watchJob(jobId)
if not success:
raise errors.PluginError('Package build failed')
class BuildPackages(pluginapi.Plugin):
def initialize(self):
self.handle.Commands.getCommandClass('build').registerSubCommand(
'packages', BuildPackagesCommand,
aliases=['package', ])
def buildAllPackages(self):
self.handle.Build.warnIfOldProductDefinition('building all packages')
job = self.createJobForAllPackages()
jobId = self.handle.facade.rmake.buildJob(job)
self.handle.productStore.setPackageJobId(jobId)
return jobId
def buildPackages(self, packageList, recurse=True):
self.handle.Build.warnIfOldProductDefinition('building packages')
job = self.createJobForPackages(packageList, recurse)
jobId = self.handle.facade.rmake.buildJob(job)
self.handle.productStore.setPackageJobId(jobId)
return jobId
def createJobForAllPackages(self):
return packages.createRmakeJobForAllPackages(self.handle)
def createJobForPackages(self, packageList, recurse=True):
return packages.createRmakeJobForPackages(self.handle, packageList,
recurse)
def refreshPackages(self, packageList=None):
return refresh.refreshPackages(self.handle, packageList)
def refreshAllPackages(self):
return refresh.refreshAllPackages(self.handle)
|
{
"content_hash": "1c80119e7c99f4686ae8e99f3271b26a",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 40.244897959183675,
"alnum_prop": 0.6610040567951319,
"repo_name": "fedora-conary/rbuild",
"id": "8283f2547a85abe57a2d6bcb653cad21a4b8b010",
"size": "4531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/buildpackages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "686899"
},
{
"name": "Shell",
"bytes": "3446"
}
],
"symlink_target": ""
}
|
import re
from mezzanine import template
from django.core.urlresolvers import RegexURLResolver
from research import urls
register = template.Library()
@register.simple_tag
def active(request, pattern=''):
path = request.path
if pattern == '/' and pattern != path:
return ''
if re.search(pattern, path):
return 'active'
else:
return ''
|
{
"content_hash": "5566e9968f348c8ecaa15b482e53431f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 53,
"avg_line_length": 17.40909090909091,
"alnum_prop": 0.6657963446475196,
"repo_name": "inkasjasonk/rs",
"id": "19ae36f9a8f0aa27c179eb4aedaea88cd2683bb9",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/base/templatetags/base_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "190435"
},
{
"name": "Python",
"bytes": "43798"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "2438"
}
],
"symlink_target": ""
}
|
"""
This script generates a convolutional neural network based classifier to detect X-ray astronomical cavities.
The codes are written under the structure designed by Theano and Lasagne.
References
==========
[1] Lasagne tutorial
http://lasagne.readthedocs.io/en/latest/user/tutorial.html
[2] Theano tutorial
http://www.deeplearning.net/software/theano/
Methods
=======
load_data: load the prepared dataset
cnn_build: build the cnn network
cnn_train: train the cnn network
cnn_test: test and estimate by the trained network
iterate_minibathces: a batch helper method
"""
import time
import pickle
import numpy as np
import scipy.io as sio
import theano
import theano.tensor as T
import lasagne
def load_data(inpath, ratio_train = 0.8, ratio_val = 0.2):
"""
Load the prepared dataset
Inputs
======
inpath: str
Path of the mat dataset
ratio_train: float
Ratio of training samples in the sample set, default as 0.8
ratio_val: float
Ratio of validation samples in the training set, default as 0.2
Outputs
=======
x_train: np.ndarray
The training data
y_train: np.ndarray
Labels for the training data
x_val: np.ndarray
The validation data
y_val: np.ndarray
Labels for the validation data
x_test: np.ndarray
The test data
y_test: np.ndarray
Labels for the test data
boxsize: integer
boxsize of the subimage
"""
# load the dataset
try:
data = sio.loadmat(inpath)
except IOError:
print("Path does not exist.")
return
data_bkg = data['data_bkg']
data_ext = data['data_ext']
data_cav = data['data_cav']
label_bkg = data['label_bkg']
label_ext = data['label_ext']
label_cav = data['label_cav']
# manually repeat the minor categories
data_ext = np.row_stack((data_ext, data_ext, data_ext))
label_ext = np.row_stack((label_ext, label_ext, label_ext))
data_cav = np.row_stack((data_cav, data_cav, data_cav, data_cav))
label_cav = np.row_stack((label_cav, label_cav, label_cav, label_cav))
# boxsize
box = data_bkg.shape[1]
boxsize = int(np.sqrt(box))
# calc train, val, test amounts, and shuffle
idx_bkg = np.random.permutation(len(label_bkg))
idx_ext = np.random.permutation(len(label_ext))
idx_cav = np.random.permutation(len(label_cav))
numtrain_bkg = int(np.floor(len(label_bkg)*ratio_train))
numtrain_ext = int(np.floor(len(label_ext)*ratio_train))
numtrain_cav = int(np.floor(len(label_cav)*ratio_train))
numval_bkg = int(np.floor(numtrain_bkg * ratio_val))
numval_ext = int(np.floor(numtrain_ext * ratio_val))
numval_cav = int(np.floor(numtrain_cav * ratio_val))
# form dataset
x_train_bkg = data_bkg[idx_bkg[0:numtrain_bkg],:]
y_train_bkg = label_bkg[idx_bkg[0:numtrain_bkg],:]
x_test_bkg = data_bkg[idx_bkg[numtrain_bkg:],:]
y_test_bkg = label_bkg[idx_bkg[numtrain_bkg:],:]
x_train_ext = data_ext[idx_ext[0:numtrain_ext],:]
y_train_ext = label_ext[idx_ext[0:numtrain_ext],:]
x_test_ext = data_ext[idx_ext[numtrain_ext:],:]
y_test_ext = label_ext[idx_ext[numtrain_ext:],:]
x_train_cav = data_cav[idx_cav[0:numtrain_cav],:]
y_train_cav = label_cav[idx_cav[0:numtrain_cav],:]
x_test_cav = data_cav[idx_cav[numtrain_cav:],:]
y_test_cav = label_cav[idx_cav[numtrain_cav:],:]
# val
x_val = np.row_stack((x_train_bkg[0:numval_bkg,:],
x_train_ext[0:numval_ext,:],
x_train_cav[0:numval_cav,:]))
y_val = np.row_stack((y_train_bkg[0:numval_bkg],
y_train_ext[0:numval_ext],
y_train_cav[0:numval_cav]))
x_val_temp = x_val.reshape(-1,1,boxsize,boxsize)
x_val = x_val_temp.astype('float32')
y_val = y_val[:,0].astype('int32')
# train
x_train = np.row_stack((x_train_bkg[numval_bkg:,:],
x_train_ext[numval_ext:,:],
x_train_cav[numval_cav:,:]))
y_train = np.row_stack((y_train_bkg[numval_bkg:],
y_train_ext[numval_ext:],
y_train_cav[numval_cav:]))
x_train_temp = x_train.reshape(-1,1,boxsize,boxsize)
x_train = x_train_temp.astype('float32')
y_train = y_train[:,0].astype('int32')
# test
x_test = np.row_stack((x_test_bkg,x_test_ext,x_test_cav))
y_test = np.row_stack((y_test_bkg,y_test_ext,y_test_cav))
x_test_temp = x_test.reshape(-1,1,boxsize,boxsize)
x_test = x_test_temp.astype('float32')
y_test = y_test[:,0].astype('int32')
return x_train,y_train,x_val,y_val,x_test,y_test,boxsize
def cnn_build(boxsize=10, num_class=3, kernel_size=[2,3,4], kernel_num=[12,12,12],
pool_flag=[False,False,False], input_var=None):
"""
Build the cnn network
Inputs
======
boxsize: integer
The size of the image boxsize
num_class: integer
Number of classes
kernel_size: list
Kernel sizes in the convolutional layers
kernel_num: list
Number of kernels (feature maps) in the ConvLayers
pool_flag: list
Flags of whether max pooling after the ConvLayers
input_var: np.ndarray
The input dataset or batched data, default as None
Output
======
network: Lasagne.layers
The pre-constructed network
"""
# Input layer
network = lasagne.layers.InputLayer(shape=(None,1,boxsize,boxsize),
input_var = input_var)
# ConvLayers
s = boxsize # size of the feature map at the last Conv layer
for k in range(len(kernel_size)):
# ConvLayer
network = lasagne.layers.Conv2DLayer(
network, num_filters=kernel_num[k],
filter_size=(kernel_size[k], kernel_size[k]),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# Max pooling
if pool_flag[k]:
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))
s = (s - kernel_size[k] + 1) // 2
else:
s = s = kernel_size[k] + 1
# Full connected layer
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=0.2),
num_units = s**2 * kernel_num[-1],
nonlinearity = lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# Output Layer
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=0.2),
num_units = num_class,
nonlinearity = lasagne.nonlinearities.softmax)
return network
def iterate_minibatches(inputs, targets, batchsize=100, shuffle=True):
"""
Design a iterator to generate batches for cnn training.
Inputs
======
inputs: np.ndarray
The dataset
targets: np.ndarray
Labels of the samples with respect to inputs
batchsize: integer
Size of the batch
shuffle: bool
Whether shuffle the indices.
output
======
yield an iterator
"""
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
def cnn_train(inputs_train, targets_train, inputs_val, targets_val,
network=None, batchsize=100, num_epochs=100):
"""
Train the cnn network
Inputs
======
inputs: np.ndarray
The training data
targets: np.ndarray
Labels of the samples
network: lasagne.layers
The CNN network
batchsize: integer
Size of the single batch
num_epochs: integer
Number of epochs for training
Output
======
network: lasagne.layers
The trained network
"""
# Init
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = cnn_build(boxsize=20, num_class=3,
kernel_size=[5,5,5], kernel_num=[15,15,15],
pool_flag=[False,False,True], input_var=input_var)
# Create the loss expression for training
prediction = lasagne.layers.get_output(network)
loss =lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# Create the update expressions for training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=0.01, momentum=0.9)
# Create the loss expression for validation
val_prediction = lasagne.layers.get_output(network, deterministic=True)
val_loss = lasagne.objectives.categorical_crossentropy(val_prediction,
target_var)
val_loss = val_loss.mean()
val_acc = T.mean(T.eq(T.argmax(val_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Create the loss expression for estimation
est_prediction = lasagne.layers.get_output(network, deterministic=True)
label_est = T.argmax(est_prediction, axis=1)
# Compile the train function
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile the validation function
val_fn = theano.function([input_var, target_var], [val_loss, val_acc])
# Compile the estimation function
est_fn = theano.function([input_var], [label_est])
# Training
print("Starting training...")
for epoch in range(num_epochs):
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(inputs_train, targets_train, batchsize, shuffle=True):
input_batch, target_batch = batch
train_err += train_fn(input_batch, target_batch)
train_batches += 1
# validation
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(inputs_val, targets_val, batchsize, shuffle=True):
input_batch, target_batch = batch
err, acc =val_fn(input_batch, target_batch)
val_err += err
val_acc += acc
val_batches += 1
# print result
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(val_acc / val_batches * 100))
return network, val_fn, est_fn
def cnn_test(inputs, targets, network, test_fn, batchsize=100):
"""
Test the trained cnn network
Inputs
======
inputs: np.ndarray
The test dataset
targets: np.ndarray
Labels of the samples
network: lasagne.layers
The trained cnn network
"""
# test
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(inputs, targets, batchsize, shuffle=True):
input_batch, target_batch = batch
err, acc =test_fn(input_batch, target_batch)
test_err += err
test_acc += acc
test_batches += 1
# print result
print("Testing the network...")
print("test loss:\t\t{:.6f}".format(test_err / test_batches))
print("test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
def cnn_estimate(inputs, network, est_fn):
"""
Estimated labelf by the trained cnn network
Inputs
======
inputs: np.ndarray
The test dataset
network: lasagne.layers
The trained cnn network
Output
======
label_est: np.ndarray
The estimated labels
"""
# estimate
print("Estimating ...")
label_est = est_fn(inputs)
return label_est[0]
def cnn_save(savepath,savedict):
"""
Save the trained network, and the corresponding theano functions
Reference
=========
[1] Save python data by pickle
http://www.cnblogs.com/pzxbc/archive/2012/03/18/2404715.html
Inputs
======
savepath: str
Path to save the result
savedict: dict
The data to be saved.
For instance: {'network':newtork,'est_fn':est_fun}
"""
# Init
fp = open(savepath,'wb')
# save
pickle.dump(savedict, fp)
# close
fp.close()
def cnn_load(modelpath):
"""
Load the saved model
Input
=====
modelpath: str
Path to load the saved model
Output
======
model: dict
The dictonary that saved the network and functions.
"""
# Init
fp = open(modelpath,'rb')
# load
model = pickle.load(fp)
# close
fp.close()
return model
def get_map(network,savepath=None):
"""
Get the feature maps from the trained network
Inputs
======
network: lasagne.layers
The trained network
savepath: str
Path to save the maps, default as None
Output
======
maps: dict
The dict that saves the maps
"""
# Init
params = lasagne.layers.get_all_params(network,
regularizable=True,
unwrap_shared=False)
# delete useless params
# del(params[-4:])
# get weights and biases
maps = {}
# numlayer = len(params)//2
numlayer = len(params)
for i in range(numlayer-2):
w = params[i]
weight = w.get_value()
# weight = weight.sum(axis=1)
# b = params[i+1]
key_weight = ('w%d' % (i+1))
# key_bias = ('b%d' % (i+1))
maps[key_weight] = weight
# maps[key_bias] = b.get_value()
if not savepath is None:
print('Saving the parameters...')
sio.savemat(savepath, maps)
return maps
|
{
"content_hash": "e9b7cf8c34c6e83830b850c4d713e744",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 108,
"avg_line_length": 29.976645435244162,
"alnum_prop": 0.5996175366527374,
"repo_name": "myinxd/cavdet",
"id": "108730f00e9fa227c9469c0cd3d3d2bbcf3e725c",
"size": "14171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cnn/cnn_build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "820698"
},
{
"name": "Matlab",
"bytes": "23470"
},
{
"name": "Python",
"bytes": "259657"
}
],
"symlink_target": ""
}
|
"""Adding the ability to specify certificate replacements
Revision ID: 33de094da890
Revises: ed422fc58ba
Create Date: 2015-11-30 15:40:19.827272
"""
# revision identifiers, used by Alembic.
revision = '33de094da890'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('certificate_replacement_associations',
sa.Column('replaced_certificate_id', sa.Integer(), nullable=True),
sa.Column('certificate_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ondelete='cascade'),
sa.ForeignKeyConstraint(['replaced_certificate_id'], ['certificates.id'], ondelete='cascade')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('certificate_replacement_associations')
### end Alembic commands ###
|
{
"content_hash": "d10468af27206556a8adf1933ae15c52",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 97,
"avg_line_length": 32.41935483870968,
"alnum_prop": 0.7213930348258707,
"repo_name": "nevins-b/lemur",
"id": "76624e966725318d75becb00909ab58dfd1faadf",
"size": "1005",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lemur/migrations/versions/33de094da890_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2688"
},
{
"name": "HTML",
"bytes": "181370"
},
{
"name": "JavaScript",
"bytes": "13785"
},
{
"name": "Makefile",
"bytes": "2581"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "610910"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.