hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfce53c8a75ecf06215e78d7bde392f8f7ffcbb
| 37,497
|
py
|
Python
|
src/transformers/models/ctrl/modeling_tf_ctrl.py
|
Ravoxsg/transformers
|
3212a1d4a6fbded40daad7153f222c91acabe82d
|
[
"Apache-2.0"
] | 31
|
2022-02-02T13:13:41.000Z
|
2022-03-29T08:37:20.000Z
|
src/transformers/models/ctrl/modeling_tf_ctrl.py
|
Ravoxsg/transformers
|
3212a1d4a6fbded40daad7153f222c91acabe82d
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/ctrl/modeling_tf_ctrl.py
|
Ravoxsg/transformers
|
3212a1d4a6fbded40daad7153f222c91acabe82d
|
[
"Apache-2.0"
] | 2
|
2022-02-07T10:53:33.000Z
|
2022-02-17T10:03:01.000Z
|
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 CTRL model."""
import warnings
import numpy as np
import tensorflow as tf
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSequenceClassificationLoss,
TFSharedEmbeddings,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_ctrl import CTRLConfig
logger = logging.get_logger(__name__)
# Identifiers consumed by the docstring-injection decorators on the model classes below.
_CHECKPOINT_FOR_DOC = "ctrl"
_CONFIG_FOR_DOC = "CTRLConfig"
_TOKENIZER_FOR_DOC = "CTRLTokenizer"
# Hub checkpoint names known to be compatible with this architecture.
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "ctrl"
    # See all CTRL models at https://huggingface.co/models?filter=ctrl
]
def angle_defn(pos, i, d_model_size):
    """Return the sinusoidal-encoding angle(s) for position ``pos`` and dimension index ``i``.

    Computes ``pos / 10000 ** (2 * (i // 2) / d_model_size)``; ``pos`` and ``i``
    may be scalars or broadcastable numpy arrays.
    """
    scaled_dim = (2 * (i // 2)) / d_model_size
    inverse_frequency = 1 / np.power(10000, scaled_dim)
    return pos * inverse_frequency
def positional_encoding(position, d_model_size):
    """Build the ``(position, d_model_size)`` sinusoidal positional-encoding table.

    Even feature indices receive sines and odd indices receive cosines; the two
    halves are concatenated (not interleaved), matching the original CTRL code.
    Returned as a TensorFlow tensor for use inside the model.
    """
    positions = np.arange(position)[:, np.newaxis]
    dims = np.arange(d_model_size)[np.newaxis, :]
    angle_rads = angle_defn(positions, dims, d_model_size)
    table = np.concatenate([np.sin(angle_rads[:, 0::2]), np.cos(angle_rads[:, 1::2])], axis=-1)
    return tf.convert_to_tensor(table)
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Compute masked scaled dot-product attention.

    Returns ``(output, attention_weights)`` where ``output`` is the
    attention-weighted combination of ``v``.
    """
    logits = tf.matmul(q, k, transpose_b=True)
    key_dim = tf.cast(shape_list(k)[-1], dtype=logits.dtype)
    logits = logits / tf.math.sqrt(key_dim)

    # Causal mask: entries flagged with 1 are pushed to a large negative value
    # so they vanish after the softmax.
    if mask is not None:
        logits += tf.cast(mask * -1e4, dtype=logits.dtype)

    # Additive padding mask, already scaled to large negatives by the caller.
    if attention_mask is not None:
        logits = logits + tf.cast(attention_mask, dtype=logits.dtype)

    weights = tf.nn.softmax(logits, axis=-1)

    # Zero out masked heads if requested.
    if head_mask is not None:
        weights = weights * head_mask

    return tf.matmul(weights, v), weights
class TFMultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head self-attention block used by the CTRL encoder layers.

    Projects queries/keys/values with Dense layers, splits them into
    `num_heads` heads, applies scaled dot-product attention (optionally with a
    key/value cache for incremental decoding) and merges the heads back with a
    final Dense projection.
    """

    def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        self.output_attentions = output_attentions
        # Per-head feature size; assumes d_model_size is divisible by num_heads — TODO confirm upstream config.
        self.depth = int(d_model_size / self.num_heads)
        self.Wq = tf.keras.layers.Dense(d_model_size, name="Wq")
        self.Wk = tf.keras.layers.Dense(d_model_size, name="Wk")
        self.Wv = tf.keras.layers.Dense(d_model_size, name="Wv")
        self.dense = tf.keras.layers.Dense(d_model_size, name="dense")

    def split_into_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
        """Run attention; returns `(output, present)` plus attention weights when
        `output_attentions` is true. `present` is the stacked (key, value) cache
        when `use_cache` is set, otherwise the placeholder `(None,)`."""
        batch_size = shape_list(q)[0]
        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)
        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        # Prepend cached keys/values from previous decoding steps, if any.
        if layer_past is not None:
            past_key, past_value = tf.unstack(layer_past, axis=0)
            k = tf.concat((past_key, k), axis=-2)
            v = tf.concat((past_value, v), axis=-2)
        if use_cache:
            present = tf.stack((k, v), axis=0)
        else:
            present = (None,)
        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        # Merge heads back: (batch, num_heads, seq, depth) -> (batch, seq, d_model).
        scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
        attn = output[1]
        original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
        output = self.dense(original_size_attention)
        outputs = (output, present)
        if output_attentions:
            outputs = outputs + (attn,)
        return outputs
class TFPointWiseFeedForwardLayer(tf.keras.layers.Layer):
    """Position-wise feed-forward block: Dense(dff, relu) followed by Dense(d_model_size)."""

    def __init__(self, d_model_size, dff, **kwargs):
        super().__init__(**kwargs)
        # The layer names "0" and "2" mirror the variable names in the original checkpoint.
        self.dense_0 = tf.keras.layers.Dense(dff, activation="relu", name="0")
        self.dense_2 = tf.keras.layers.Dense(d_model_size, name="2")

    def call(self, inputs, trainable=False):
        # NOTE(review): `trainable` is unused and is presumably a typo for `training`;
        # kept as-is to preserve the public signature.
        expanded = self.dense_0(inputs)
        return self.dense_2(expanded)
class TFEncoderLayer(tf.keras.layers.Layer):
    """One pre-norm CTRL transformer block: self-attention then a feed-forward
    network, each with layer normalization, dropout and a residual connection."""

    def __init__(
        self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
    ):
        super().__init__(**kwargs)
        self.output_attentions = output_attentions
        self.multi_head_attention = TFMultiHeadAttention(
            d_model_size, num_heads, output_attentions=self.output_attentions, name="multi_head_attention"
        )
        self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name="ffn")
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
        """Returns `(hidden_states, present[, attentions])`, mirroring the
        attention layer's output tuple."""
        # Pre-norm: normalize before attention; the residual uses the raw input x.
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(
            normed,
            normed,
            normed,
            mask,
            layer_past,
            attention_mask,
            head_mask,
            use_cache,
            output_attentions,
            training=training,
        )
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output, training=training)
        out1 = x + attn_output
        # Second sub-block: layer norm -> FFN -> dropout -> residual.
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = out1 + ffn_output
        outputs = (out2,) + attn_outputs[1:]
        return outputs
@keras_serializable
class TFCTRLMainLayer(tf.keras.layers.Layer):
    """Core CTRL transformer: shared token embeddings, fixed sinusoidal position
    encoding, a stack of `TFEncoderLayer` blocks and a final layer norm."""

    config_class = CTRLConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.use_cache = config.use_cache
        self.return_dict = config.use_return_dict
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        # Precomputed sinusoidal table, indexed later by position id.
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
        # Token embedding matrix; shared with the LM head when one is attached.
        self.w = TFSharedEmbeddings(
            config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="w"
        )
        self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)
        self.h = [
            TFEncoderLayer(
                config.n_embd,
                config.n_head,
                config.dff,
                config.resid_pdrop,
                config.layer_norm_epsilon,
                self.output_attentions,
                name=f"h_._{i}",
            )
            for i in range(config.n_layer)
        ]
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, value):
        self.w.weight = value
        self.w.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError

    def call(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        # Normalize the many accepted call conventions (positional, keyword,
        # dict/tuple first argument) into a single `inputs` dict with defaults
        # resolved from the config.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        # If using past key value states, only the last tokens
        # should be given as an input
        if inputs["past"] is not None:
            if inputs["input_ids"] is not None:
                inputs["input_ids"] = inputs["input_ids"][:, -1:]
            if inputs["inputs_embeds"] is not None:
                inputs["inputs_embeds"] = inputs["inputs_embeds"][:, -1:]
            if inputs["token_type_ids"] is not None:
                inputs["token_type_ids"] = inputs["token_type_ids"][:, -1:]

        # Exactly one of input_ids / inputs_embeds must be provided.
        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif inputs["input_ids"] is not None:
            input_shape = shape_list(inputs["input_ids"])
            inputs["input_ids"] = tf.reshape(inputs["input_ids"], [-1, input_shape[-1]])
        elif inputs["inputs_embeds"] is not None:
            input_shape = shape_list(inputs["inputs_embeds"])[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # Derive how many positions are already cached so new position ids start after them.
        if inputs["past"] is None:
            past_length = 0
            inputs["past"] = [None] * len(self.h)
        else:
            past_length = shape_list(inputs["past"][0][0])[-2]
        if inputs["position_ids"] is None:
            inputs["position_ids"] = tf.expand_dims(
                tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0
            )
            inputs["position_ids"] = tf.tile(inputs["position_ids"], [input_shape[0], 1])

        # Attention mask.
        if inputs["attention_mask"] is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            inputs["attention_mask"] = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            one_cst = tf.constant(1.0)
            ten_thousand_cst = tf.constant(-10000.0)
            inputs["attention_mask"] = tf.cast(inputs["attention_mask"], dtype=one_cst.dtype)
            inputs["attention_mask"] = tf.multiply(tf.subtract(one_cst, inputs["attention_mask"]), ten_thousand_cst)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if inputs["head_mask"] is not None:
            raise NotImplementedError
        else:
            inputs["head_mask"] = [None] * self.num_layers

        # Token-type embeddings are scaled like the token embeddings (by sqrt(d_model)).
        if inputs["token_type_ids"] is not None:
            inputs["token_type_ids"] = tf.reshape(
                inputs["token_type_ids"], [-1, shape_list(inputs["token_type_ids"])[-1]]
            )
            token_type_embeds = self.w(inputs["token_type_ids"], mode="embedding")
            token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype))
        else:
            token_type_embeds = tf.constant(0.0)
        inputs["position_ids"] = tf.reshape(inputs["position_ids"], [-1, shape_list(inputs["position_ids"])[-1]])

        if inputs["inputs_embeds"] is None:
            inputs["inputs_embeds"] = self.w(inputs["input_ids"], mode="embedding")
        seq_len = input_shape[-1]
        # Causal mask: 1 above the diagonal (future positions), 0 elsewhere.
        mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)

        inputs["inputs_embeds"] *= tf.math.sqrt(tf.cast(self.d_model_size, inputs["inputs_embeds"].dtype))

        pos_embeds = tf.gather(self.pos_encoding, inputs["position_ids"])
        pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)
        hidden_states = inputs["inputs_embeds"] + pos_embeds + token_type_embeds

        hidden_states = self.dropout(hidden_states, training=inputs["training"])

        output_shape = input_shape + [shape_list(hidden_states)[-1]]
        presents = () if inputs["use_cache"] else None
        all_hidden_states = () if inputs["output_hidden_states"] else None
        all_attentions = () if inputs["output_attentions"] else None
        # Run the encoder stack, threading each layer's cache entry through.
        for i, (h, layer_past) in enumerate(zip(self.h, inputs["past"])):
            if inputs["output_hidden_states"]:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
            outputs = h(
                hidden_states,
                mask,
                layer_past,
                inputs["attention_mask"],
                inputs["head_mask"][i],
                inputs["use_cache"],
                inputs["output_attentions"],
                training=inputs["training"],
            )
            hidden_states, present = outputs[:2]

            if inputs["use_cache"]:
                presents = presents + (present,)

            if inputs["output_attentions"]:
                all_attentions = all_attentions + (outputs[2],)

        hidden_states = self.layernorm(hidden_states)
        hidden_states = tf.reshape(hidden_states, output_shape)
        if inputs["output_hidden_states"]:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if inputs["output_attentions"]:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)

        if not inputs["return_dict"]:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )
class TFCTRLPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CTRLConfig
    # Attribute name of the core (headless) model; used when loading checkpoints
    # into subclasses with or without task heads.
    base_model_prefix = "transformer"
CTRL_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument :
- a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Parameters:
config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
input past key value states).
Indices of input sequence tokens in the vocabulary.
If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
Indices can be obtained using [`CTRLTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
past (`List[tf.Tensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
`past` output below). Can be used to speed up sequential decoding. The token ids which have their past
given to this model should not be passed as input ids as they have already been computed.
attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
    "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
    CTRL_START_DOCSTRING,
)
class TFCTRLModel(TFCTRLPreTrainedModel):
    """Headless CTRL model; a thin wrapper delegating to `TFCTRLMainLayer`."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        # Normalize the accepted call conventions into a dict, then forward
        # everything unchanged to the main layer.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )
        outputs = self.transformer(
            input_ids=inputs["input_ids"],
            past=inputs["past"],
            attention_mask=inputs["attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            head_mask=inputs["head_mask"],
            inputs_embeds=inputs["inputs_embeds"],
            use_cache=inputs["use_cache"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        return outputs

    def serving_output(self, output):
        # Convert tuple-valued fields to tensors so the output is representable
        # in a SavedModel serving signature; drop fields disabled in the config.
        pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFBaseModelOutputWithPast(
            last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns
        )
class TFCTRLLMHead(tf.keras.layers.Layer):
    """Language-modeling head: maps hidden states to vocabulary logits through the
    shared input-embedding matrix (in "linear" mode) plus a learned output bias."""

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        # The bias is created lazily at build time so its shape tracks vocab_size.
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        # Resizing the bias implicitly resizes the head's vocab size.
        self.bias = value["bias"]
        self.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        # "linear" mode multiplies by the transposed embedding matrix -> vocab logits.
        hidden_states = self.input_embeddings(hidden_states, mode="linear")
        hidden_states = hidden_states + self.bias
        return hidden_states
@add_start_docstrings(
    """
    The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    CTRL_START_DOCSTRING,
)
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):
    """CTRL with a tied-weight language-modeling head for causal generation."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")

        # The LM head reuses the transformer's embedding layer (weight tying).
        self.lm_head = TFCTRLLMHead(config, self.transformer.w, name="lm_head")

    def get_lm_head(self):
        return self.lm_head

    def get_prefix_bias_name(self):
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return self.name + "/" + self.lm_head.name

    def prepare_inputs_for_generation(self, inputs, past, **kwargs):
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            inputs = tf.expand_dims(inputs[:, -1], -1)

        return {"input_ids": inputs, "past": past, "use_cache": kwargs["use_cache"]}

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFCausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None,
        training=False,
        **kwargs,
    ):
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should be in `[0, ..., config.vocab_size - 1]`.
            The labels are shifted one position to the left inside this method, so `labels` can simply be a copy of
            `input_ids`.
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            labels=labels,
            training=training,
            kwargs_call=kwargs,
        )
        transformer_outputs = self.transformer(
            input_ids=inputs["input_ids"],
            past=inputs["past"],
            attention_mask=inputs["attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            head_mask=inputs["head_mask"],
            inputs_embeds=inputs["inputs_embeds"],
            use_cache=inputs["use_cache"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        hidden_states = transformer_outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if inputs["labels"] is not None:
            # shift labels to the left and cut last logit token
            shifted_logits = logits[:, :-1]
            labels = inputs["labels"][:, 1:]
            loss = self.hf_compute_loss(labels, shifted_logits)

        if not inputs["return_dict"]:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def serving_output(self, output):
        # Convert tuple-valued fields to tensors for the SavedModel signature.
        pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFCausalLMOutputWithPast(logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    The CTRL Model transformer with a sequence classification head on top (linear layer).

    [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1, GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    CTRL_START_DOCSTRING,
)
class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):
    """CTRL with a linear sequence-classification head applied to the last (non-padding) token."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.classifier = tf.keras.layers.Dense(
            config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="classifier",
            use_bias=False,
        )
        self.transformer = TFCTRLMainLayer(config, name="transformer")

    def get_output_embeddings(self):
        return self.transformer.w

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None,
        training=False,
        **kwargs,
    ):
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            labels=labels,
            training=training,
            kwargs_call=kwargs,
        )
        transformer_outputs = self.transformer(
            input_ids=inputs["input_ids"],
            past=inputs["past"],
            attention_mask=inputs["attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            head_mask=inputs["head_mask"],
            inputs_embeds=inputs["inputs_embeds"],
            use_cache=inputs["use_cache"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )

        hidden_states = transformer_outputs[0]
        logits = self.classifier(hidden_states)
        in_logits = None
        # Locate the last non-padding token per row so classification uses its logits.
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if inputs["input_ids"] is not None:
                # Count of non-pad tokens minus one = index of the last real token.
                sequence_lengths = (
                    tf.reduce_sum(
                        tf.cast(
                            tf.math.not_equal(inputs["input_ids"], self.config.pad_token_id),
                            dtype=inputs["input_ids"].dtype,
                        ),
                        -1,
                        keepdims=False,
                    )
                    - 1
                )
                in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
            else:
                sequence_lengths = -1

                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )
        loss = None

        if inputs["labels"] is not None:
            # NOTE(review): this checks the raw `input_ids` argument rather than
            # inputs["input_ids"] — presumably equivalent after input_processing; verify.
            if input_ids is not None:
                batch_size, sequence_length = shape_list(inputs["input_ids"])[:2]
            else:
                batch_size, sequence_length = shape_list(inputs["inputs_embeds"])[:2]
            assert (
                self.config.pad_token_id is not None or batch_size == 1
            ), "Cannot handle batch sizes > 1 if no padding token is defined."

            if not tf.is_tensor(sequence_lengths):
                in_logits = logits[0:batch_size, sequence_lengths]

            loss = self.hf_compute_loss(
                tf.reshape(inputs["labels"], [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])
            )

        pooled_logits = in_logits if in_logits is not None else logits

        if not inputs["return_dict"]:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=pooled_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
| 40.890949
| 119
| 0.647065
|
acfce5567f616d1d98c372b468f2f012b692d23c
| 296
|
py
|
Python
|
src/plugins/oregistryrm/plugin.py
|
radomirklacza/C-BAS
|
5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32
|
[
"BSD-3-Clause"
] | null | null | null |
src/plugins/oregistryrm/plugin.py
|
radomirklacza/C-BAS
|
5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32
|
[
"BSD-3-Clause"
] | null | null | null |
src/plugins/oregistryrm/plugin.py
|
radomirklacza/C-BAS
|
5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32
|
[
"BSD-3-Clause"
] | 2
|
2017-08-07T15:24:05.000Z
|
2018-10-11T10:53:23.000Z
|
import eisoil.core.pluginmanager as pm
import oregistryexceptions
from oregistryresourcemanager import ORegistryResourceManager
def setup():
    """Plugin entry point: register the O-Registry RM and its exceptions module."""
    resource_manager = ORegistryResourceManager()
    pm.registerService('oregistryrm', resource_manager)
    pm.registerService('oregistryexceptions', oregistryexceptions)
| 29.6
| 66
| 0.820946
|
acfce62970ced19091b5d743958afbbd282f546d
| 3,755
|
py
|
Python
|
ml_code/AudioSync/hough.py
|
ankitshah009/Daisy_Shooter_Localization
|
55ebbc084e40ddc2072f24802a4b1e32cad84d9e
|
[
"Apache-2.0"
] | 18
|
2019-06-03T14:19:02.000Z
|
2020-10-19T18:20:23.000Z
|
ml_code/AudioSync/hough.py
|
ankitshah009/Daisy_Shooter_Localization
|
55ebbc084e40ddc2072f24802a4b1e32cad84d9e
|
[
"Apache-2.0"
] | 1
|
2019-06-22T19:49:43.000Z
|
2019-07-06T21:57:03.000Z
|
ml_code/AudioSync/hough.py
|
ankitshah009/Daisy_Shooter_Localization
|
55ebbc084e40ddc2072f24802a4b1e32cad84d9e
|
[
"Apache-2.0"
] | 7
|
2019-06-22T19:45:18.000Z
|
2020-10-01T22:32:25.000Z
|
# coding=utf-8
# get hough-like transform from matching matrix
import sys,os
import numpy as np
from ChunWai import *
import warnings
warnings.filterwarnings("error")
class hough:
    """Hough-like accumulator over a matching matrix.

    Each (anti)diagonal of the matrix ``m`` corresponds to one candidate time
    offset between the two sequences; ``run`` scores every diagonal and the
    best-scoring one gives the alignment.  (Python 2 code: uses ``xrange``
    and print statements.)
    """
    def __init__(self,m,addf,keepfor): # whether to remember evidence of each diagonal. cost space
        # addf: multiplicative bonus base applied to consecutive above-threshold frames
        self.addf = addf
        # keepfor: how many below-threshold frames the accumulated bonus survives
        self.keepfor = keepfor
        self.m = m # m is a (n,m), n is the time frame for one, m is time frame for other one
        #self.k = 1 # xie lv
        #self.timeWidth = 1 # the time hist of the line
        self.minFrame = 1 # ignore line contains less frame than minFrame
        # one histogram bin per diagonal of m that has at least minFrame cells
        self.histCount = m.shape[0]+m.shape[1]-2*self.minFrame
        self.histVal = np.zeros(self.histCount,dtype="float") # the raw hist value. will add up timeWidth's
        # number of frames contributing to each bin (used to normalise histVal)
        self.histLength = np.zeros(self.histCount)
        self.suppress = 0 # suppress absolute hist value lower than this.tricky, it could be over 100
        self.evidence = [(-1.0,0,0) for i in xrange(self.histCount)] # the significant part in the hough transform, will fill up in countVal

    def run(self):
        """Score every diagonal of self.m; return (normalised scores, offset of bin 0).

        The second return value is the time offset represented by the first
        histogram bin (``-m.shape[0] + 1``); bin i corresponds to offset
        ``i - m.shape[0] + minFrame``.
        """
        #self.m = np.fliplr(self.m) # what? still has peaks?
        # caculate each line,return the highest value line's start and end in each sequence
        for i in xrange(self.histCount):
            #get a line of points to calculate value
            self.histVal[i],self.evidence[i],self.histLength[i] = self.countVal(np.diagonal(self.m,i-self.m.shape[0]+self.minFrame)) # start from the left bottom
        #return [sum(self.histVal[i:i+self.timeWidth]) for i in range(0,self.histCount,self.timeWidth)] # may be slow
        #self.histVal.resize((int(self.histCount/float(self.timeWidth)),self.timeWidth))
        #res = self.histVal.sum(axis=1) # timeWidth is not correct, should have over lap, cumulative sum a timeWidth's histVal# or a sum of neighboring histVal
        #res = self.histVal
        # normalise each bin by the diagonal length so short diagonals are not penalised
        res = self.histVal/self.histLength
        res[res<self.suppress] = 0
        #norm = np.linalg.norm(res)
        #if(norm == 0):
        #    return res,-self.m.shape[0]+1
        #return res/norm,-self.m.shape[0]+1#np.argmax(res)-self.m.shape[0]+1 # the normalize hough matrix, and the offset of video1 to video2(x axis,shape[1]),
        # how many time frame of video2 is passed before video1 is played
        return res,-self.m.shape[0]+1

    def countVal(self,arr):
        """Score one diagonal: sum values, boosting runs of above-threshold frames.

        Returns (sumVal, [maxVal, index, runLength], len(arr)).  Overflows in
        the exponential bonus are caught and the previous value is reused.
        """
        #give an arr, sum up value. for continues non-zero, add more
        # the following makes big
        #addf = 4
        #keepfor = 10 # keep accumulated addf for how many none thre one
        addf = self.addf
        keepfor = self.keepfor
        thres = 1.0
        sumVal = 0.0
        cont1,cont0,accf = 0,0,0 # continue non-zero count, continue zero count, accumulated factor
        #remember where the sumVal get the most value
        maxValTime = [-1.0,0,0] # the maxVal and the offset index,accumulated for how long
        preVal = 0.0
        for i in xrange(len(arr)):
            try:
                # exponential bonus: addf**accf grows with the current run length
                thisVal = arr[i] * addf**accf
            except Exception as e: #OverflowError: long int too large to convert to float
                thisVal = preVal
            preVal = thisVal # save for next error
            if(thisVal > maxValTime[0]):
                maxValTime = [thisVal,i,accf+1] # +1 , the total length of none zero
            preSum = sumVal
            try:
                sumVal+= thisVal
            except Exception as e: # over flow again
                print "warning, sum val overflow, so it stays the same"
                sumVal = preSum
            if(arr[i] > thres): # not necessary to be zero, since the matching matrix is not 0-1
                cont1+=1
                accf+=1
                cont0=0
            else:
                cont1=0
                cont0+=1
                # the bonus only resets after keepfor consecutive low frames
                if(cont0 > keepfor):
                    accf=0
        return sumVal,maxValTime,len(arr)
if __name__ == "__main__":
a = np.array([0,1,1,1,1,0,0,1,1,0,0,0,1,0,0,0,0])
b = np.array([0,1,1,1,1,1,1,0,0,0,0,0,1,0,0,0,0])
h = hough(np.zeros((20,20)))
print h.countVal(a)
print h.countVal(b)
timeWidth = 2
print a
print [sum(a[i:i+timeWidth]) for i in range(0,len(a),timeWidth)]
a.resize((int(len(a)/float(timeWidth)),timeWidth))
print a.sum(axis=1)
| 38.71134
| 154
| 0.694541
|
acfce63d359aa8a1e888572b0e4642a85de94bfd
| 4,458
|
py
|
Python
|
__init__.py
|
JarbasSkills/skill-mytmovies
|
ed1c541c96a47c5608f25abd898ef84ec4c84279
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
JarbasSkills/skill-mytmovies
|
ed1c541c96a47c5608f25abd898ef84ec4c84279
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
JarbasSkills/skill-mytmovies
|
ed1c541c96a47c5608f25abd898ef84ec4c84279
|
[
"Apache-2.0"
] | null | null | null |
from os.path import join, dirname
from ovos_plugin_common_play.ocp import MediaType, PlaybackType
from ovos_utils.log import LOG
from ovos_utils.parse import fuzzy_match
from ovos_workshop.skills.common_play import OVOSCommonPlaybackSkill, \
ocp_search, ocp_featured_media
from youtube_archivist import YoutubeMonitor
class MytMoviesSkill(OVOSCommonPlaybackSkill):
    """OCP skill exposing the 'Myt Movies' YouTube archive as playable movie results."""

    def __init__(self):
        super().__init__("MytMovies")
        # Media types this skill answers search requests for.
        self.supported_media = [MediaType.MOVIE,
                                MediaType.GENERIC]
        self.skill_icon = self.default_bg = join(dirname(__file__), "ui", "mytmovies_icon.jpg")
        # YouTube channel monitor: drops entries shorter than 30 minutes and
        # anything whose title matches the blacklisted keywords (trailers,
        # clips, soundtracks, ...), keeping only full movies.
        self.archive = YoutubeMonitor(db_name="MytMovies",
                                      min_duration=30 * 60,
                                      logger=LOG,
                                      blacklisted_kwords=["trailer", "teaser", "movie scene",
                                                          "movie clip", "behind the scenes",
                                                          "Movie Preview", "Documentary", "Episode",
                                                          "soundtrack", " OST", "opening theme"])

    def initialize(self):
        """Seed the archive DB from a pre-built index, then monitor the channel."""
        url = "https://www.youtube.com/c/SuperheroMovieClip"
        bootstrap = f"https://raw.githubusercontent.com/OpenJarbas/streamindex/main/{self.archive.db.name}.json"
        self.archive.bootstrap_from_url(bootstrap)
        self.archive.monitor(url)
        # run the monitor as a background daemon thread
        self.archive.setDaemon(True)
        self.archive.start()

    # matching
    def match_skill(self, phrase, media_type):
        """Base confidence for this skill from the query alone (before per-title matching)."""
        score = 0
        if self.voc_match(phrase, "movie") or media_type == MediaType.MOVIE:
            score += 10
        if self.voc_match(phrase, "maverick"):
            score += 50
        return score

    def normalize_title(self, title):
        """Strip vocabulary words and punctuation so only the movie title remains."""
        title = title.lower().strip()
        title = self.remove_voc(title, "maverick")
        title = self.remove_voc(title, "movie")
        # remove separators and (curly) quote characters left over from video titles
        title = title.replace("|", "").replace('"', "") \
            .replace(':', "").replace('”', "").replace('“', "") \
            .strip()
        return " ".join(
            [w for w in title.split(" ") if w])  # remove extra spaces

    def calc_score(self, phrase, match, base_score=0):
        """Combine the base score with fuzzy title similarity, clamped to 100."""
        score = base_score
        score += 100 * fuzzy_match(phrase.lower(), match["title"].lower())
        return min(100, score)

    def get_playlist(self, score=50, num_entries=250):
        """Return the first `num_entries` archive entries bundled as one playlist result."""
        pl = self.featured_media()[:num_entries]
        return {
            "match_confidence": score,
            "media_type": MediaType.MOVIE,
            "playlist": pl,
            "playback": PlaybackType.VIDEO,
            "skill_icon": self.skill_icon,
            "image": self.skill_icon,
            "bg_image": self.default_bg,
            "title": "Myt Movies (Movie Playlist)",
            "author": "Myt Movies"
        }

    @ocp_search()
    def search_db(self, phrase, media_type):
        """OCP search handler: yield the playlist and, for movie queries, per-title matches."""
        base_score = self.match_skill(phrase, media_type)
        if self.voc_match(phrase, "mytmovies"):
            yield self.get_playlist(base_score)
        if media_type == MediaType.MOVIE:
            # only search db if user explicitly requested movies
            phrase = self.normalize_title(phrase)
            for url, video in self.archive.db.items():
                yield {
                    "title": video["title"],
                    "author": "Full Free Films",
                    "match_confidence": self.calc_score(phrase, video, base_score),
                    "media_type": MediaType.MOVIE,
                    "uri": "youtube//" + url,
                    "playback": PlaybackType.VIDEO,
                    "skill_icon": self.skill_icon,
                    "skill_id": self.skill_id,
                    "image": video["thumbnail"],
                    "bg_image": self.default_bg
                }

    @ocp_featured_media()
    def featured_media(self):
        """Return every archive entry as a featured-media result, sorted by the archive."""
        return [{
            "title": video["title"],
            "image": video["thumbnail"],
            "match_confidence": 70,
            "media_type": MediaType.MOVIE,
            "uri": "youtube//" + video["url"],
            "playback": PlaybackType.VIDEO,
            "skill_icon": self.skill_icon,
            "bg_image": video["thumbnail"],
            "skill_id": self.skill_id
        } for video in self.archive.sorted_entries()]
def create_skill():
    """Entry point used by the skill loader to instantiate this skill."""
    skill = MytMoviesSkill()
    return skill
| 40.527273
| 112
| 0.549349
|
acfce7c14144726d66f64a81c52ff4a2716fc131
| 697
|
py
|
Python
|
torchmetrics/text/__init__.py
|
BeyondTheProof/metrics
|
8af688daff819a95f4cb3d757ffc919c86072ee9
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/text/__init__.py
|
BeyondTheProof/metrics
|
8af688daff819a95f4cb3d757ffc919c86072ee9
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/text/__init__.py
|
BeyondTheProof/metrics
|
8af688daff819a95f4cb3d757ffc919c86072ee9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.text.bleu import BLEUScore # noqa: F401
from torchmetrics.text.wer import WER # noqa: F401
| 43.5625
| 74
| 0.767575
|
acfce847ca9efa954225536d2497f3c1a023b28c
| 7,461
|
py
|
Python
|
test/IECoreScene/MeshAlgoDistributePointsTest.py
|
aitorvfx/cortex
|
c0c27794fc67ccfce68b064e284747165c49ef1c
|
[
"BSD-3-Clause"
] | 5
|
2015-09-13T14:49:30.000Z
|
2017-02-04T21:04:59.000Z
|
test/IECoreScene/MeshAlgoDistributePointsTest.py
|
aitorvfx/cortex
|
c0c27794fc67ccfce68b064e284747165c49ef1c
|
[
"BSD-3-Clause"
] | null | null | null |
test/IECoreScene/MeshAlgoDistributePointsTest.py
|
aitorvfx/cortex
|
c0c27794fc67ccfce68b064e284747165c49ef1c
|
[
"BSD-3-Clause"
] | 3
|
2015-02-03T17:13:40.000Z
|
2022-01-07T15:55:00.000Z
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import re
import unittest
import imath
import IECore
import IECoreScene
class MeshAlgoDistributePointsTest( unittest.TestCase ) :
    """Tests for IECoreScene.MeshAlgo.distributePoints().

    FIX: the deprecated unittest aliases failUnless/assert_ (removed in
    Python 3.12) are replaced by assertTrue, which exists on every
    supported Python version.
    """

    def pointTest( self, mesh, points, density, error=0.05 ) :
        """Check that `points` lie on `mesh` and roughly match `density` per face."""
        self.assertTrue( "P" in points )
        self.assertEqual( points.numPoints, points['P'].data.size() )
        self.assertTrue( points.arePrimitiveVariablesValid() )
        mesh = IECoreScene.MeshAlgo.triangulate( mesh )
        meshEvaluator = IECoreScene.MeshPrimitiveEvaluator( mesh )
        result = meshEvaluator.createResult()
        pointsPerFace = [ 0 ] * mesh.verticesPerFace.size()
        positions = points["P"].data
        ## test that the points are on the mesh
        for p in positions :
            self.assertAlmostEqual( meshEvaluator.signedDistance( p, result ), 0.0, 3 )
            pointsPerFace[result.triangleIndex()] += 1
        ## test that we have roughly the expected density per face
        origDensity = density
        mesh["faceArea"] = IECoreScene.MeshAlgo.calculateFaceArea( mesh )
        for f in range( 0, mesh.verticesPerFace.size() ) :
            if "density" in mesh :
                # per-face density primvar scales the requested density
                density = mesh["density"].data[f] * origDensity
            self.assertTrue( abs(pointsPerFace[f] - density * mesh['faceArea'].data[f]) <= density * error )

    def testSimple( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = 100 )
        self.pointTest( m, p, 100 )

    def testRaisesExceptionIfInvalidUVs( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        del m['uv']
        # NOTE: assertRaisesRegexp is the Python-2-compatible spelling; on
        # Python >= 3.2 the non-deprecated name is assertRaisesRegex.
        with self.assertRaisesRegexp( RuntimeError, re.escape('MeshAlgo::distributePoints : MeshPrimitive has no uv primitive variable named "uv" of type FaceVarying or Vertex.') ) :
            IECoreScene.MeshAlgo.distributePoints( mesh = m, density = 100 )

    def testHighDensity( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = 50000, offset = imath.V2f( 0.0001, 0.0001 ) )
        self.pointTest( m, p, 50000 )

    def testDensityMaskPrimVar( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        m = IECoreScene.MeshAlgo.triangulate( m )
        numFaces = m.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform )
        # ramp density from 0 to ~1 across the faces
        m['density'] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ float(x)/numFaces for x in range( 0, numFaces ) ] ) )
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = 100 )
        self.pointTest( m, p, 100, error=0.1 )
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = 1000 )
        self.pointTest( m, p, 1000, error=0.1 )

    def testOffsetParameter( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        density = 500
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = density, offset = imath.V2f( 0, 0 ) )
        pOffset = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = density, offset = imath.V2f( 0.5, 0.75 ) )
        self.pointTest( m, p, density )
        self.pointTest( m, pOffset, density )
        # a different offset must produce a different point distribution
        self.assertNotEqual( p.numPoints, pOffset.numPoints )
        pos = p["P"].data
        posOffset = pOffset["P"].data
        for i in range( 0, min(p.numPoints, pOffset.numPoints) ) :
            self.assertNotEqual( pos[i], posOffset[i] )

    def testDistanceBetweenPoints( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        density = 300
        points = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = density )
        positions = points["P"].data
        tree = IECore.V3fTree( points["P"].data )
        # no two generated points should be closer than 1/density
        for i in range( 0, positions.size() ) :
            neighbours = list(tree.nearestNNeighbours( positions[i], 6 ))
            self.assertTrue( i in neighbours )
            neighbours.remove( i )
            for n in neighbours :
                self.assertTrue( ( positions[i] - positions[n] ).length() > 1.0 / density )

    def testPointOrder( self ) :
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        m2 = m.copy()
        m2['P'].data += imath.V3f( 0, 5, 0 )
        pos = m["P"].data
        pos2 = m2["P"].data
        for i in range( 0, pos.size() ) :
            self.assertNotEqual( pos[i], pos2[i] )
        density = 500
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = density )
        p2 = IECoreScene.MeshAlgo.distributePoints( mesh = m2, density = density )
        self.pointTest( m, p, density )
        self.pointTest( m2, p2, density )
        # translating the mesh must translate the points without reordering them
        self.assertEqual( p.numPoints, p2.numPoints )
        pos = p["P"].data
        pos2 = p2["P"].data
        for i in range( 0, p.numPoints ) :
            self.assertTrue( pos2[i].equalWithRelError( pos[i] + imath.V3f( 0, 5, 0 ), 1e-6 ) )

    def testDensityRange( self ) :
        # negative densities are rejected
        m = IECore.Reader.create( "test/IECore/data/cobFiles/pCubeShape1.cob" ).read()
        self.assertRaises( RuntimeError, IECoreScene.MeshAlgo.distributePoints, m, -1.0 )

    def testVertexUVs( self ) :
        m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 4 ) )
        # We know that createPlane creates FaceVarying uvs, but the data is actually per vertex, and just indexed
        # to be FaceVarying, so we can copy the data as Vertex uvs
        m2 = m.copy()
        m2["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, m["uv"].data )
        density = 500
        p = IECoreScene.MeshAlgo.distributePoints( mesh = m, density = density )
        p2 = IECoreScene.MeshAlgo.distributePoints( mesh = m2, density = density )
        self.assertEqual( p, p2 )

    def setUp( self ) :
        # point the distribution code at the test tile set
        os.environ["CORTEX_POINTDISTRIBUTION_TILESET"] = "test/IECore/data/pointDistributions/pointDistributionTileSet2048.dat"
if __name__ == "__main__":
unittest.main()
| 40.548913
| 181
| 0.702185
|
acfce8c21e1061c4efaf5c9cb9e2140429b77b7f
| 2,220
|
py
|
Python
|
tobiko/tests/functional/podman/test_client.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | 1
|
2022-01-11T20:50:06.000Z
|
2022-01-11T20:50:06.000Z
|
tobiko/tests/functional/podman/test_client.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
tobiko/tests/functional/podman/test_client.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import types
import testtools
import tobiko
from tobiko import podman
from tobiko.openstack import keystone
from tobiko.openstack import topology
class PodmanNodeFixture(tobiko.SharedFixture):
    """Shared fixture that selects the first OpenStack node with podman running."""

    node = None

    def setup_fixture(self):
        candidates = topology.list_openstack_nodes()
        for candidate in candidates:
            assert candidate.ssh_client is not None
            if podman.is_podman_running(ssh_client=candidate.ssh_client):
                self.node = candidate
                break
        if self.node is None:
            # no suitable node found: skip rather than fail
            names = ' '.join(candidate.name for candidate in candidates)
            tobiko.skip_test(f"Podman server is not running in any of nodes {names}")
@keystone.skip_unless_has_keystone_credentials()
class PodmanClientTest(testtools.TestCase):
    """Functional checks for the podman client helpers."""

    node = tobiko.required_setup_fixture(PodmanNodeFixture)

    @property
    def ssh_client(self):
        # SSH connection to the node chosen by PodmanNodeFixture
        return self.node.node.ssh_client

    def test_get_podman_client(self):
        fixture = podman.get_podman_client(ssh_client=self.ssh_client)
        self.assertIsInstance(fixture, podman.PodmanClientFixture)

    def test_connect_podman_client(self):
        connected = podman.get_podman_client(ssh_client=self.ssh_client).connect()
        self.assertTrue(connected.system.ping())

    def test_list_podman_containers(self):
        connected = podman.get_podman_client(ssh_client=self.ssh_client).connect()
        listing = connected.containers.list()
        self.assertIsInstance(listing, types.GeneratorType)
| 32.173913
| 78
| 0.690991
|
acfcea2728cac268bf86da2b357fb052631cd672
| 781
|
py
|
Python
|
nipype/interfaces/spm/__init__.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/spm/__init__.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/spm/__init__.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""
from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
scans_for_fnames)
from .preprocess import (SliceTiming, Realign, Coregister, Normalize, Segment,
Smooth, NewSegment, DARTEL, DARTELNorm2MNI,
CreateWarped, VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
OneSampleTTestDesign, TwoSampleTTestDesign,
PairedTTestDesign, MultipleRegressionDesign)
from .utils import Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice, ApplyInverseDeformation,ResliceToReference
| 52.066667
| 116
| 0.68758
|
acfcea50d2b16b85b8fb6b37f5019b8c98fdb71a
| 1,486
|
py
|
Python
|
nova/tests/unit/cmd/test_compute.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,874
|
2015-01-04T05:18:34.000Z
|
2022-03-31T03:30:28.000Z
|
nova/tests/unit/cmd/test_compute.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 40
|
2015-04-13T02:32:42.000Z
|
2022-02-16T02:28:06.000Z
|
nova/tests/unit/cmd/test_compute.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,996
|
2015-01-04T15:11:51.000Z
|
2022-03-31T11:03:13.000Z
|
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova import context
from nova.db.main import api as db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
    """Restore db.DISABLE_DB_ACCESS to its original value on exit."""
    saved = db.DISABLE_DB_ACCESS
    try:
        yield
    finally:
        db.DISABLE_DB_ACCESS = saved
class ComputeMainTest(test.NoDBTestCase):
    """Tests for the nova-compute console entry point."""

    @mock.patch('nova.conductor.api.API.wait_until_ready')
    @mock.patch('oslo_reports.guru_meditation_report')
    def _call_main(self, mod, gmr, cond):
        # Decorators are applied bottom-up, so `gmr` is the
        # guru_meditation_report mock and `cond` is the wait_until_ready mock.
        # Patch the module's config and service objects so main() runs without
        # touching real configuration or starting a real service.
        @mock.patch.object(mod, 'config')
        @mock.patch.object(mod, 'service')
        def run_main(serv, conf):
            mod.main()
        run_main()

    def test_compute_main_blocks_db(self):
        ctxt = context.get_admin_context()
        # restore_db resets db.DISABLE_DB_ACCESS afterwards, since running
        # compute's main() is expected to leave direct DB access disabled.
        with restore_db():
            self._call_main(compute)
            # after main(), direct DB calls must raise DBNotAllowed
            self.assertRaises(exception.DBNotAllowed, db.instance_get, ctxt, 2)
| 29.137255
| 79
| 0.711978
|
acfceb3b44a0f2e45928173099055b4793d4e958
| 13,326
|
py
|
Python
|
tests/test_app_routers_bond_transfer_approvals_{token_Address}_{id}_GET.py
|
BoostryJP/ibet-Prime
|
924e7f8da4f8feea0a572e8b5532e09bcdf2dc99
|
[
"Apache-2.0"
] | 2
|
2021-08-19T12:35:25.000Z
|
2022-02-16T04:13:38.000Z
|
tests/test_app_routers_bond_transfer_approvals_{token_Address}_{id}_GET.py
|
BoostryJP/ibet-Prime
|
924e7f8da4f8feea0a572e8b5532e09bcdf2dc99
|
[
"Apache-2.0"
] | 46
|
2021-09-02T03:22:05.000Z
|
2022-03-31T09:20:00.000Z
|
tests/test_app_routers_bond_transfer_approvals_{token_Address}_{id}_GET.py
|
BoostryJP/ibet-Prime
|
924e7f8da4f8feea0a572e8b5532e09bcdf2dc99
|
[
"Apache-2.0"
] | 1
|
2021-11-17T23:18:27.000Z
|
2021-11-17T23:18:27.000Z
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from datetime import datetime
from pytz import timezone
import config
from app.model.db import (
Token,
TokenType,
IDXTransferApproval
)
local_tz = timezone(config.TZ)
class TestAppRoutersBondTransferApprovalsTokenAddressIdGET:
# target API endpoint
base_url = "/bond/transfer_approvals/{}/{}"
test_transaction_hash = "test_transaction_hash"
test_issuer_address = "test_issuer_address"
test_token_address = "test_token_address"
test_exchange_address = "test_exchange_address"
test_from_address = "test_from_address"
test_to_address = "test_to_address"
test_application_datetime = datetime(year=2019, month=9, day=1)
test_application_datetime_str = timezone("UTC").localize(test_application_datetime).astimezone(local_tz).isoformat()
test_application_blocktimestamp = datetime(year=2019, month=9, day=2)
test_application_blocktimestamp_str = timezone("UTC").localize(test_application_blocktimestamp).astimezone(
local_tz).isoformat()
test_approval_datetime = datetime(year=2019, month=9, day=3)
test_approval_datetime_str = timezone("UTC").localize(test_approval_datetime).astimezone(local_tz).isoformat()
test_approval_blocktimestamp = datetime(year=2019, month=9, day=4)
test_approval_blocktimestamp_str = timezone("UTC").localize(test_approval_blocktimestamp).astimezone(
local_tz).isoformat()
###########################################################################
# Normal Case
###########################################################################
# <Normal_1>
# unapproved data
def test_normal_1(self, client, db):
    """A record with no approval/cancel events is reported as pending."""
    # prepare data: Token
    token = Token()
    token.type = TokenType.IBET_STRAIGHT_BOND
    token.tx_hash = self.test_transaction_hash
    token.issuer_address = self.test_issuer_address
    token.token_address = self.test_token_address
    token.abi = {}
    db.add(token)

    # prepare data: transfer approval with everything still unset
    approval_id = 10
    approval = IDXTransferApproval()
    approval.id = approval_id
    approval.token_address = self.test_token_address
    approval.exchange_address = self.test_exchange_address
    approval.application_id = 100
    approval.from_address = self.test_from_address
    approval.to_address = self.test_to_address
    approval.amount = 200
    approval.application_datetime = self.test_application_datetime
    approval.application_blocktimestamp = self.test_application_blocktimestamp
    approval.approval_datetime = None
    approval.approval_blocktimestamp = None
    approval.cancelled = None
    approval.transfer_approved = None
    db.add(approval)

    # request target API
    resp = client.get(self.base_url.format(self.test_token_address, approval_id))

    # assertion: NULL flags come back as False
    assert resp.status_code == 200
    expected = {
        "id": 10,
        "token_address": self.test_token_address,
        "exchange_address": self.test_exchange_address,
        "application_id": 100,
        "from_address": self.test_from_address,
        "to_address": self.test_to_address,
        "amount": 200,
        "application_datetime": self.test_application_datetime_str,
        "application_blocktimestamp": self.test_application_blocktimestamp_str,
        "approval_datetime": None,
        "approval_blocktimestamp": None,
        "cancelled": False,
        "transfer_approved": False,
    }
    assert resp.json() == expected
# <Normal_2>
# canceled data
def test_normal_2(self, client, db):
    """A cancelled record is reported with cancelled=True."""
    # prepare data: Token
    token = Token()
    token.type = TokenType.IBET_STRAIGHT_BOND
    token.tx_hash = self.test_transaction_hash
    token.issuer_address = self.test_issuer_address
    token.token_address = self.test_token_address
    token.abi = {}
    db.add(token)

    # prepare data: transfer approval flagged as cancelled
    approval_id = 10
    approval = IDXTransferApproval()
    approval.id = approval_id
    approval.token_address = self.test_token_address
    approval.exchange_address = self.test_exchange_address
    approval.application_id = 100
    approval.from_address = self.test_from_address
    approval.to_address = self.test_to_address
    approval.amount = 200
    approval.application_datetime = self.test_application_datetime
    approval.application_blocktimestamp = self.test_application_blocktimestamp
    approval.approval_datetime = None
    approval.approval_blocktimestamp = None
    approval.cancelled = True
    approval.transfer_approved = None
    db.add(approval)

    # request target API
    resp = client.get(self.base_url.format(self.test_token_address, approval_id))

    # assertion
    assert resp.status_code == 200
    expected = {
        "id": 10,
        "token_address": self.test_token_address,
        "exchange_address": self.test_exchange_address,
        "application_id": 100,
        "from_address": self.test_from_address,
        "to_address": self.test_to_address,
        "amount": 200,
        "application_datetime": self.test_application_datetime_str,
        "application_blocktimestamp": self.test_application_blocktimestamp_str,
        "approval_datetime": None,
        "approval_blocktimestamp": None,
        "cancelled": True,
        "transfer_approved": False,
    }
    assert resp.json() == expected
# <Normal_3>
# approved data(no ownership vesting)
def test_normal_3(self, client, db):
    """An approved record without approval timestamps still reports transfer_approved=True."""
    # prepare data: Token
    token = Token()
    token.type = TokenType.IBET_STRAIGHT_BOND
    token.tx_hash = self.test_transaction_hash
    token.issuer_address = self.test_issuer_address
    token.token_address = self.test_token_address
    token.abi = {}
    db.add(token)

    # prepare data: approved transfer with no approval datetimes recorded
    approval_id = 10
    approval = IDXTransferApproval()
    approval.id = approval_id
    approval.token_address = self.test_token_address
    approval.exchange_address = self.test_exchange_address
    approval.application_id = 100
    approval.from_address = self.test_from_address
    approval.to_address = self.test_to_address
    approval.amount = 200
    approval.application_datetime = self.test_application_datetime
    approval.application_blocktimestamp = self.test_application_blocktimestamp
    approval.approval_datetime = None
    approval.approval_blocktimestamp = None
    approval.cancelled = None
    approval.transfer_approved = True
    db.add(approval)

    # request target API
    resp = client.get(self.base_url.format(self.test_token_address, approval_id))

    # assertion
    assert resp.status_code == 200
    expected = {
        "id": 10,
        "token_address": self.test_token_address,
        "exchange_address": self.test_exchange_address,
        "application_id": 100,
        "from_address": self.test_from_address,
        "to_address": self.test_to_address,
        "amount": 200,
        "application_datetime": self.test_application_datetime_str,
        "application_blocktimestamp": self.test_application_blocktimestamp_str,
        "approval_datetime": None,
        "approval_blocktimestamp": None,
        "cancelled": False,
        "transfer_approved": True,
    }
    assert resp.json() == expected
# <Normal_4>
# approved data
def test_normal_4(self, client, db):
    """A fully approved record reports approval timestamps in the local timezone."""
    # prepare data: Token
    token = Token()
    token.type = TokenType.IBET_STRAIGHT_BOND
    token.tx_hash = self.test_transaction_hash
    token.issuer_address = self.test_issuer_address
    token.token_address = self.test_token_address
    token.abi = {}
    db.add(token)

    # prepare data: approved transfer including approval datetimes
    approval_id = 10
    approval = IDXTransferApproval()
    approval.id = approval_id
    approval.token_address = self.test_token_address
    approval.exchange_address = self.test_exchange_address
    approval.application_id = 100
    approval.from_address = self.test_from_address
    approval.to_address = self.test_to_address
    approval.amount = 200
    approval.application_datetime = self.test_application_datetime
    approval.application_blocktimestamp = self.test_application_blocktimestamp
    approval.approval_datetime = self.test_approval_datetime
    approval.approval_blocktimestamp = self.test_approval_blocktimestamp
    approval.cancelled = None
    approval.transfer_approved = True
    db.add(approval)

    # request target API
    resp = client.get(self.base_url.format(self.test_token_address, approval_id))

    # assertion
    assert resp.status_code == 200
    expected = {
        "id": 10,
        "token_address": self.test_token_address,
        "exchange_address": self.test_exchange_address,
        "application_id": 100,
        "from_address": self.test_from_address,
        "to_address": self.test_to_address,
        "amount": 200,
        "application_datetime": self.test_application_datetime_str,
        "application_blocktimestamp": self.test_application_blocktimestamp_str,
        "approval_datetime": self.test_approval_datetime_str,
        "approval_blocktimestamp": self.test_approval_blocktimestamp_str,
        "cancelled": False,
        "transfer_approved": True,
    }
    assert resp.json() == expected
###########################################################################
# Error Case
###########################################################################
# <Error_1>
# token not found
def test_error_1(self, client, db):
id = 10
# request target API
resp = client.get(
self.base_url.format(self.test_token_address, id)
)
# assertion
assert resp.status_code == 404
assert resp.json() == {
"meta": {
"code": 1,
"title": "NotFound"
},
"detail": "token not found"
}
# <Error_2>
# processing token
def test_error_2(self, client, db):
id = 10
# prepare data: Token
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = self.test_transaction_hash
_token.issuer_address = self.test_issuer_address
_token.token_address = self.test_token_address
_token.abi = {}
_token.token_status = 0
db.add(_token)
# request target API
resp = client.get(
self.base_url.format(self.test_token_address, id)
)
# assertion
assert resp.status_code == 400
assert resp.json() == {
"meta": {
"code": 1,
"title": "InvalidParameterError"
},
"detail": "wait for a while as the token is being processed"
}
# <Error_3>
# transfer approval not found
def test_error_3(self, client, db):
id = 10
# prepare data: Token
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = self.test_transaction_hash
_token.issuer_address = self.test_issuer_address
_token.token_address = self.test_token_address
_token.abi = {}
_token.token_status = 1
db.add(_token)
# request target API
resp = client.get(
self.base_url.format(self.test_token_address, id)
)
# assertion
assert resp.status_code == 404
assert resp.json() == {
"meta": {
"code": 1,
"title": "NotFound"
},
"detail": "transfer approval not found"
}
| 38.074286
| 120
| 0.651208
|
acfcebdfce3827075b81171697133e56b116e19b
| 210
|
py
|
Python
|
hsm/hsm_working/doctype/biller/test_biller.py
|
PratikMane34/BILLS
|
4be47dacb213a7b5bf66dad07d1bed4794754113
|
[
"MIT"
] | null | null | null |
hsm/hsm_working/doctype/biller/test_biller.py
|
PratikMane34/BILLS
|
4be47dacb213a7b5bf66dad07d1bed4794754113
|
[
"MIT"
] | null | null | null |
hsm/hsm_working/doctype/biller/test_biller.py
|
PratikMane34/BILLS
|
4be47dacb213a7b5bf66dad07d1bed4794754113
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Pratik Mane and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestBiller(unittest.TestCase):
	"""Placeholder test case for the Biller DocType.

	Discovered by the test runner via the TestCase subclass; no
	Biller-specific behavior is exercised yet.
	"""
	pass
| 19.090909
| 50
| 0.757143
|
acfcec76e8a06a7b8cef015113b24198d6c53968
| 1,556
|
py
|
Python
|
snmpwalk/setup.py
|
FlorianVeaux/integrations-extras
|
87133d9cf2085e650b981c991a392f0bdb5ed8d0
|
[
"BSD-3-Clause"
] | null | null | null |
snmpwalk/setup.py
|
FlorianVeaux/integrations-extras
|
87133d9cf2085e650b981c991a392f0bdb5ed8d0
|
[
"BSD-3-Clause"
] | 3
|
2020-03-30T12:26:28.000Z
|
2021-08-25T11:58:47.000Z
|
snmpwalk/setup.py
|
FlorianVeaux/integrations-extras
|
87133d9cf2085e650b981c991a392f0bdb5ed8d0
|
[
"BSD-3-Clause"
] | 1
|
2020-12-01T21:04:57.000Z
|
2020-12-01T21:04:57.000Z
|
from codecs import open  # To use a consistent encoding
from os import path
from setuptools import setup
# Absolute directory containing this setup.py; used to locate data files.
HERE = path.dirname(path.abspath(__file__))
# Get version info: execute __about__.py and collect its module-level
# names (notably __version__) into the ABOUT dict.
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'snmpwalk', '__about__.py')) as f:
    exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Minimum version of the Datadog base-check package this integration needs.
CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0'
setup(
    name='datadog-snmpwalk',
    version=ABOUT['__version__'],
    description='The Snmpwalk check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent snmpwalk check',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-extras',
    # Author details
    author_email='help@datadoghq.com',
    # License
    license='BSD-3-Clause',
    # See https://pypi.org/classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.7',
    ],
    # The package we're going to ship
    packages=['datadog_checks.snmpwalk'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    # Extra files to ship with the wheel package
    include_package_data=True,
)
| 30.509804
| 78
| 0.678021
|
acfcee7e32aed7d12ca964f257a8f0143b270662
| 208
|
py
|
Python
|
ifttt_price_emergency_applet.py
|
theintegralanomaly/cryptonite
|
00840c96a653df6224800e4f6c62116b37e6e005
|
[
"MIT"
] | null | null | null |
ifttt_price_emergency_applet.py
|
theintegralanomaly/cryptonite
|
00840c96a653df6224800e4f6c62116b37e6e005
|
[
"MIT"
] | null | null | null |
ifttt_price_emergency_applet.py
|
theintegralanomaly/cryptonite
|
00840c96a653df6224800e4f6c62116b37e6e005
|
[
"MIT"
] | null | null | null |
import requests

# IFTTT Webhooks endpoint for the "stellar_price_emergency" event.
# Fix: the key previously ended with a stray trailing space inside the
# string literal, which corrupts the request URL.
# NOTE(review): the webhook key is a secret hard-coded in source; consider
# reading it from an environment variable instead of committing it.
ifttt_webhook_url = 'https://maker.ifttt.com/trigger/stellar_price_emergency/with/key' \
                    '/kc78OIzFAg6W7swCRlP27VkPJuRGxgOmVKyS7869Rky'

# Trigger the applet; IFTTT fires on a plain POST with no body.
requests.post(ifttt_webhook_url)
| 29.714286
| 88
| 0.759615
|
acfceecc371e0500ab9db6f44b97f937ccdc1558
| 8,630
|
py
|
Python
|
api_json.py
|
cltk/cltk_api
|
eb736ec9f20c008436e93cd0be4bcd05be7d455c
|
[
"MIT"
] | 16
|
2015-12-07T15:05:12.000Z
|
2019-11-21T21:16:20.000Z
|
api_json.py
|
cltk/cltk_api
|
eb736ec9f20c008436e93cd0be4bcd05be7d455c
|
[
"MIT"
] | 40
|
2015-11-20T16:15:25.000Z
|
2017-06-30T18:13:45.000Z
|
api_json.py
|
cltk/cltk_api
|
eb736ec9f20c008436e93cd0be4bcd05be7d455c
|
[
"MIT"
] | 29
|
2015-11-22T18:53:44.000Z
|
2020-01-15T12:43:07.000Z
|
"""Open JSON file and serve."""
import json
import os
from flask import Flask
from flask import request # for getting query string
# eg: request.args.get('user') will get '?user=some-value'
from flask_restful import Resource, Api
from util.jsonp import jsonp
from metadata.pos.views import POSTagger
from metadata.stem.views import Stem
from metadata.definition.views import Definition
from flask_restful import reqparse
# Flask application instance and the Flask-RESTful wrapper against which
# the resource classes below are registered.
app = Flask(__name__)
api = Api(app)
# example
class HelloWorld(Resource):
    """Trivial sanity-check resource for the /hello route."""

    def get(self):
        payload = {'hello': 'world'}
        return payload
# example
class TodoSimple(Resource):
    """Echo the URL token back, demonstrating a parameterized route."""

    def get(self, todo_id):
        payload = {'example with token': todo_id}
        return payload
def open_json(fp):
    """Open a JSON file and return its deserialized contents.

    :param fp: path to a JSON file
    :return: the parsed Python object
    :raises OSError: if the file cannot be opened
    :raises json.JSONDecodeError: if the file is not valid JSON
    """
    # JSON is defined as UTF-8 text; pass the encoding explicitly rather
    # than relying on the platform-dependent default.
    with open(fp, encoding='utf-8') as fo:
        return json.load(fo)
def get_cltk_text_dir(lang, corpus='perseus'):
    """Return the absolute path of the local JSON text dir for *lang*."""
    language = lang.casefold()
    data_home = os.path.expanduser('~/cltk_data')
    corpus_folder = '{}_text_{}'.format(language, corpus)
    return os.path.join(data_home, language, 'text', corpus_folder, 'json')
def get_cltk_translation_dir(lang, translation_lang, corpus='perseus'):
    """Return the absolute path of the local translation dir for *lang*."""
    language = lang.casefold()
    base = os.path.join(os.path.expanduser('~/cltk_data'), language, 'text')
    corpus_folder = '{}_text_{}'.format(language, corpus)
    return os.path.join(base, corpus_folder, 'translation', translation_lang)
def get_cltk_commentary_dir(lang, corpus='perseus'):
    """Return the absolute path of the local commentary dir for *lang*."""
    language = lang.casefold()
    corpus_folder = language + '_text_' + corpus
    root = os.path.expanduser('~/cltk_data')
    return os.path.join(root, language, 'text', corpus_folder, 'commentary')
class Text(Resource):
    """Serve a text, its translations, or its commentary as JSON.

    Dispatch is driven by optional query parameters:
    ``?commentary=<author|all>`` returns commentary entries,
    ``?translation=<lang>`` returns translations, otherwise the text
    itself, optionally narrowed by ``chunk1``/``chunk2``/``chunk3``
    (e.g. book/chapter/line keys of the stored JSON).
    """
    def get(self, lang, corpus, author, work):
        parser = reqparse.RequestParser()
        parser.add_argument('translation')
        parser.add_argument('commentary')
        args = parser.parse_args()
        translation_lang = args.get('translation')
        commentary_author = args.get('commentary')
        if(commentary_author):
            _dir = get_cltk_commentary_dir(lang)
            # data files are named "author__work.json"
            file = author + "__" + work + ".json";
            json_fp = os.path.join(_dir, file);
            try:
                file_dict = open_json(json_fp)
            except Exception as e:
                # NOTE(review): a missing/unreadable file silently yields an
                # empty 200 response (body null); a 404 would be clearer.
                return
            commentary = []
            if(commentary_author == "all"):
                # Add all commentary
                commentary = file_dict["commentary"]
            else:
                # Add commentary by specific author
                for item in file_dict["commentary"]:
                    print(item)
                    if item['author'] == commentary_author:
                        commentary.append(item)
            return {'language': lang,
                    'corpus': corpus,
                    'author': author,
                    'work': work,
                    'commentary': commentary,
                    'meta': file_dict['meta'],
                    }
        elif(translation_lang):
            # Assumes translation data file name as "author__work__language.json"
            _dir = get_cltk_translation_dir(lang, translation_lang)
            file = author + "__" + work + ".json";
            json_fp = os.path.join(_dir, file);
            try:
                file_dict = open_json(json_fp)
            except Exception as e:
                # NOTE(review): same silent-None behavior as above.
                return
            return {'language': lang,
                    'corpus': corpus,
                    'author': author,
                    'work': work,
                    'translations': file_dict['translations'],
                    'meta': file_dict['meta'],
                    }
        else:
            _dir = get_cltk_text_dir(lang)
            file = author + "__" + work + ".json";
            json_fp = os.path.join(_dir, file)
            try:
                file_dict = open_json(json_fp)
            except Exception as e:
                # NOTE(review): same silent-None behavior as above.
                return
            text = file_dict['text']
            # Optional drill-down into the nested text structure, one
            # level per chunk parameter.
            chunk1 = request.args.get('chunk1')
            chunk2 = request.args.get('chunk2')
            chunk3 = request.args.get('chunk3')
            if chunk1:
                text = text[chunk1]
            if chunk2:
                text = text[chunk2]
            if chunk3:
                text = text[chunk3]
            return {'language': lang,
                    'corpus': corpus,
                    'author': author,
                    'work': work,
                    'text': text,
                    'meta': file_dict['meta'],
                    }
class Lang(Resource):
    """List languages that have a local Perseus corpus installed."""

    def get(self):
        data_root = os.path.expanduser('~/cltk_data')
        available = []
        for candidate in os.listdir(data_root):
            # a language counts only if its Perseus JSON dir exists
            if os.path.isdir(get_cltk_text_dir(candidate)):
                available.append(candidate)
        return {'languages': available}
class Corpus(Resource):
    """List corpora installed locally for a language.

    Only the Perseus corpus is currently detected.
    """

    def get(self, lang):
        json_dir = get_cltk_text_dir(lang)
        corpus_dir = os.path.split(json_dir)[0]
        corpora = []
        if os.path.isdir(corpus_dir) and corpus_dir.endswith('_perseus'):
            corpora.append('perseus')
        return {'language': lang,
                'corpora': corpora}
class Author(Resource):
    """List the authors present in a language's local Perseus corpus."""

    def get(self, lang, corpus):
        corpus_json_dir = get_cltk_text_dir(lang)
        authors = set()  # use set to avoid dupes
        if not os.path.isdir(corpus_json_dir):
            print('Corpus not installed into "~/cltk_data".')
        else:
            # file names follow the "author__work.json" convention
            for file in os.listdir(corpus_json_dir):
                authors.add(file.split('__')[0])
        return {'language': lang,
                'authors': list(authors)}  # cast to list, set() not serializable
class Texts(Resource):
    """List the works available locally for a given author and corpus."""

    def get(self, lang, corpus, author):
        home_dir = os.path.expanduser('~/cltk_data')
        corpus_dir = os.path.join(home_dir, lang, 'text',
                                  lang + '_text_' + corpus, 'json')
        # "author__work.json" -> keep the work part, strip ".json"
        texts = [name.split('__')[1][:-5]
                 for name in os.listdir(corpus_dir)
                 if name.startswith(author)]
        return {'language': lang,
                'corpus': corpus,
                'author': author,
                'texts': texts}
# ---------------------------------------------------------------------------
# Route registration. Example request URLs are listed above each resource.
# ---------------------------------------------------------------------------
# http://localhost:5000/lang/latin/corpus/perseus/author/vergil/text
# http://localhost:5000/lang/greek/corpus/perseus/author/homer/text
api.add_resource(Texts, '/lang/<string:lang>/corpus/<string:corpus>/author/<string:author>/text')
# http://localhost:5000/lang/latin/corpus/perseus/author
api.add_resource(Author, '/lang/<string:lang>/corpus/<string:corpus>/author')
# http://localhost:5000/lang/latin/corpus
api.add_resource(Corpus, '/lang/<string:lang>/corpus')
# http://localhost:5000/lang
api.add_resource(Lang, '/lang')
# http://localhost:5000/lang/greek/corpus/perseus/author/achilles_tatius/text/leucippe_et_clitophon?chunk1=1&chunk2=1&chunk3=1
# http://localhost:5000/lang/greek/corpus/perseus/author/homer/text/odyssey
# http://localhost:5000/lang/greek/corpus/perseus/author/homer/text/odyssey?chunk1=1&chunk2=1
# http://localhost:5000/lang/greek/corpus/perseus/author/homer/text/odyssey?translation=english
# http://localhost:5000/lang/greek/corpus/perseus/author/homer/text/odyssey?commentary=all
# http://localhost:5000/lang/greek/corpus/perseus/author/homer/text/odyssey?commentary=E. T. Merril
api.add_resource(Text, '/lang/<string:lang>/corpus/<string:corpus>/author/<string:author>/text/<string:work>')
#api.add_resource(Text, '/lang/<string:lang>/corpus/<string:corpus>/author/<string:author>/text/<string:work>/<string:chunk1>')
# CLTK core pos
api.add_resource(POSTagger, '/core/pos', endpoint='pos')
# CLTK core stemmer
api.add_resource(Stem, '/core/stem/<string:sentence>')
# CLTK definitions
# http://localhost:5000/lang/latin/define/abante
api.add_resource(Definition, '/lang/<string:lang>/define/<string:word>')
# simple examples
api.add_resource(TodoSimple, '/todo/<string:todo_id>')
api.add_resource(HelloWorld, '/hello')
if __name__ == '__main__':
    #app.run(debug=True)
    # Development server: binds on all interfaces with the debugger enabled.
    app.run(host='0.0.0.0', debug=True)
| 33.843137
| 140
| 0.60927
|
acfceefd790d389a2921cb4d6ea496a0fea0d157
| 7,572
|
py
|
Python
|
analysis/make_submission_plots.py
|
larsbratholm/champs_kaggle
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
[
"MIT"
] | 9
|
2020-08-14T23:11:16.000Z
|
2021-08-09T16:23:43.000Z
|
analysis/make_submission_plots.py
|
larsbratholm/champs_kaggle
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
[
"MIT"
] | 1
|
2020-11-19T09:29:14.000Z
|
2020-11-19T09:29:14.000Z
|
analysis/make_submission_plots.py
|
larsbratholm/champs_kaggle
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
[
"MIT"
] | 2
|
2020-09-09T02:53:57.000Z
|
2020-12-06T08:20:52.000Z
|
"""
Visualizations of the team submissions.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
import scipy
import sklearn.manifold
import sklearn.decomposition
def get_variance_by_type(data, couplings, type_to_idx):
    """Per-submission variance of prediction errors, split by coupling type.

    :param data: array of shape (n_submissions, n_couplings) of predictions
    :param couplings: array of shape (n_couplings,) of reference values
    :param type_to_idx: mapping of coupling-type name -> column indices
    :return: dict mapping each type name to an array of shape
        (n_submissions,) holding, for every submission, the variance of
        (prediction - reference) over that type's columns
    """
    # (the previous `n = len(type_to_idx)` local was unused and removed)
    scores = {}
    for type_, idx in type_to_idx.items():
        idx = np.asarray(idx)
        # errors restricted to this type's columns: (n_submissions, len(idx))
        errors = data[:, idx] - couplings[idx][None]
        scores[type_] = np.var(errors, axis=1)
    return scores
def extract_weighted_subset(data, couplings, type_to_idx, subset=None, metric='mae'):
    """
    Extract 20000 points of each type and optionally weight them according to their MAE/RMSE in the ensemble

    :param data: (n_submissions, n_couplings) predictions
    :param couplings: (n_couplings,) reference values
    :param type_to_idx: mapping type name -> column indices; each type must
        have at least 20000 columns (np.random.choice with replace=False
        raises otherwise)
    :param subset: optional row indices to keep (default: all rows)
    :param metric: 'rmse', 'mae', or None (no per-row scaling)
    :return: array of shape (len(subset), len(type_to_idx) * 20000)
    """
    # Global seed makes the per-type column sampling below reproducible.
    np.random.seed(42)
    if subset is None:
        subset = np.arange(data.shape[0])
    else:
        subset = np.asarray(subset, dtype=int)
    # Per-row scale factor (shape (n_rows, 1)) used to normalize each
    # submission's errors before sampling.
    if metric == 'rmse':
        metric_fun = lambda x: np.std(x, axis=1)[:,None]
    elif metric == 'mae':
        metric_fun = lambda x: np.mean(abs(x), axis=1)[:,None]
    elif metric is None:
        metric_fun = lambda x: 1
    else:
        print("Unknown metric", metric)
        raise SystemExit
    #weights = np.exp(len(type_to_idx)*scores)
    #difference = data[subset,:]-couplings[None]
    x = np.empty((len(subset), len(type_to_idx) * 20000))
    for i, (type_, idx) in enumerate(type_to_idx.items()):
        # weight differences by the chosen metric to get
        # different types to be on same scale
        unscaled_data = (data[subset]-couplings[None])[:,idx]
        score = metric_fun(unscaled_data)
        scaled_data = unscaled_data / score
        # sample 20000 distinct columns of this type into the output slab
        x[:,i*20000:(i+1)*20000] = scaled_data[:,np.random.choice(np.arange(len(idx)), size=20000, replace=False)]
    return x
def plot_correlation(data, couplings, name, type_to_idx, subset=[0,1,2,3,4,5,11],
        filename='correlation_matrix.pdf', linkage=None):
    """
    Plot correlation between solutions

    Saves a heatmap (linkage=None) or a hierarchically clustered heatmap
    (linkage set to a scipy linkage method name) of the correlation matrix
    between submissions to *filename*.

    NOTE(review): `subset` uses a mutable default list; it is only read
    here, but a tuple default would be safer.
    """
    subset = np.asarray(subset, dtype=int)
    name = np.asarray(name)
    x = extract_weighted_subset(data, couplings, type_to_idx, subset=subset, metric='rmse')
    # correlation between the selected submissions' scaled error vectors
    corr = np.corrcoef(x)
    if linkage not in (None, 'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'):
        print("Unknown linkage", linkage)
        raise SystemExit
    # Unsorted plots
    if linkage is None:
        # Plot heatmap
        sns.heatmap((corr), square=True, linewidths=.25, cbar_kws={"shrink": .5},
                cmap=sns.diverging_palette(220, 10, as_cmap=True),
                yticklabels=name[subset], xticklabels=subset+1,
                center=0.5, vmax=1, vmin=0)
    # Sorted from clustering
    else:
        d = scipy.cluster.hierarchy.distance.pdist(x)#, 'cityblock')
        L = scipy.cluster.hierarchy.linkage(d, method=linkage, optimal_ordering=True)
        sns.clustermap((corr), square=True, linewidths=.25, cbar_kws={"shrink": .5},
                cmap = sns.diverging_palette(220, 10, as_cmap=True),
                yticklabels=name[subset], xticklabels=subset+1,
                center=0.5, vmax=1, vmin=0, row_linkage=L, col_linkage=L)
    #plt.xticks(rotation=-45)
    plt.yticks(rotation=0)
    plt.savefig(filename, bbox_inches = "tight")
    plt.clf()
def visualize_methods(data, couplings, name, type_to_idx, scores, manifold='mds', n_samples=100,
        subset=[0,1,2,3,4,5,11], scale=True, filename="manifold.pdf"):
    """
    Get reduced dimensional projection

    Projects the first *n_samples* submissions into 2D with the chosen
    manifold method ('mds', 'tsne', 'tsne_mds', 'pca', 'tsne_pca', 'lsa'),
    colors points by average score, labels the ranks in *subset*, and
    saves the scatter plot to *filename*.

    NOTE(review): `subset` uses a mutable default list (read-only here).
    """
    # Set grid
    #plt.style.use('seaborn-whitegrid')
    # Set fig size
    plt.rcParams["figure.figsize"] = (16,9)
    # Set font size
    plt.rcParams["font.size"] = 30
    if scale:
        metric = 'mae'
    else:
        metric = None
    x = extract_weighted_subset(data, couplings, type_to_idx, subset=np.arange(n_samples), metric=metric)
    # Use manhattan distance when possible
    if manifold in ['mds', 'tsne_mds']:
        # pairwise mean-absolute-difference matrix between submissions
        dissimilarity = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(i+1, n_samples):
                distance = np.mean(abs(x[i]-x[j]))
                dissimilarity[i,j] = distance
                dissimilarity[j,i] = distance
        if manifold == 'mds':
            m = sklearn.manifold.MDS(n_components=2, n_init=10, random_state=42, dissimilarity='precomputed')
            y = m.fit_transform(dissimilarity)
        else:
            # tsne_mds: MDS to 50 dims first, then t-SNE down to 2
            if manifold == 'tsne_mds':
                m = sklearn.manifold.MDS(n_components=50, n_init=10, random_state=42, dissimilarity='precomputed')
                x = m.fit_transform(dissimilarity)
            m = sklearn.manifold.TSNE(n_components=2, random_state=42, verbose=10, perplexity=15,
                    metric='manhattan', init='random', n_iter=40000, learning_rate=50)
            y = m.fit_transform(x)
    elif manifold == 'tsne':
        m = sklearn.manifold.TSNE(n_components=2, random_state=42, verbose=10, perplexity=15,
                metric='manhattan', init='random', n_iter=40000, learning_rate=50)
        y = m.fit_transform(x)
    elif manifold == 'pca':
        m = sklearn.decomposition.PCA(n_components=2)
        y = m.fit_transform(x)
    elif manifold == 'tsne_pca':
        # PCA to 50 dims first, then t-SNE down to 2
        m = sklearn.decomposition.PCA(n_components=50)
        x = m.fit_transform(x)
        m = sklearn.manifold.TSNE(n_components=2, random_state=42, verbose=10, perplexity=15,
                metric='manhattan', init='random', n_iter=40000, learning_rate=50)
        y = m.fit_transform(x)
    elif manifold == 'lsa':
        m = sklearn.decomposition.TruncatedSVD(n_components=2)
        y = m.fit_transform(x)
    else:
        print("Unknown manifold %s" % manifold)
        quit()
    # Get scores for coloring
    # (divisor 8 is presumably the number of coupling types — confirm)
    score_averages = sum(type_scores[:n_samples] for type_scores in scores.values())/8
    fig, ax = plt.subplots()
    im = ax.scatter(y[:,0], y[:,1], c=score_averages, s=120, cmap="viridis_r")
    # Add colorbar
    fig.colorbar(im, ax=ax)
    # Add the rank in the plot for the subset items
    txt = [str(i+1) if i in subset else '' for i in range(n_samples)]
    for i, string in enumerate(txt):
        ax.annotate(string, (y[i,0], y[i,1]))
    # Remove ticks
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Circle
    #plt.scatter(y[subset,0], y[subset,1], edgecolors='k', facecolors='None', s=320)
    # remove top and right spine
    sns.despine()
    plt.tight_layout()
    plt.savefig(filename)
    plt.clf()
if __name__ == "__main__":
    # Get script location
    script_dir = os.path.abspath(os.path.dirname(__file__))
    # Load the precomputed analysis pickle; bail out cleanly if the
    # data-preparation step has not produced it yet.
    try:
        with open(f'{script_dir}/data/data.pkl', "rb") as f:
            scores, data, id_to_type, id_to_idx, type_to_idx, rank, name, filenames, couplings = pickle.load(f)
    except FileNotFoundError:
        print("No data pickle found")
        raise SystemExit
    # Correlation plot of top methods
    plot_correlation(data, couplings, name, type_to_idx, subset=[0,1,2,3,4,11],
            filename=f"{script_dir}/output/correlation_matrix.png")
    # Clustered correlation plot of top 50 methods
    plot_correlation(data, couplings, name, type_to_idx, subset=np.arange(50),
            filename=f"{script_dir}/output/correlation_matrix_clustering_50.png", linkage='complete')
    # Solutions projected down to a 2D manifold
    visualize_methods(data, couplings, name, type_to_idx, scores, scale=True,
            subset=[0,1,2,3,4,11], filename=f"{script_dir}/output/manifold.png")
| 37.300493
| 114
| 0.635895
|
acfcef2560b948dfc1837b871d326bd642b4016d
| 4,964
|
py
|
Python
|
chunonline/apps/users/migrations/0001_initial.py
|
andanlove/chunonline
|
041814777aef2bc764256892fc7dac8a0a6dde04
|
[
"Apache-2.0"
] | 1
|
2019-07-07T07:32:02.000Z
|
2019-07-07T07:32:02.000Z
|
chunonline/apps/users/migrations/0001_initial.py
|
andanlove/chunonline
|
041814777aef2bc764256892fc7dac8a0a6dde04
|
[
"Apache-2.0"
] | null | null | null |
chunonline/apps/users/migrations/0001_initial.py
|
andanlove/chunonline
|
041814777aef2bc764256892fc7dac8a0a6dde04
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0 on 2018-10-14 21:21
import datetime
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates UserProfile (a custom auth
    # user model), Banner, and EmailVerifyRecord. Applied migrations should
    # not be hand-edited — add a new migration for schema changes.

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('nick_name', models.CharField(default='', max_length=50, verbose_name='昵称')),
                ('birthday', models.DateField(blank=True, null=True, verbose_name='生日')),
                ('gender', models.CharField(choices=[('male', '男'), ('female', '女')], default='female', max_length=7)),
                ('address', models.CharField(default='', max_length=100)),
                # NOTE(review): 'moblie' is a typo for 'mobile' in the model;
                # it is frozen here — fixing it needs a rename migration.
                ('moblie', models.CharField(blank=True, max_length=11, null=True)),
                ('image', models.ImageField(default='image/default.png', upload_to='image/%Y/%m')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '用户信息',
                'verbose_name_plural': '用户信息',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Banner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('image', models.ImageField(upload_to='banner/%Y/%m', verbose_name='轮播图')),
                ('url', models.URLField(verbose_name='访问地址')),
                ('index', models.IntegerField(default=100, verbose_name='顺序')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '轮播图',
                'verbose_name_plural': '轮播图',
            },
        ),
        migrations.CreateModel(
            name='EmailVerifyRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20, verbose_name='验证码')),
                ('email', models.EmailField(max_length=50, verbose_name='邮箱')),
                ('send_type', models.CharField(choices=[('register', '注册'), ('forget', '找回密码'), ('update_email', '修改邮箱')], max_length=12, verbose_name='验证码类型')),
                ('send_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='验证码时间')),
            ],
            options={
                'verbose_name': '邮箱验证码',
                'verbose_name_plural': '邮箱验证码',
            },
        ),
    ]
| 62.05
| 329
| 0.618856
|
acfcf0c142ed9c8f1ef08ff407f2d40129a3e2b9
| 901
|
py
|
Python
|
youtube_dl_gui/__main__.py
|
VaiTon/youtube-dl-gui
|
cbfaccdc73a1cc9a2bc04df4a2e20eeb4ca31a85
|
[
"Unlicense"
] | null | null | null |
youtube_dl_gui/__main__.py
|
VaiTon/youtube-dl-gui
|
cbfaccdc73a1cc9a2bc04df4a2e20eeb4ca31a85
|
[
"Unlicense"
] | 1
|
2019-09-04T14:34:26.000Z
|
2019-09-04T14:34:26.000Z
|
youtube_dl_gui/__main__.py
|
VaiTon/youtube-dl-gui
|
cbfaccdc73a1cc9a2bc04df4a2e20eeb4ca31a85
|
[
"Unlicense"
] | null | null | null |
"""Youtubedlg __main__ file.

__main__ file is a python 'executable' file which calls the youtubedlg
main() function in order to start the app. It can be used to start
the app from the package directory OR it can be used to start the app
from a different directory after you have installed the youtube_dl_gui
package.

Example:
    In order to run the app from the package directory.

    $ cd <package director>
    $ python __main__.py

    In order to run the app from /usr/local/bin etc.. AFTER
    you have installed the package using setup.py.

    $ youtube-dl-gui

"""

import sys

if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py: the parent of the package directory is
    # not on sys.path yet, so add it BEFORE importing the package.
    # (Previously `import youtube_dl_gui` ran first, making this fix-up
    # ineffective for the "run from the package directory" case.)
    import os.path

    PATH = os.path.realpath(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(os.path.dirname(PATH)))

import youtube_dl_gui

if __name__ == '__main__':
    youtube_dl_gui.main()
| 26.5
| 70
| 0.718091
|
acfcf13fe9c948e46d7fca1b5e56ef447dad9472
| 616
|
py
|
Python
|
peekingduck/pipeline/nodes/dabble/trackingv1/__init__.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | 1
|
2021-12-02T05:15:58.000Z
|
2021-12-02T05:15:58.000Z
|
peekingduck/pipeline/nodes/dabble/trackingv1/__init__.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | null | null | null |
peekingduck/pipeline/nodes/dabble/trackingv1/__init__.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tracking-by-detection trackers."""
| 38.5
| 74
| 0.75974
|
acfcf1ebff14edee96e385e4bc5a1b04312e392a
| 3,540
|
py
|
Python
|
qa/rpc-tests/nodehandling.py
|
zahidaliayub/WeyCoin
|
a4251e2ff53f5b6ac3122a9b6b1ffa67fe052412
|
[
"MIT"
] | 7
|
2018-02-02T05:21:26.000Z
|
2018-08-25T08:50:53.000Z
|
qa/rpc-tests/nodehandling.py
|
zahidaliayub/WeyCoin
|
a4251e2ff53f5b6ac3122a9b6b1ffa67fe052412
|
[
"MIT"
] | 3
|
2018-02-04T15:15:02.000Z
|
2018-10-30T20:34:32.000Z
|
qa/rpc-tests/nodehandling.py
|
zahidaliayub/WeyCoin
|
a4251e2ff53f5b6ac3122a9b6b1ffa67fe052412
|
[
"MIT"
] | 14
|
2018-02-06T02:12:04.000Z
|
2018-07-08T18:48:21.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The WeyCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import WeyCoinTestFramework
from test_framework.util import *
import urllib.parse
class NodeHandlingTest (WeyCoinTestFramework):
    """Functional test for node-management RPCs.

    Exercises setban/listbanned/clearbanned (including CIDR-range bans and
    ban persistence across a node restart) and the disconnectnode RPC.
    Requires a running 4-node network set up by the framework.
    """
    def __init__(self):
        super().__init__()
        # Four nodes in the default framework topology; node 2 is the one
        # whose ban list is manipulated throughout the test.
        self.num_nodes = 4
        # Reuse the cached chain; this test does not need a fresh genesis.
        self.setup_clean_chain = False
    def run_test(self):
        """Run the setban/listbanned and disconnectnode scenarios."""
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
        self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) #wait till the nodes are disconnected
        assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        try:
            self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        try:
            self.nodes[2].setban("127.0.0.1", "remove")
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        ##test persisted banlist: bans must survive a node restart, and
        ##expired bans must be dropped when the list is reloaded
        self.nodes[2].setban("127.0.0.0/32", "add")
        self.nodes[2].setban("127.0.0.0/24", "add")
        self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
        self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
        listBeforeShutdown = self.nodes[2].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
        time.sleep(2) #make 100% sure we expired 192.168.0.1 node time
        #stop node: the 1-second ban should have expired and be absent after restart
        stop_node(self.nodes[2], 2)
        self.nodes[2] = start_node(2, self.options.tmpdir)
        listAfterShutdown = self.nodes[2].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)
        ###########################
        # RPC disconnectnode test #
        ###########################
        url = urllib.parse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
        time.sleep(2) #disconnecting a node needs a little bit of time
        for node in self.nodes[0].getpeerinfo():
            assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
        connect_nodes_bi(self.nodes,0,1) #reconnect the node
        found = False
        for node in self.nodes[0].getpeerinfo():
            if node['addr'] == url.hostname+":"+str(p2p_port(1)):
                found = True
        assert(found)
if __name__ == "__main__":
    # Entry point when invoked directly by the test runner.
    NodeHandlingTest().main()
| 41.162791
| 136
| 0.60339
|
acfcf2084a7b9cb80fc569d9f8f8e1fdefa24c26
| 7,496
|
py
|
Python
|
tests/app/api/business/brief_overview/conftest.py
|
ArenaNetworks/dto-digitalmarketplace-api
|
d0d58924719d889503ed112b0d5801b528b0398c
|
[
"MIT"
] | null | null | null |
tests/app/api/business/brief_overview/conftest.py
|
ArenaNetworks/dto-digitalmarketplace-api
|
d0d58924719d889503ed112b0d5801b528b0398c
|
[
"MIT"
] | null | null | null |
tests/app/api/business/brief_overview/conftest.py
|
ArenaNetworks/dto-digitalmarketplace-api
|
d0d58924719d889503ed112b0d5801b528b0398c
|
[
"MIT"
] | 1
|
2021-08-23T06:05:06.000Z
|
2021-08-23T06:05:06.000Z
|
import mock
import pendulum
import pytest
from flask_login import current_user
from app.api.services import frameworks_service, lots_service
from app.models import (Brief, BriefResponse, Framework, Lot, Supplier, User,
WorkOrder, db)
@pytest.fixture()
def overview_briefs(app, users):
    """Seed one brief per lifecycle state used by the brief-overview tests.

    Creates: a draft specialist (id 5), a live ATM (id 6), an RFX open to a
    single seller (id 7), a training brief open to a single seller (id 8),
    a specialist open to some sellers (id 9), a closed specialist (id 10)
    and a withdrawn specialist (id 11). Yields all persisted briefs.
    """
    now = pendulum.now('utc')
    # Look up the framework and lots seeded by the application's base data.
    framework = frameworks_service.find(slug='digital-marketplace').one_or_none()
    atm_lot = lots_service.find(slug='atm').one_or_none()
    rfx_lot = lots_service.find(slug='rfx').one_or_none()
    specialist_lot = lots_service.find(slug='specialist').one_or_none()
    training_lot = lots_service.find(slug='training2').one_or_none()
    with app.app_context():
        # id 5: draft specialist brief (never published).
        db.session.add(
            Brief(
                id=5,
                data={},
                framework=framework,
                lot=specialist_lot,
                users=users,
                published_at=None,
                withdrawn_at=None
            )
        )
        # id 6: live ATM brief, still open for questions and responses.
        published_atm = Brief(
            id=6,
            data={},
            framework=framework,
            lot=atm_lot,
            users=users,
            published_at=now.subtract(days=2),
            withdrawn_at=None
        )
        published_atm.questions_closed_at = now.add(days=3)
        published_atm.closed_at = now.add(days=5)
        db.session.add(published_atm)
        # id 7: live RFX brief invited to exactly one seller (code 2).
        published_rfx_open_to_one = Brief(
            id=7,
            data={
                'sellerSelector': 'oneSeller',
                'sellers': {
                    '2': {
                        'name': 'FriendFace'
                    }
                }
            },
            framework=framework,
            lot=rfx_lot,
            users=users,
            published_at=now.subtract(days=2),
            withdrawn_at=None
        )
        published_rfx_open_to_one.questions_closed_at = now.add(days=3)
        published_rfx_open_to_one.closed_at = now.add(days=5)
        db.session.add(published_rfx_open_to_one)
        # id 8: live training brief invited to exactly one seller (code 2).
        published_training_open_to_one = Brief(
            id=8,
            data={
                'sellerSelector': 'oneSeller',
                'sellers': {
                    '2': {
                        'name': 'FriendFace'
                    }
                }
            },
            framework=framework,
            lot=training_lot,
            users=users,
            published_at=now.subtract(days=2),
            withdrawn_at=None
        )
        published_training_open_to_one.questions_closed_at = now.add(days=3)
        published_training_open_to_one.closed_at = now.add(days=5)
        db.session.add(published_training_open_to_one)
        # id 9: live specialist brief open to a shortlist of sellers.
        published_specialist_open_to_some = Brief(
            id=9,
            data={
                'numberOfSuppliers': '3',
                'sellerSelector': 'someSellers',
                'sellers': {
                    '2': {
                        'name': 'FriendFace'
                    }
                }
            },
            framework=framework,
            lot=specialist_lot,
            users=users,
            published_at=now.subtract(days=2),
            withdrawn_at=None
        )
        published_specialist_open_to_some.questions_closed_at = now.add(days=3)
        published_specialist_open_to_some.closed_at = now.add(days=5)
        db.session.add(published_specialist_open_to_some)
        # id 10: specialist brief that has already closed.
        closed_specialist = Brief(
            id=10,
            data={},
            framework=framework,
            lot=specialist_lot,
            users=users,
            created_at=now.subtract(days=3),
            published_at=now.subtract(days=3),
            withdrawn_at=None
        )
        closed_specialist.questions_closed_at = now.subtract(days=2)
        closed_specialist.closed_at = now.subtract(days=1)
        db.session.add(closed_specialist)
        # id 11: specialist brief withdrawn while it was still live.
        withdrawn_specialist = Brief(
            id=11,
            data={},
            framework=framework,
            lot=specialist_lot,
            users=users,
            created_at=now.subtract(days=2),
            published_at=now.subtract(days=3),
            withdrawn_at=None
        )
        withdrawn_specialist.questions_closed_at = now.add(days=3)
        withdrawn_specialist.closed_at = now.add(days=5)
        withdrawn_specialist.withdrawn_at = now
        db.session.add(withdrawn_specialist)
        db.session.commit()
        yield db.session.query(Brief).all()
@pytest.fixture()
def brief_responses(app, overview_briefs, suppliers):
    """Seed submitted responses from supplier 2: one each for the
    single-seller briefs (7, 8) and three for the open-to-some brief (9).
    Yields all persisted responses.
    """
    with app.app_context():
        submitted = pendulum.now('utc')
        # (response id, brief id) pairs — all from supplier code 2.
        response_rows = [(1, 7), (2, 8), (3, 9), (4, 9), (5, 9)]
        for response_id, brief_id in response_rows:
            db.session.add(
                BriefResponse(
                    id=response_id,
                    brief_id=brief_id,
                    data={},
                    submitted_at=submitted,
                    supplier_code=2
                )
            )
        db.session.commit()
        yield db.session.query(BriefResponse).all()
@pytest.fixture()
def suppliers(app):
    """Seed two non-recruiter suppliers (codes 2 and 3) and yield them."""
    with app.app_context():
        # Each supplier uses the same value for id and code.
        for supplier_id, supplier_name in ((2, 'FriendFace'), (3, 'FriendFlutter')):
            db.session.add(
                Supplier(
                    id=supplier_id,
                    code=supplier_id,
                    name=supplier_name,
                    is_recruiter=False,
                    data={}
                )
            )
        db.session.commit()
        yield db.session.query(Supplier).all()
@pytest.fixture()
def users(app):
    """Seed a single active buyer account and yield all users."""
    with app.app_context():
        buyer = User(
            id=3,
            name='Maurice Moss',
            email_address='moss@ri.gov.au',
            password='mossman',
            active=True,
            password_changed_at=pendulum.now('utc'),
            role='buyer'
        )
        db.session.add(buyer)
        db.session.commit()
        yield db.session.query(User).all()
@pytest.fixture()
def framework():
    """Detached digital-marketplace framework (not persisted)."""
    marketplace = Framework(id=1, slug='digital-marketplace')
    return marketplace
@pytest.fixture()
def work_order():
    """Detached work order (not persisted)."""
    order = WorkOrder(id=1)
    return order
@pytest.fixture()
def outcome_lot():
    """Detached digital-outcome lot (not persisted)."""
    lot = Lot(id=1, slug='digital-outcome', one_service_limit=True)
    return lot
@pytest.fixture()
def specialist_lot():
    """Detached digital-professionals lot (not persisted)."""
    lot = Lot(id=1, slug='digital-professionals', one_service_limit=True)
    return lot
@pytest.fixture()
def outcome_brief(framework, outcome_lot):
    """Detached brief on the outcome lot, with no work order attached."""
    brief = Brief(id=1, data={}, framework=framework, lot=outcome_lot, work_order=None)
    return brief
@pytest.fixture()
def specialist_brief(framework, specialist_lot):
    """Detached brief on the specialist lot, with no work order attached."""
    brief = Brief(id=1, data={}, framework=framework, lot=specialist_lot, work_order=None)
    return brief
| 26.676157
| 89
| 0.515342
|
acfcf3228bda514d79c3b2987616d44aa2e5d7cc
| 3,289
|
py
|
Python
|
src/Algorithm/SVRG/SVRG.py
|
Yutong-Dai/ML-Algorithm-Pytorch
|
ab0eb8fa0c9c44dbf6847f6c44feaf4f3a18ec2d
|
[
"MIT"
] | null | null | null |
src/Algorithm/SVRG/SVRG.py
|
Yutong-Dai/ML-Algorithm-Pytorch
|
ab0eb8fa0c9c44dbf6847f6c44feaf4f3a18ec2d
|
[
"MIT"
] | null | null | null |
src/Algorithm/SVRG/SVRG.py
|
Yutong-Dai/ML-Algorithm-Pytorch
|
ab0eb8fa0c9c44dbf6847f6c44feaf4f3a18ec2d
|
[
"MIT"
] | null | null | null |
'''
File: SVRG.py
Author: Yutong Dai (yutongdai95@gmail.com)
File Created: 2021-03-10 14:17
Last Modified: 2021-03-12 01:23
--------------------------------------------
Description:
'''
import torch
import numpy as np
np.random.seed(0)  # fix the shuffling order so runs are reproducible
# NOTE(review): this module-level `device` is not referenced by SVRG below —
# callers appear to choose the device via x0; confirm before relying on it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class SVRG:
    """Stochastic Variance Reduced Gradient solver.

    Minimises the objective exposed by `prob`, which must provide
    `m` (sample count), `n` (dimension), `grad(x[, minibatch])` and,
    after a full-gradient call, a `loss` attribute.
    """
    def __init__(self, prob):
        self.prob = prob
        self.nSamples = prob.m  # number of training samples
        self.dim = prob.n       # parameter dimension
    def solve(self, x0, params):
        """Run SVRG from `x0`.

        params keys: 'batchsize', 'stepsize', 'maxepoch', 'effective_pass',
        'tol' (relative gradient-norm tolerance), 'printlevel', 'printevery'.
        Returns a dict with the final iterate, loss, gradient norm and the
        per-epoch loss sequence.
        """
        x = x0
        print(f"Computation is done on: {x0.device.type}")
        if not x.requires_grad:
            raise ValueError('Input x0 must be a tensor requires gradient')
        epoch = 0
        samples = [i for i in range(self.nSamples)]
        totalBatches = int(np.ceil(self.nSamples / params['batchsize']))
        flag = 'Reach the maximum number of iterations.'
        fseq = []
        if params['printlevel'] > 0:
            print(f'*******************************************************************')
            print(f'                   SVRG  Version: (03/11/2021)                     ')
            print(f'                      Algorithm parameters')
            for k, v in params.items():
                print(f' params: {k} | value:{v}')
            print(f'*******************************************************************')
        while epoch <= params['maxepoch']:
            # outter loop: evaluate full gradient
            # clear gradient
            if x.grad is not None:
                x.grad.data.zero_()
            # `+ 0.0` forces a copy so the snapshot is not aliased to x.grad.
            gradfx_full = self.prob.grad(x) + 0.0
            gradfx_full_norm = torch.linalg.norm(gradfx_full)
            if epoch % params['printevery'] == 0:
                print(f' epoch         f        |grad| ')
            print(f'{epoch:5d}   {self.prob.loss:3.4e}  {gradfx_full_norm:3.4e}')
            fseq.append(self.prob.loss.item())
            # check termination (relative to the initial gradient norm)
            if epoch == 0:
                gradfx0_norm = gradfx_full_norm
            if gradfx_full_norm <= params['tol'] * gradfx0_norm:
                flag = 'Find the optimal solution with the desired accuracy.'
                break
            # inner iteration: variance-reduced minibatch steps from a
            # detached copy of the current snapshot
            x_trial = x.clone().detach().requires_grad_(True)
            for j in range(params['effective_pass']):
                np.random.shuffle(samples)
                for i in range(totalBatches):
                    start, end = i * params['batchsize'], (i + 1) * params['batchsize']
                    minibatch = samples[start:end]
                    gradfx_minibacth = self.prob.grad(x, minibatch)
                    if i == 0 and j == 0:
                        # first step: trial point equals the snapshot, so
                        # reuse the minibatch gradient
                        gradfx_trial_minibacth = gradfx_minibacth
                    else:
                        gradfx_trial_minibacth = self.prob.grad(x_trial, minibatch)
                    # SVRG control variate update direction
                    v = gradfx_trial_minibacth - gradfx_minibacth + gradfx_full
                    with torch.no_grad():
                        x_trial.sub_(params['stepsize'] * v)
            x = x_trial.clone().detach().requires_grad_(True)
            epoch += 1
        print(f'-------------------------------------------------------------------')
        print(f'Exit: {flag}')
        result = {'x': x.detach(), 'fx': fseq[-1], 'gradNorm': gradfx_full_norm, 'fseq': fseq}
        return result
| 41.1125
| 94
| 0.489206
|
acfcf3de01c43068019ffd0433952d0446ea8871
| 8,541
|
py
|
Python
|
homeassistant/components/mqtt/switch.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/mqtt/switch.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:34:57.000Z
|
2022-03-12T00:59:48.000Z
|
homeassistant/components/mqtt/switch.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
"""Support for MQTT switches."""
import logging
import voluptuous as vol
from homeassistant.components import mqtt, switch
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import (
CONF_DEVICE, CONF_ICON, CONF_NAME, CONF_OPTIMISTIC, CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON, CONF_VALUE_TEMPLATE, STATE_ON)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
# Defaults applied when the user configuration omits the optional keys.
DEFAULT_NAME = 'MQTT Switch'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_OPTIMISTIC = False
# Optional overrides for the payloads *reported* on the state topic,
# when they differ from the payloads used to command the switch.
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
# Configuration schema for both YAML setup and MQTT discovery payloads.
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_ICON): cv.icon,
    vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
    vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
    vol.Optional(CONF_STATE_ON): cv.string,
    vol.Optional(CONF_STATE_OFF): cv.string,
    vol.Optional(CONF_UNIQUE_ID): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
    mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities, discovery_info=None):
    """Set up MQTT switch through configuration.yaml."""
    # NOTE(review): discovery_info is forwarded positionally into the
    # config_entry slot of _async_setup_entity — confirm this is intended.
    await _async_setup_entity(config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT switch dynamically through MQTT discovery."""
    async def async_discover(discovery_payload):
        """Discover and add a MQTT switch."""
        # Initialise before the try block: if pop() itself raises (payload
        # without a discovery hash), the except clause below would otherwise
        # hit an UnboundLocalError and mask the original exception.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(config, async_add_entities, config_entry,
                                      discovery_hash)
        except Exception:
            # Drop the half-registered discovery entry, then re-raise so the
            # failure is still surfaced to the caller.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise
    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(switch.DOMAIN, 'mqtt'),
        async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
                              discovery_hash=None):
    """Set up the MQTT switch."""
    entity = MqttSwitch(config, config_entry, discovery_hash)
    async_add_entities([entity])
# pylint: disable=too-many-ancestors
class MqttSwitch(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
                 MqttEntityDeviceInfo, SwitchDevice, RestoreEntity):
    """Representation of a switch that can be toggled using MQTT.

    State is tracked from the configured state topic when present;
    otherwise the entity runs in optimistic mode and assumes commands
    succeed.
    """
    def __init__(self, config, config_entry, discovery_hash):
        """Initialize the MQTT switch."""
        self._state = False       # last known on/off state
        self._sub_state = None    # handle for the active topic subscriptions
        self._state_on = None     # payload that means "on" on the state topic
        self._state_off = None    # payload that means "off" on the state topic
        self._optimistic = None   # True when state is assumed, not reported
        self._unique_id = config.get(CONF_UNIQUE_ID)
        # Load config
        self._setup_from_config(config)
        device_config = config.get(CONF_DEVICE)
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash,
                                     self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the payload, re-apply config and re-subscribe so topic
        # changes in the discovery message take effect immediately.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._config = config
        # The reported state payloads default to the command payloads when
        # no explicit state_on/state_off overrides are configured.
        state_on = config.get(CONF_STATE_ON)
        self._state_on = state_on if state_on else config.get(CONF_PAYLOAD_ON)
        state_off = config.get(CONF_STATE_OFF)
        self._state_off = state_off if state_off else \
            config.get(CONF_PAYLOAD_OFF)
        self._optimistic = config.get(CONF_OPTIMISTIC)
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        template = self._config.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = self.hass
        @callback
        def state_message_received(msg):
            """Handle new MQTT state messages."""
            payload = msg.payload
            if template is not None:
                payload = template.async_render_with_possible_json_value(
                    payload)
            # Payloads matching neither state are silently ignored.
            if payload == self._state_on:
                self._state = True
            elif payload == self._state_off:
                self._state = False
            self.async_write_ha_state()
        if self._config.get(CONF_STATE_TOPIC) is None:
            # Force into optimistic mode.
            self._optimistic = True
        else:
            self._sub_state = await subscription.async_subscribe_topics(
                self.hass, self._sub_state,
                {CONF_STATE_TOPIC:
                     {'topic': self._config.get(CONF_STATE_TOPIC),
                      'msg_callback': state_message_received,
                      'qos': self._config.get(CONF_QOS)}})
        if self._optimistic:
            # No reported state: restore the last known state from the
            # previous Home Assistant run.
            last_state = await self.async_get_last_state()
            if last_state:
                self._state = last_state.state == STATE_ON
    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state)
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
    @property
    def should_poll(self):
        """Return the polling state."""
        return False
    @property
    def name(self):
        """Return the name of the switch."""
        return self._config.get(CONF_NAME)
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
    @property
    def icon(self):
        """Return the icon."""
        return self._config.get(CONF_ICON)
    async def async_turn_on(self, **kwargs):
        """Turn the device on.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._config.get(CONF_COMMAND_TOPIC),
            self._config.get(CONF_PAYLOAD_ON),
            self._config.get(CONF_QOS),
            self._config.get(CONF_RETAIN))
        if self._optimistic:
            # Optimistically assume that switch has changed state.
            self._state = True
            self.async_write_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn the device off.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._config.get(CONF_COMMAND_TOPIC),
            self._config.get(CONF_PAYLOAD_OFF),
            self._config.get(CONF_QOS),
            self._config.get(CONF_RETAIN))
        if self._optimistic:
            # Optimistically assume that switch has changed state.
            self._state = False
            self.async_write_ha_state()
| 35.886555
| 79
| 0.666315
|
acfcf4306fbf4649dd8284ba8001bc5075bdac57
| 19,702
|
py
|
Python
|
homeassistant/components/netatmo/climate.py
|
csseal/core
|
5802d65ef71697e6627b82e1677894d13d0f16d7
|
[
"Apache-2.0"
] | 3
|
2019-11-13T18:19:33.000Z
|
2021-07-18T11:40:37.000Z
|
homeassistant/components/netatmo/climate.py
|
csseal/core
|
5802d65ef71697e6627b82e1677894d13d0f16d7
|
[
"Apache-2.0"
] | 45
|
2020-07-21T12:58:24.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/netatmo/climate.py
|
csseal/core
|
5802d65ef71697e6627b82e1677894d13d0f16d7
|
[
"Apache-2.0"
] | 1
|
2021-11-19T19:01:57.000Z
|
2021-11-19T19:01:57.000Z
|
"""Support for Netatmo Smart thermostats."""
import logging
from typing import List, Optional
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
DEFAULT_MIN_TEMP,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_TEMPERATURE,
PRECISION_HALVES,
STATE_OFF,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
ATTR_HEATING_POWER_REQUEST,
ATTR_SCHEDULE_NAME,
DATA_HANDLER,
DATA_HOMES,
DATA_SCHEDULES,
DOMAIN,
EVENT_TYPE_CANCEL_SET_POINT,
EVENT_TYPE_SET_POINT,
EVENT_TYPE_THERM_MODE,
MANUFACTURER,
SERVICE_SET_SCHEDULE,
SIGNAL_NAME,
)
from .data_handler import HOMEDATA_DATA_CLASS_NAME, HOMESTATUS_DATA_CLASS_NAME
from .netatmo_entity_base import NetatmoBase
_LOGGER = logging.getLogger(__name__)
# Home Assistant preset names exposed for Netatmo thermostat modes.
PRESET_FROST_GUARD = "Frost Guard"
PRESET_SCHEDULE = "Schedule"
PRESET_MANUAL = "Manual"
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_PRESET = [PRESET_AWAY, PRESET_BOOST, PRESET_FROST_GUARD, PRESET_SCHEDULE]
# Raw mode strings used by the Netatmo API.
STATE_NETATMO_SCHEDULE = "schedule"
STATE_NETATMO_HG = "hg"
STATE_NETATMO_MAX = "max"
STATE_NETATMO_AWAY = PRESET_AWAY
STATE_NETATMO_OFF = STATE_OFF
STATE_NETATMO_MANUAL = "manual"
STATE_NETATMO_HOME = "home"
# HA preset -> Netatmo API mode string.
PRESET_MAP_NETATMO = {
    PRESET_FROST_GUARD: STATE_NETATMO_HG,
    PRESET_BOOST: STATE_NETATMO_MAX,
    PRESET_SCHEDULE: STATE_NETATMO_SCHEDULE,
    PRESET_AWAY: STATE_NETATMO_AWAY,
    STATE_NETATMO_OFF: STATE_NETATMO_OFF,
}
# Netatmo API mode string -> HA preset (inverse of the above, plus manual).
NETATMO_MAP_PRESET = {
    STATE_NETATMO_HG: PRESET_FROST_GUARD,
    STATE_NETATMO_MAX: PRESET_BOOST,
    STATE_NETATMO_SCHEDULE: PRESET_SCHEDULE,
    STATE_NETATMO_AWAY: PRESET_AWAY,
    STATE_NETATMO_OFF: STATE_NETATMO_OFF,
    STATE_NETATMO_MANUAL: STATE_NETATMO_MANUAL,
}
# Preset/mode -> HVAC mode shown in the climate card.
HVAC_MAP_NETATMO = {
    PRESET_SCHEDULE: HVAC_MODE_AUTO,
    STATE_NETATMO_HG: HVAC_MODE_AUTO,
    PRESET_FROST_GUARD: HVAC_MODE_AUTO,
    PRESET_BOOST: HVAC_MODE_HEAT,
    STATE_NETATMO_OFF: HVAC_MODE_OFF,
    STATE_NETATMO_MANUAL: HVAC_MODE_AUTO,
    PRESET_MANUAL: HVAC_MODE_AUTO,
    STATE_NETATMO_AWAY: HVAC_MODE_AUTO,
}
# Boiler heating flag -> current HVAC action.
CURRENT_HVAC_MAP_NETATMO = {True: CURRENT_HVAC_HEAT, False: CURRENT_HVAC_IDLE}
DEFAULT_MAX_TEMP = 30
# Netatmo module type identifiers: wall thermostat vs radiator valve.
NA_THERM = "NATherm1"
NA_VALVE = "NRV"
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the Netatmo energy platform."""
    data_handler = hass.data[DOMAIN][entry.entry_id][DATA_HANDLER]
    # Register the shared home-data class before reading it back.
    await data_handler.register_data_class(
        HOMEDATA_DATA_CLASS_NAME, HOMEDATA_DATA_CLASS_NAME, None
    )
    home_data = data_handler.data.get(HOMEDATA_DATA_CLASS_NAME)
    if not home_data:
        return
    async def get_entities():
        """Retrieve Netatmo entities."""
        entities = []
        for home_id in get_all_home_ids(home_data):
            _LOGGER.debug("Setting up home %s ...", home_id)
            for room_id in home_data.rooms[home_id].keys():
                room_name = home_data.rooms[home_id][room_id]["name"]
                _LOGGER.debug("Setting up room %s (%s) ...", room_name, room_id)
                # One home-status data class per home, shared by its rooms.
                signal_name = f"{HOMESTATUS_DATA_CLASS_NAME}-{home_id}"
                await data_handler.register_data_class(
                    HOMESTATUS_DATA_CLASS_NAME, signal_name, None, home_id=home_id
                )
                home_status = data_handler.data.get(signal_name)
                # Only rooms with a reported status get a climate entity.
                if home_status and room_id in home_status.rooms:
                    entities.append(NetatmoThermostat(data_handler, home_id, room_id))
            # Cache schedule id -> name for the set-schedule service.
            hass.data[DOMAIN][DATA_SCHEDULES][home_id] = {
                schedule_id: schedule_data.get("name")
                for schedule_id, schedule_data in (
                    data_handler.data[HOMEDATA_DATA_CLASS_NAME]
                    .schedules[home_id]
                    .items()
                )
            }
        # NOTE: the comprehension variable `home_data` shadows the outer
        # `home_data` only inside the comprehension scope (Python 3).
        hass.data[DOMAIN][DATA_HOMES] = {
            home_id: home_data.get("name")
            for home_id, home_data in (
                data_handler.data[HOMEDATA_DATA_CLASS_NAME].homes.items()
            )
        }
        return entities
    async_add_entities(await get_entities(), True)
    platform = entity_platform.current_platform.get()
    if home_data is not None:
        platform.async_register_entity_service(
            SERVICE_SET_SCHEDULE,
            {vol.Required(ATTR_SCHEDULE_NAME): cv.string},
            "_service_set_schedule",
        )
class NetatmoThermostat(NetatmoBase, ClimateEntity):
    """Representation a Netatmo thermostat.

    One entity per room; the underlying hardware may be a wall thermostat
    (NATherm1) or a radiator valve (NRV), which changes how the current
    HVAC action and battery level are derived.
    """
    def __init__(self, data_handler, home_id, room_id):
        """Initialize the sensor."""
        ClimateEntity.__init__(self)
        super().__init__(data_handler)
        self._id = room_id
        self._home_id = home_id
        # Per-home status data class registered by async_setup_entry.
        self._home_status_class = f"{HOMESTATUS_DATA_CLASS_NAME}-{self._home_id}"
        self._data_classes.extend(
            [
                {
                    "name": HOMEDATA_DATA_CLASS_NAME,
                    SIGNAL_NAME: HOMEDATA_DATA_CLASS_NAME,
                },
                {
                    "name": HOMESTATUS_DATA_CLASS_NAME,
                    "home_id": self._home_id,
                    SIGNAL_NAME: self._home_status_class,
                },
            ]
        )
        self._home_status = self.data_handler.data[self._home_status_class]
        self._room_status = self._home_status.rooms[room_id]
        self._room_data = self._data.rooms[home_id][room_id]
        # Default to valve; upgrade to thermostat if any module in the room
        # is a wall thermostat.
        self._model = NA_VALVE
        for module in self._room_data.get("module_ids"):
            if self._home_status.thermostats.get(module):
                self._model = NA_THERM
                break
        self._state = None
        self._device_name = self._data.rooms[home_id][room_id]["name"]
        self._name = f"{MANUFACTURER} {self._device_name}"
        self._current_temperature = None
        self._target_temperature = None
        self._preset = None
        self._away = None
        self._operation_list = [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
        self._support_flags = SUPPORT_FLAGS
        self._hvac_mode = None
        self._battery_level = None
        self._connected = None
        self._away_temperature = None
        self._hg_temperature = None
        self._boilerstatus = None
        self._setpoint_duration = None
        # Only wall thermostats can be switched fully off.
        if self._model == NA_THERM:
            self._operation_list.append(HVAC_MODE_OFF)
        self._unique_id = f"{self._id}-{self._model}"
    async def async_added_to_hass(self) -> None:
        """Entity created."""
        await super().async_added_to_hass()
        # Listen for webhook pushes so state changes show up without polling.
        for event_type in (
            EVENT_TYPE_SET_POINT,
            EVENT_TYPE_THERM_MODE,
            EVENT_TYPE_CANCEL_SET_POINT,
        ):
            self._listeners.append(
                async_dispatcher_connect(
                    self.hass,
                    f"signal-{DOMAIN}-webhook-{event_type}",
                    self.handle_event,
                )
            )
    async def handle_event(self, event):
        """Handle webhook events."""
        data = event["data"]
        if not data.get("home"):
            return
        home = data["home"]
        # Home-wide mode change (schedule / away / frost guard).
        if self._home_id == home["id"] and data["event_type"] == EVENT_TYPE_THERM_MODE:
            self._preset = NETATMO_MAP_PRESET[home[EVENT_TYPE_THERM_MODE]]
            self._hvac_mode = HVAC_MAP_NETATMO[self._preset]
            if self._preset == PRESET_FROST_GUARD:
                self._target_temperature = self._hg_temperature
            elif self._preset == PRESET_AWAY:
                self._target_temperature = self._away_temperature
            elif self._preset == PRESET_SCHEDULE:
                self.async_update_callback()
            self.async_write_ha_state()
            return
        if not home.get("rooms"):
            return
        # Room-level setpoint change or cancellation for this room.
        for room in home["rooms"]:
            if data["event_type"] == EVENT_TYPE_SET_POINT:
                if self._id == room["id"]:
                    if room["therm_setpoint_mode"] == "off":
                        self._hvac_mode = HVAC_MODE_OFF
                    else:
                        self._target_temperature = room["therm_setpoint_temperature"]
                    self.async_write_ha_state()
                    break
            elif data["event_type"] == EVENT_TYPE_CANCEL_SET_POINT:
                if self._id == room["id"]:
                    self.async_update_callback()
                    self.async_write_ha_state()
                    break
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature
    @property
    def target_temperature_step(self) -> Optional[float]:
        """Return the supported step of target temperature."""
        return PRECISION_HALVES
    @property
    def hvac_mode(self):
        """Return hvac operation ie. heat, cool mode."""
        return self._hvac_mode
    @property
    def hvac_modes(self):
        """Return the list of available hvac operation modes."""
        return self._operation_list
    @property
    def hvac_action(self) -> Optional[str]:
        """Return the current running hvac operation if supported."""
        if self._model == NA_THERM:
            return CURRENT_HVAC_MAP_NETATMO[self._boilerstatus]
        # Maybe it is a valve
        if self._room_status and self._room_status.get("heating_power_request", 0) > 0:
            return CURRENT_HVAC_HEAT
        return CURRENT_HVAC_IDLE
    def set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode."""
        if hvac_mode == HVAC_MODE_OFF:
            self.turn_off()
        elif hvac_mode == HVAC_MODE_AUTO:
            if self.hvac_mode == HVAC_MODE_OFF:
                self.turn_on()
            self.set_preset_mode(PRESET_SCHEDULE)
        elif hvac_mode == HVAC_MODE_HEAT:
            self.set_preset_mode(PRESET_BOOST)
    def set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        # NOTE(review): a target of 0 appears to mean "no setpoint"; the room
        # is first reset to home mode — confirm against the Netatmo API.
        if self.target_temperature == 0:
            self._home_status.set_room_thermpoint(
                self._id, STATE_NETATMO_HOME,
            )
        # Valves have no "max" mode: boost is emulated with a manual setpoint
        # at the maximum temperature.
        if preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX] and self._model == NA_VALVE:
            self._home_status.set_room_thermpoint(
                self._id, STATE_NETATMO_MANUAL, DEFAULT_MAX_TEMP,
            )
        elif preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]:
            self._home_status.set_room_thermpoint(
                self._id, PRESET_MAP_NETATMO[preset_mode]
            )
        elif preset_mode in [PRESET_SCHEDULE, PRESET_FROST_GUARD, PRESET_AWAY]:
            self._home_status.set_thermmode(PRESET_MAP_NETATMO[preset_mode])
        else:
            _LOGGER.error("Preset mode '%s' not available", preset_mode)
        self.async_write_ha_state()
    @property
    def preset_mode(self) -> Optional[str]:
        """Return the current preset mode, e.g., home, away, temp."""
        return self._preset
    @property
    def preset_modes(self) -> Optional[List[str]]:
        """Return a list of available preset modes."""
        return SUPPORT_PRESET
    def set_temperature(self, **kwargs):
        """Set new target temperature for 2 hours."""
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is None:
            return
        self._home_status.set_room_thermpoint(self._id, STATE_NETATMO_MANUAL, temp)
        self.async_write_ha_state()
    @property
    def device_state_attributes(self):
        """Return the state attributes of the thermostat."""
        attr = {}
        if self._battery_level is not None:
            attr[ATTR_BATTERY_LEVEL] = self._battery_level
        if self._model == NA_VALVE:
            attr[ATTR_HEATING_POWER_REQUEST] = self._room_status.get(
                "heating_power_request", 0
            )
        return attr
    def turn_off(self):
        """Turn the entity off."""
        # Valves cannot be switched off; park them at the minimum setpoint.
        if self._model == NA_VALVE:
            self._home_status.set_room_thermpoint(
                self._id, STATE_NETATMO_MANUAL, DEFAULT_MIN_TEMP,
            )
        elif self.hvac_mode != HVAC_MODE_OFF:
            self._home_status.set_room_thermpoint(self._id, STATE_NETATMO_OFF)
        self.async_write_ha_state()
    def turn_on(self):
        """Turn the entity on."""
        self._home_status.set_room_thermpoint(self._id, STATE_NETATMO_HOME)
        self.async_write_ha_state()
    @property
    def available(self) -> bool:
        """If the device hasn't been able to connect, mark as unavailable."""
        return bool(self._connected)
    @callback
    def async_update_callback(self):
        """Update the entity's state."""
        self._home_status = self.data_handler.data[self._home_status_class]
        self._room_status = self._home_status.rooms.get(self._id)
        self._room_data = self._data.rooms.get(self._home_id, {}).get(self._id)
        if not self._room_status or not self._room_data:
            if self._connected:
                _LOGGER.info(
                    "The thermostat in room %s seems to be out of reach",
                    self._device_name,
                )
            self._connected = False
            return
        roomstatus = {"roomID": self._room_status.get("id", {})}
        if self._room_status.get("reachable"):
            roomstatus.update(self._build_room_status())
        self._away_temperature = self._data.get_away_temp(self._home_id)
        self._hg_temperature = self._data.get_hg_temp(self._home_id)
        self._setpoint_duration = self._data.setpoint_duration[self._home_id]
        # Unreachable rooms keep their previous entity state.
        if "current_temperature" not in roomstatus:
            return
        if self._model is None:
            self._model = roomstatus["module_type"]
        self._current_temperature = roomstatus["current_temperature"]
        self._target_temperature = roomstatus["target_temperature"]
        self._preset = NETATMO_MAP_PRESET[roomstatus["setpoint_mode"]]
        self._hvac_mode = HVAC_MAP_NETATMO[self._preset]
        self._battery_level = roomstatus.get("battery_level")
        self._connected = True
        self._away = self._hvac_mode == HVAC_MAP_NETATMO[STATE_NETATMO_AWAY]
    def _build_room_status(self):
        """Construct room status."""
        try:
            roomstatus = {
                "roomname": self._room_data["name"],
                "target_temperature": self._room_status["therm_setpoint_temperature"],
                "setpoint_mode": self._room_status["therm_setpoint_mode"],
                "current_temperature": self._room_status["therm_measured_temperature"],
                "module_type": self._data.get_thermostat_type(
                    home_id=self._home_id, room_id=self._id
                ),
                "module_id": None,
                "heating_status": None,
                "heating_power_request": None,
            }
            batterylevel = None
            for module_id in self._room_data["module_ids"]:
                # Prefer a wall thermostat module; otherwise the first module.
                if (
                    self._data.modules[self._home_id][module_id]["type"] == NA_THERM
                    or roomstatus["module_id"] is None
                ):
                    roomstatus["module_id"] = module_id
            if roomstatus["module_type"] == NA_THERM:
                self._boilerstatus = self._home_status.boiler_status(
                    roomstatus["module_id"]
                )
                roomstatus["heating_status"] = self._boilerstatus
                batterylevel = self._home_status.thermostats[
                    roomstatus["module_id"]
                ].get("battery_level")
            elif roomstatus["module_type"] == NA_VALVE:
                roomstatus["heating_power_request"] = self._room_status[
                    "heating_power_request"
                ]
                roomstatus["heating_status"] = roomstatus["heating_power_request"] > 0
                if self._boilerstatus is not None:
                    roomstatus["heating_status"] = (
                        self._boilerstatus and roomstatus["heating_status"]
                    )
                batterylevel = self._home_status.valves[roomstatus["module_id"]].get(
                    "battery_level"
                )
            if batterylevel:
                batterypct = interpolate(batterylevel, roomstatus["module_type"])
                # Report the weakest battery among the room's modules.
                if (
                    not roomstatus.get("battery_level")
                    or batterypct < roomstatus["battery_level"]
                ):
                    roomstatus["battery_level"] = batterypct
            return roomstatus
        except KeyError as err:
            _LOGGER.error("Update of room %s failed. Error: %s", self._id, err)
        return {}
    def _service_set_schedule(self, **kwargs):
        """Handle the set-schedule entity service: switch the home's active
        heating schedule to the one with the given name."""
        schedule_name = kwargs.get(ATTR_SCHEDULE_NAME)
        schedule_id = None
        for sid, name in self.hass.data[DOMAIN][DATA_SCHEDULES][self._home_id].items():
            if name == schedule_name:
                schedule_id = sid
        if not schedule_id:
            _LOGGER.error("You passed an invalid schedule")
            return
        self._data.switch_home_schedule(home_id=self._home_id, schedule_id=schedule_id)
        _LOGGER.debug(
            "Setting %s schedule to %s (%s)",
            self._home_id,
            kwargs.get(ATTR_SCHEDULE_NAME),
            schedule_id,
        )
def interpolate(batterylevel, module_type):
    """Interpolate battery level depending on device type."""
    # Raw voltage thresholds (mV) per device type.
    na_battery_levels = {
        NA_THERM: {
            "full": 4100,
            "high": 3600,
            "medium": 3300,
            "low": 3000,
            "empty": 2800,
        },
        NA_VALVE: {
            "full": 3200,
            "high": 2700,
            "medium": 2400,
            "low": 2200,
            "empty": 2200,
        },
    }
    thresholds = na_battery_levels[module_type]
    sorted_levels = sorted(thresholds.values())
    steps = [20, 50, 80, 100]
    # Saturate at the extremes, otherwise pick the bracket index.
    if batterylevel >= thresholds["full"]:
        return 100
    if batterylevel < thresholds["low"]:
        return 0
    if batterylevel >= thresholds["high"]:
        idx = 3
    elif batterylevel >= thresholds["medium"]:
        idx = 2
    else:
        idx = 1
    # Linear interpolation between the two surrounding voltage levels.
    pct = steps[idx - 1] + (
        (steps[idx] - steps[idx - 1])
        * (batterylevel - sorted_levels[idx])
        / (sorted_levels[idx + 1] - sorted_levels[idx])
    )
    return int(pct)
def get_all_home_ids(home_data):
    """Get all the home ids returned by NetAtmo API."""
    if home_data is None:
        return []
    ids = []
    # Only homes that expose both schedules and modules are usable.
    for home_id in home_data.homes:
        home = home_data.homes[home_id]
        if "therm_schedules" in home and "modules" in home:
            ids.append(home["id"])
    return ids
| 33.910499
| 88
| 0.615826
|
acfcf46ee58bb1fc74821d7eb2af1fd19442ad05
| 7,146
|
py
|
Python
|
catalyst/metrics/_auc.py
|
sergunya17/catalyst
|
f98d71138c09cd1b5a69b788cb5006115f5c7fda
|
[
"Apache-2.0"
] | null | null | null |
catalyst/metrics/_auc.py
|
sergunya17/catalyst
|
f98d71138c09cd1b5a69b788cb5006115f5c7fda
|
[
"Apache-2.0"
] | null | null | null |
catalyst/metrics/_auc.py
|
sergunya17/catalyst
|
f98d71138c09cd1b5a69b788cb5006115f5c7fda
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Tuple
import torch
from catalyst.metrics._metric import ICallbackLoaderMetric
from catalyst.metrics.functional._auc import auc, binary_auc
from catalyst.metrics.functional._misc import process_multilabel_components
from catalyst.settings import SETTINGS
from catalyst.utils import get_device
from catalyst.utils.distributed import all_gather, get_backend
if SETTINGS.xla_required:
import torch_xla.core.xla_model as xm
class AUCMetric(ICallbackLoaderMetric):
    """AUC metric.

    Accumulates raw scores/targets across batches and computes per-class,
    micro, macro and weighted AUC on demand.

    Args:
        compute_on_call: if True, computes and returns metric value during metric call
        compute_per_class_metrics: boolean flag to compute per-class metrics
            (default: SETTINGS.compute_per_class_metrics or False).
        prefix: metric prefix
        suffix: metric suffix

    .. warning::

        This metric is under API improvement.

    Examples:

    .. code-block:: python

        import torch
        from catalyst import metrics

        scores = torch.tensor([
            [0.9, 0.1],
            [0.1, 0.9],
        ])
        targets = torch.tensor([
            [1, 0],
            [0, 1],
        ])
        metric = metrics.AUCMetric()

        # for efficient statistics storage
        metric.reset(num_batches=1, num_samples=len(scores))
        metric.update(scores, targets)
        metric.compute()
        # (
        #     tensor([1., 1.])  # per class
        #     1.0,              # micro
        #     1.0,              # macro
        #     1.0               # weighted
        # )

        metric.compute_key_value()
        # {
        #     'auc': 1.0,
        #     'auc/_micro': 1.0,
        #     'auc/_macro': 1.0,
        #     'auc/_weighted': 1.0
        #     'auc/class_00': 1.0,
        #     'auc/class_01': 1.0,
        # }

        metric.reset(num_batches=1, num_samples=len(scores))
        metric(scores, targets)
        # (
        #     tensor([1., 1.])  # per class
        #     1.0,              # micro
        #     1.0,              # macro
        #     1.0               # weighted
        # )

    .. code-block:: python

        import torch
        from torch.utils.data import DataLoader, TensorDataset
        from catalyst import dl

        # sample data
        num_samples, num_features, num_classes = int(1e4), int(1e1), 4
        X = torch.rand(num_samples, num_features)
        y = (torch.rand(num_samples,) * num_classes).to(torch.int64)

        # pytorch loaders
        dataset = TensorDataset(X, y)
        loader = DataLoader(dataset, batch_size=32, num_workers=1)
        loaders = {"train": loader, "valid": loader}

        # model, criterion, optimizer, scheduler
        model = torch.nn.Linear(num_features, num_classes)
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters())
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])

        # model training
        runner = dl.SupervisedRunner(
            input_key="features", output_key="logits", target_key="targets", loss_key="loss"
        )
        runner.train(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            scheduler=scheduler,
            loaders=loaders,
            logdir="./logdir",
            num_epochs=3,
            valid_loader="valid",
            valid_metric="accuracy03",
            minimize_valid_metric=False,
            verbose=True,
            callbacks=[
                dl.AccuracyCallback(
                    input_key="logits", target_key="targets", num_classes=num_classes
                ),
                dl.PrecisionRecallF1SupportCallback(
                    input_key="logits", target_key="targets", num_classes=num_classes
                ),
                dl.AUCCallback(input_key="logits", target_key="targets"),
            ],
        )

    .. note::
        Please follow the `minimal examples`_ sections for more use cases.

        .. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples
    """

    def __init__(
        self,
        compute_on_call: bool = True,
        compute_per_class_metrics: bool = SETTINGS.compute_per_class_metrics,
        prefix: str = None,
        suffix: str = None,
    ):
        """Init."""
        super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
        self.metric_name = f"{self.prefix}auc{self.suffix}"
        self._ddp_backend = None
        self.compute_per_class_metrics = compute_per_class_metrics
        # Raw per-batch CPU tensors accumulated between reset() calls.
        self.scores = []
        self.targets = []
        self.reset(0, 0)

    def reset(self, num_batches: int, num_samples: int) -> None:
        """Resets all fields.

        ``num_batches``/``num_samples`` belong to the ICallbackLoaderMetric
        interface but are unused here: statistics are stored as growing lists.
        """
        self._ddp_backend = get_backend()
        self.scores = []
        self.targets = []

    def update(self, scores: torch.Tensor, targets: torch.Tensor) -> None:
        """Updates metric value with statistics for new data.

        Args:
            scores: tensor with scores
            targets: tensor with targets
        """
        # Detach and move to CPU so accumulation never holds GPU memory.
        self.scores.append(scores.cpu().detach())
        self.targets.append(targets.cpu().detach())

    def compute(self) -> Tuple[torch.Tensor, float, float, float]:
        """Computes the AUC metric based on saved statistics.

        Returns:
            (per_class, micro, macro, weighted) AUC values; ``per_class`` is
            an empty list when ``compute_per_class_metrics`` is disabled.
        """
        targets = torch.cat(self.targets)
        scores = torch.cat(self.scores)

        # ddp hotfix, could be done better
        # but metric must handle DDP on it's own
        if self._ddp_backend == "xla":
            # if you have "RuntimeError: Aborted: Session XXX is not found" here
            # please, ask Google for a more powerful TPU setup ;)
            device = get_device()
            scores = xm.all_gather(scores.to(device)).cpu().detach()
            targets = xm.all_gather(targets.to(device)).cpu().detach()
        elif self._ddp_backend == "ddp":
            scores = torch.cat(all_gather(scores))
            targets = torch.cat(all_gather(targets))

        scores, targets, _ = process_multilabel_components(outputs=scores, targets=targets)
        per_class = auc(scores=scores, targets=targets)
        # Micro: one binary AUC over all (sample, class) decisions flattened.
        micro = binary_auc(scores=scores.view(-1), targets=targets.view(-1))[0]
        # Macro: unweighted mean of the per-class AUCs.
        macro = per_class.mean().item()
        # Weighted: per-class AUCs weighted by positive-label frequency.
        weights = targets.sum(axis=0) / len(targets)
        weighted = (per_class * weights).sum().item()
        if self.compute_per_class_metrics:
            return per_class, micro, macro, weighted
        else:
            # NOTE: an empty list (not a tensor) stands in for per-class values.
            return [], micro, macro, weighted

    def compute_key_value(self) -> Dict[str, float]:
        """Computes the AUC metric based on saved statistics and returns key-value results."""
        per_class_auc, micro_auc, macro_auc, weighted_auc = self.compute()
        # Per-class entries are absent when compute_per_class_metrics is False
        # (per_class_auc is then empty).
        output = {
            f"{self.metric_name}/class_{i:02d}": value.item()
            for i, value in enumerate(per_class_auc)
        }
        output[f"{self.metric_name}/_micro"] = micro_auc
        # The bare metric name maps to the macro average.
        output[self.metric_name] = macro_auc
        output[f"{self.metric_name}/_macro"] = macro_auc
        output[f"{self.metric_name}/_weighted"] = weighted_auc
        return output
__all__ = ["AUCMetric"]
| 34.521739
| 94
| 0.586762
|
acfcf58e4a6bf59e77b9e7d0005e4e0f122d7d8e
| 2,663
|
py
|
Python
|
anytask/settings_local_example.py
|
georgy-komarov/anytask
|
24ffaaf62f7fd219909ad7ab42f1441f9ff6d15a
|
[
"MIT"
] | null | null | null |
anytask/settings_local_example.py
|
georgy-komarov/anytask
|
24ffaaf62f7fd219909ad7ab42f1441f9ff6d15a
|
[
"MIT"
] | null | null | null |
anytask/settings_local_example.py
|
georgy-komarov/anytask
|
24ffaaf62f7fd219909ad7ab42f1441f9ff6d15a
|
[
"MIT"
] | null | null | null |
"""Example local settings for anytask.

Rename this file to ``settings_local.py`` and replace the placeholder
values before deploying. See the notes at the bottom of the file.
"""
import os

# Required for Django 1.5
# Set server domain(s)
ALLOWED_HOSTS = ['domain.com', 'yourdoamin.ru']

# Set E-MAIL Settings
# Use EMAIL_USE_SSL and EMAIL_USE_TLS vars for SSL support
EMAIL_BACKEND = 'mail_smtp_ssl.EmailBackend'
# SMTP Server host
EMAIL_HOST = 'smtp.mailserverexample.ru'
# SMTP Server port
EMAIL_PORT = 465
# Sender SMTP username
EMAIL_HOST_USER = 'mail_login_here'
# Sender SMTP password
EMAIL_HOST_PASSWORD = 'mail_passwd_here'
# Connection settings (pick SSL or TLS, not both)
EMAIL_USE_SSL = True
EMAIL_USE_TLS = False
# Or use mailgun
# EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
# MAILGUN_ACCESS_KEY = 'API-KEY'
# MAILGUN_SERVER_NAME = 'DOMAIN.COM'
# Set FROM field
DEFAULT_FROM_EMAIL = 'example@mail.ru'
# Set admins for e-mail error notifications
ADMINS = (('Admin Adminov', 'exampleadmin@mail.ru'), ('Name Lastname', 'exampleadmin2@mail.ru'))
# DO NOT DELETE THIS LINE!
MANAGERS = ADMINS

# Set paths + locale fix
# DO NOT CHANGE THESE LINES!
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
LOCALE_PATHS = (os.path.join(PROJECT_PATH, 'locale'),)  # LOCALE_PATHS must be tuple

# Set secret key
# Make this unique, and don't share it with anybody.
# manage.py generate_secret_key
SECRET_KEY = 'SUPER_SECRET_KEY'

# ReCaptcha support
# FIXME Not working
RECAPTCHA_PUBLIC_KEY = "RECAPTCHA_PUBLIC_KEY"
RECAPTCHA_PRIVATE_KEY = "RECAPTCHA_PRIVATE_KEY"

# Setup ReviewBoard
# FIXME Not working
RB_API_URL = "http://localhost:8080"
RB_API_USERNAME = "anytask"
RB_API_PASSWORD = "P@ssw0rd"
RB_API_DEFAULT_REVIEW_GROUP = 'teachers'
RB_SYMLINK_DIR = '/var/lib/anytask/repos/'

# Set contest integration
CONTEST_OAUTH = 'ADMIN_TOKEN_HERE'
CONTEST_OAUTH_ID = 'APP_ID_FROM_OAUTH.YANDEX.RU'
CONTEST_OAUTH_PASSWORD = 'APP_PASSWD_FROM_OAUTH.YANDEX.RU'

# Set specific compilers for courses (course id -> {extension: compiler})
# TODO Understand how it works
CONTEST_EXTENSIONS_COURSE = {
    30: {
        ".py": "python3"
    },
    13: {
        ".h": "make2"
    },
    61: {
        ".h": "make2"
    }
}

# Setup Yandex.Passport integration
PASSPORT_OAUTH_ID = 'APP_ID_FROM_OAUTH.YANDEX.RU'
PASSPORT_OAUTH_PASSWORD = 'APP_PASSWD_FROM_OAUTH.YANDEX.RU'

# File uploader settings
MAX_FILE_SIZE = 1024 * 1024  # 1 Mb
MAX_FILES_NUMBER = 10
# Regex of accepted upload extensions (used with re.search on filenames)
ACCEPTED_FILE_TYPES = '\.+(jpg|jpeg|png|gif|bmp|sh|bas|pas|cpp|c|cs|java|php|py|txt|rtf|doc|docx|xls|xlsx|ppt|pptx)$'

ACCOUNT_ACTIVATION_DAYS = 7
INVITE_EXPIRED_DAYS = 180

# If everything works, disable debug
# DEBUG = False

# Close registration after year start if needed
# REGISTRATION_OPEN = False

# RENAME THIS FILE TO settings_local.py !!!
# Do manage.py makemessages -l ru
# and manage.py compilemessages -l ru
# to generate translations
| 25.122642
| 117
| 0.749531
|
acfcf609250cd2d83fbd4e5f4fa6c9d993316750
| 1,559
|
py
|
Python
|
airbyte-integrations/connectors/source-github-singer/setup.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-github-singer/setup.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-github-singer/setup.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import find_packages, setup
# Package metadata for the source-github-singer Airbyte connector.
# Wraps the pinned airbyte fork of tap-github as a singer-based source.
setup(
    name="source_github_singer",
    description="Source implementation for Github.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    packages=find_packages(),
    install_requires=[
        "tap-github @ https://github.com/airbytehq/tap-github/tarball/v1.9.4-airbyte",
        "requests==2.20.0",
        "airbyte-protocol",
        "base-singer",
        "base-python",
    ],
    # Ship all JSON resources (catalogs/specs) with the package.
    package_data={"": ["*.json"]},
)
| 36.255814
| 86
| 0.746632
|
acfcf73941e90b7e0cd7471fabb2d6f629ca3d5f
| 2,738
|
py
|
Python
|
simple_rl/tasks/random/RandomMDPClass.py
|
KorlaMarch/simple_rl
|
30086b5cf4fd3e9dee76ddfb5ae4f565593ce191
|
[
"Apache-2.0"
] | 1
|
2018-11-11T17:03:59.000Z
|
2018-11-11T17:03:59.000Z
|
simple_rl/tasks/random/RandomMDPClass.py
|
paultouma/simple_rl
|
4b90fd2f877ae8a56b3be4a3f273743e3d8fb00c
|
[
"Apache-2.0"
] | null | null | null |
simple_rl/tasks/random/RandomMDPClass.py
|
paultouma/simple_rl
|
4b90fd2f877ae8a56b3be4a3f273743e3d8fb00c
|
[
"Apache-2.0"
] | null | null | null |
''' RandomMDPClass.py: Contains the RandomMDPClass class. '''
# Python imports.
import random
import numpy as np
from collections import defaultdict
# Other imports.
from simple_rl.mdp.MDPClass import MDP
from simple_rl.tasks.random.RandomStateClass import RandomState
class RandomMDP(MDP):
    ''' Implementation for a standard Random MDP '''

    ACTIONS = []

    def __init__(self, num_states=5, num_rand_trans=5, num_actions=3, gamma=0.99):
        '''
        Args:
            num_states (int) [optional]: Number of states in the Random MDP.
            num_rand_trans (int) [optional]: Number of possible next states.
            num_actions (int) [optional]: Number of actions, named "0".."num_actions-1".
            gamma (float) [optional]: Discount factor.

        Summary:
            Each state-action pair picks @num_rand_trans possible states and has a uniform distribution
            over them for transitions. Rewards are also chosen randomly.
        '''
        RandomMDP.ACTIONS = [str(i) for i in range(num_actions)]
        MDP.__init__(self, RandomMDP.ACTIONS, self._transition_func, self._reward_func, init_state=RandomState(1), gamma=gamma)
        # assert(num_rand_trans <= num_states)
        self.num_rand_trans = num_rand_trans
        self.num_states = num_states
        # Bug fix: get_parameters() reads self.num_actions, which was never
        # assigned and raised AttributeError. Store it here.
        self.num_actions = num_actions
        # Exactly one (state, action) pair yields reward 1.0; all others 0.0.
        self._reward_s_a = (random.choice(range(self.num_states)), random.choice(RandomMDP.ACTIONS))
        # Lazily-filled cache: state -> action -> candidate next-state ids.
        self._transitions = defaultdict(lambda: defaultdict(str))

    def get_parameters(self):
        '''
        Returns:
            (dict) key=param_name (str) --> val=param_val (object).
        '''
        param_dict = defaultdict(int)
        param_dict["num_states"] = self.num_states
        param_dict["num_rand_trans"] = self.num_rand_trans
        param_dict["num_actions"] = self.num_actions
        return param_dict

    def _reward_func(self, state, action):
        '''
        Args:
            state (State)
            action (str)

        Returns
            (float): 1.0 for the single rewarded (state, action) pair, else 0.0.
        '''
        if (state.data, action) == self._reward_s_a:
            return 1.0
        else:
            return 0.0

    def _transition_func(self, state, action):
        '''
        Args:
            state (State)
            action (str)

        Returns
            (State): uniformly sampled from @num_rand_trans cached candidates.
        '''
        if self.num_states == 1:
            return state

        if (state, action) not in self._transitions:
            # Chooses @self.num_rand_trans from range(self.num_states)
            self._transitions[state][action] = np.random.choice(self.num_states, self.num_rand_trans, replace=False)

        state_id = np.random.choice(self._transitions[state][action])
        return RandomState(state_id)

    def __str__(self):
        return "RandomMDP-" + str(self.num_states)
def main():
    # NOTE(review): _gen_random_distr is not defined in this module and is not
    # imported — running this file as a script raises NameError. Confirm which
    # helper was intended here.
    _gen_random_distr()

if __name__ == "__main__":
    main()
| 30.087912
| 127
| 0.621987
|
acfcf7caa9b6543af663223b1f616d98c430c523
| 7,876
|
py
|
Python
|
towhee/functional/mixins/column.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
towhee/functional/mixins/column.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
towhee/functional/mixins/column.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Flag, auto
from towhee.utils.thirdparty.pyarrow import pa
from towhee.types.tensor_array import TensorArray
from towhee.hparam.hyperparameter import param_scope
from towhee.functional.storages import ChunkedTable, WritableTable
# pylint: disable=import-outside-toplevel
# pylint: disable=bare-except
class ColumnMixin:
    """
    Mixins to support column-based storage.
    """

    class ModeFlag(Flag):
        # Storage layout flags carried by a DataCollection.
        ROWBASEDFLAG = auto()
        COLBASEDFLAG = auto()
        HASCHUNKFLAG = auto()

    def __init__(self) -> None:
        super().__init__()
        # Inherit the parent collection's chunk size when one was configured.
        with param_scope() as hp:
            parent = hp().data_collection.parent(None)
            if parent is not None and hasattr(parent, '_chunksize'):
                self._chunksize = parent._chunksize

    def set_chunksize(self, chunksize):
        """
        Set chunk size for arrow

        Examples:

        >>> import towhee
        >>> dc = towhee.dc['a'](range(20))
        >>> dc = dc.set_chunksize(10)
        >>> dc2 = dc.runas_op['a', 'b'](func=lambda x: x+1)
        >>> dc2.get_chunksize()
        10
        >>> len(dc._iterable._chunks)
        2
        """
        self._chunksize = chunksize
        chunked_table = ChunkedTable(chunksize=chunksize, stream=False)
        for element in self:
            chunked_table.feed(element)
        # Signal end-of-stream so the last partial chunk is flushed.
        chunked_table.feed(None, eos=True)
        return self._factory(chunked_table, mode=self.ModeFlag.COLBASEDFLAG)

    def get_chunksize(self):
        """Return the configured chunk size."""
        return self._chunksize

    def _create_col_table(self):
        """
        Create a column-based table.

        Examples:

        >>> from towhee import Entity, DataFrame
        >>> e = [Entity(a=a, b=b) for a,b in zip(['abc', 'def', 'ghi'], [1,2,3])]
        >>> df = DataFrame(e)
        >>> table = df._create_col_table()
        >>> table
        pyarrow.Table
        a: string
        b: int64
        ----
        a: [["abc","def","ghi"]]
        b: [[1,2,3]]

        >>> df.stream()._create_col_table()
        pyarrow.Table
        a: string
        b: int64
        ----
        a: [["abc","def","ghi"]]
        b: [[1,2,3]]
        """
        header = None
        cols = None

        # Column names come from the first entity's attributes; every entity
        # contributes one value per column.
        def inner(entity):
            nonlocal cols, header
            header = [*entity.__dict__] if not header else header
            cols = [[] for _ in header] if not cols else cols
            for col, name in zip(cols, header):
                col.append(getattr(entity, name))

        for entity in self._iterable:
            inner(entity)

        arrays = []
        for col in cols:
            try:
                arrays.append(pa.array(col))
            # pylint: disable=bare-except
            except:
                # Fallback for values pyarrow cannot infer (e.g. ndarrays).
                arrays.append(TensorArray.from_numpy(col))
        return pa.Table.from_arrays(arrays, names=header)

    @classmethod
    def from_arrow_table(cls, **kws):
        """Build a ``pyarrow.Table`` from keyword columns (name -> array)."""
        arrays = []
        names = []
        for k, v in kws.items():
            arrays.append(v)
            names.append(k)
        return pa.Table.from_arrays(arrays, names=names)

    def to_column(self):
        """
        Convert the iterables to column-based table.

        Examples:

        >>> from towhee import Entity, DataFrame
        >>> e = [Entity(a=a, b=b) for a,b in zip(['abc', 'def', 'ghi'], [1,2,3])]
        >>> df = DataFrame(e)
        >>> df
        [<Entity dict_keys(['a', 'b'])>, <Entity dict_keys(['a', 'b'])>, <Entity dict_keys(['a', 'b'])>]
        >>> df.to_column()
        pyarrow.Table
        a: string
        b: int64
        ----
        a: [["abc","def","ghi"]]
        b: [[1,2,3]]
        """
        # pylint: disable=protected-access
        df = self.to_df()
        res = df._create_col_table()
        df._iterable = WritableTable(res)
        df._mode = self.ModeFlag.COLBASEDFLAG
        return df

    def cmap(self, unary_op):
        """
        chunked map

        Examples:

        >>> import towhee
        >>> dc = towhee.dc['a'](range(10))
        >>> dc = dc.to_column()
        >>> dc = dc.runas_op['a', 'b'](func=lambda x: x+1)
        >>> dc.show(limit=5, tablefmt='plain')
          a    b
          0    1
          1    2
          2    3
          3    4
          4    5
        >>> dc._iterable
        pyarrow.Table
        a: int64
        b: int64
        ----
        a: [[0,1,2,3,4,5,6,7,8,9]]
        b: [[1,2,3,4,5,6,7,8,9,10]]
        >>> len(dc._iterable)
        10
        """
        # pylint: disable=protected-access
        if isinstance(self._iterable, ChunkedTable):
            # Apply the op chunk by chunk, preserving the chunked layout.
            tables = [
                WritableTable(self.__table_apply__(chunk, unary_op))
                for chunk in self._iterable.chunks()
            ]
            return self._factory(ChunkedTable(tables))
        return self._factory(self.__table_apply__(self._iterable, unary_op))

    def __table_apply__(self, table, unary_op):
        """Run ``unary_op`` over ``table`` and write results to its output column(s)."""
        # pylint: disable=protected-access
        return table.write_many(unary_op._index[1],
                                self.__col_apply__(table, unary_op))

    def __col_apply__(self, cols, unary_op):
        """Vector-call ``unary_op`` on the numpy views of its input column(s)."""
        # pylint: disable=protected-access
        index = unary_op._index[0]
        # Normalize single input to the multi-input shape.
        names = index if isinstance(index, tuple) else (index,)
        args = []
        for name in names:
            try:
                data = cols[name].combine_chunks()
            except:
                # Some arrow columns cannot be combined; use the first chunk.
                data = cols[name].chunk(0)
            args.append(self.__arrow_to_numpy__(data))
        return unary_op.__vcall__(*args)

    def __arrow_to_numpy__(self, data):
        """Zero-copy view of one arrow array as a numpy array.

        Reads the arrow value buffer directly with ``np.frombuffer`` and
        reshapes it: TensorArray keeps its tensor shape, list arrays become
        2-D, flat arrays stay 1-D.
        """
        import numpy as np
        buffer = data.buffers()[-1]
        dtype = data.type
        if isinstance(data, TensorArray):
            dtype = dtype.storage_type.value_type
        elif hasattr(data.type, 'value_type'):
            # Drill down through nested list types to the scalar type.
            while hasattr(dtype, 'value_type'):
                dtype = dtype.value_type
        dtype = dtype.to_pandas_dtype()
        shape = [-1, *data.type.shape] if isinstance(data, TensorArray)\
            else [len(data), -1] if isinstance(data, pa.lib.ListArray)\
            else [len(data)]
        return np.frombuffer(buffer=buffer, dtype=dtype).reshape(shape)
| 32.01626
| 104
| 0.543931
|
acfcfa78629c3f3aa3d98006994f150a3b2dd1cc
| 3,730
|
py
|
Python
|
h/settings.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | null | null | null |
h/settings.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | null | null | null |
h/settings.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | null | null | null |
"""Helpers for parsing settings from the environment."""
import logging
import os
log = logging.getLogger(__name__)
class SettingError(Exception):
    """Exception thrown when a setting cannot be resolved."""
    # Redundant `pass` removed: a docstring already forms the class body.
class SettingsManager:
    """
    Configuration setting resolver.

    SettingsManager resolves settings from various sources into the final typed
    values used when the app runs. It also provides a way to check for missing
    required settings or use of deprecated settings.

    The resolved settings are available via the `settings` attribute.
    """

    def __init__(self, settings=None, environ=None):
        """
        Initialize with initial setting values from config files and environment.

        :param settings: Initial configuration settings read from config files
        :type settings: Dict[str,str]
        :param environ: Environment variable mappings
        :type environ: Dict[str, str]
        """
        if environ is None:
            environ = os.environ
        self.settings = {}
        self.settings.update(settings or {})
        self._environ = environ

    def set(
        self,  # pylint: disable=too-many-arguments
        name,
        envvar,
        type_=str,
        required=False,
        default=None,
        deprecated_msg=None,
    ):
        """
        Update `setting[name]`.

        Update `setting[name]` using the value from the environment variable
        `envvar`. If there is no such environment variable and `setting[name]`
        is not already set, `setting[name]` is set to `default`.

        Raises `SettingError` if a required setting is missing and has no default,
        or coercing the setting using `type_` fails.

        :param name: the name of the pyramid config setting
        :type name: str
        :param envvar: the environment variable name
        :type envvar: str
        :param type_: callable that casts the setting value to the desired type
        :param required: True if the pyramid config setting is required
        :type required: bool
        :param default: a default value to use if the envvar isn't set
        :param deprecated_msg: a deprecated envvar setting message to display
        :type deprecated_msg: str
        :raises SettingError: if required and not set
        :raises SettingError: if type casting fails
        """
        val = None
        cast_message = None
        if envvar in self._environ:
            if deprecated_msg:
                log.warning(
                    "use of envvar %s is deprecated: %s", envvar, deprecated_msg
                )
            val = self._environ[envvar]
            cast_message = "environment variable {}={!r}".format(envvar, val)
        # Bug fix: previously `elif default and ...` / `if val:` silently
        # dropped falsy values, so default=0 / default=False (or an envvar
        # explicitly set to "0" cast below) never made it into settings.
        # Compare against None instead of truthiness.
        elif default is not None and name not in self.settings:
            val = default
            cast_message = "{}'s default {!r}".format(name, val)
        elif required and name not in self.settings:
            raise SettingError(
                "error parsing environment variable "
                "{varname} not found".format(varname=envvar)
            )
        if val is not None:
            try:
                self.settings[name] = type_(val)
            except ValueError as err:
                raise SettingError(
                    "error casting {} as {}".format(cast_message, type_.__name__)
                ) from err
def database_url(url):
    """Parse a string as a Heroku-style database URL."""
    # Heroku database URLs start with postgres://, which is an old and
    # deprecated dialect as far as sqlalchemy is concerned. We upgrade this
    # to postgresql+psycopg2 by default.
    legacy_prefix = "postgres://"
    if not url.startswith(legacy_prefix):
        return url
    return "postgresql+psycopg2://" + url[len(legacy_prefix):]
| 34.220183
| 83
| 0.619571
|
acfcfa9b57c5c060ced813bec56075dbf5efb3f1
| 262
|
py
|
Python
|
slipy/spectrum/etc/str.py
|
glentner/slipy-dev
|
db8fa6ada3ce3246879d5aae9b0f150c56c23382
|
[
"BSD-3-Clause"
] | null | null | null |
slipy/spectrum/etc/str.py
|
glentner/slipy-dev
|
db8fa6ada3ce3246879d5aae9b0f150c56c23382
|
[
"BSD-3-Clause"
] | null | null | null |
slipy/spectrum/etc/str.py
|
glentner/slipy-dev
|
db8fa6ada3ce3246879d5aae9b0f150c56c23382
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Copyright (c) Geoffrey Lentner 2016. All Rights Reserved.
# slipy/spectrum/etc/str.py
# TODO: etc/str.py
"""
"""
def _str(self, *args, **kwargs):
"""
"""
raise NotImplementedError()
| 20.153846
| 63
| 0.660305
|
acfcfab4dadd870462e7d5a529678838595cdb2d
| 4,193
|
py
|
Python
|
maskrcnn_benchmark/data/datasets/Sensing_dataset.py
|
Ricardozzf/maskrcnn-benchmark
|
409ec56a0df92623e5dd761b22cea1a45edea629
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/datasets/Sensing_dataset.py
|
Ricardozzf/maskrcnn-benchmark
|
409ec56a0df92623e5dd761b22cea1a45edea629
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/datasets/Sensing_dataset.py
|
Ricardozzf/maskrcnn-benchmark
|
409ec56a0df92623e5dd761b22cea1a45edea629
|
[
"MIT"
] | null | null | null |
from maskrcnn_benchmark.structures.bounding_box import BoxList
import os
from PIL import Image
import torch
class SensingDataset(object):
    """Detection dataset pairing ``.jpg`` images with ``.txt`` box annotations.

    Each annotation line has the form ``"<class> <x1> <y1> <x2> <y2>"``;
    class ids are shifted by +1 so that 0 stays reserved for background.
    Image paths are derived from annotation paths by swapping the
    ``labels``/``images`` directory and the ``.txt``/``.jpg`` suffix.
    """

    def __init__(self, ann_file, root, remove_annotations_without_images, transforms=None):
        # Collect all annotation files under ``ann_file``.
        self.anns = []
        for rootPath, _, files in os.walk(ann_file):
            for filename in files:
                if not filename.endswith('.txt'):
                    continue
                self.anns.append(os.path.join(rootPath, filename))
        # Collect all image files under ``root``.
        self.images = []
        for rootPath, _, files in os.walk(root):
            for filename in files:
                if not filename.endswith('.jpg'):
                    continue
                self.images.append(os.path.join(rootPath, filename))
        # NOTE(review): ``remove_annotations_without_images`` is currently a
        # no-op — the cross-filtering implementation was commented out in the
        # original source. Confirm whether it should be re-enabled.
        self.transforms = transforms
        self.classes = ['background', 'person']

    @staticmethod
    def _image_path(ann_path):
        """Map an annotation .txt path onto its sibling image .jpg path."""
        return ann_path.replace('labels', 'images').replace('.txt', '.jpg')

    def _parse_annotation(self, txt_path):
        """Read one annotation file.

        Returns:
            (labels, boxes): ``labels`` is a 1-D tensor of class ids shifted
            by +1; ``boxes`` is a list of ``[x1, y1, x2, y2]`` floats.
        """
        labels = []
        boxes = []
        with open(txt_path) as fp:
            for line in fp.readlines():
                cnt = line.rstrip().split(' ')
                labels.append(int(cnt[0]) + 1)
                boxes.append([float(cnt[1]), float(cnt[2]), float(cnt[3]), float(cnt[4])])
        return torch.tensor(labels), boxes

    def __getitem__(self, idx):
        """Return ``(image, boxlist, idx)`` for sample ``idx``."""
        txtPath = self.anns[idx]
        image = Image.open(self._image_path(txtPath)).convert("RGB")
        labels, boxes = self._parse_annotation(txtPath)
        # Boxes are stored as absolute x1, y1, x2, y2 coordinates.
        boxlist = BoxList(boxes, image.size, mode="xyxy")
        boxlist.add_field("labels", labels)
        if self.transforms:
            image, boxlist = self.transforms(image, boxlist)
        return image, boxlist, idx

    def __len__(self):
        return len(self.anns)

    def get_img_info(self, idx):
        # get img_height and img_width. This is used if
        # we want to split the batches according to the aspect ratio
        # of the image, as it can be more efficient than loading the
        # image from disk
        image = Image.open(self._image_path(self.anns[idx]))
        img_width, img_height = image.size
        return {"height": img_height, "width": img_width}

    def get_groundtruth(self, idx):
        """Return the BoxList target for sample ``idx`` (no transforms applied)."""
        txtPath = self.anns[idx]
        image = Image.open(self._image_path(txtPath)).convert("RGB")
        labels, boxes = self._parse_annotation(txtPath)
        target = BoxList(boxes, image.size, mode="xyxy")
        target.add_field("labels", labels)
        return target

    def map_class_id_to_class_name(self, class_id):
        return self.classes[class_id]
| 35.533898
| 94
| 0.545433
|
acfcfad660e8c8b1cb698b4d4aba4b73439c6efd
| 4,529
|
py
|
Python
|
backend/api/views.py
|
sorablaze11/TapSearch
|
8197faaaf4bcbd18225d4fe049c71c59dcbca607
|
[
"MIT"
] | 4
|
2019-11-23T16:27:13.000Z
|
2020-03-08T06:47:49.000Z
|
backend/api/views.py
|
sorablaze11/TapSearch
|
8197faaaf4bcbd18225d4fe049c71c59dcbca607
|
[
"MIT"
] | 8
|
2020-06-06T00:57:12.000Z
|
2022-02-26T20:32:57.000Z
|
backend/api/views.py
|
sorablaze11/TapSearch
|
8197faaaf4bcbd18225d4fe049c71c59dcbca607
|
[
"MIT"
] | 3
|
2020-03-08T21:25:44.000Z
|
2021-06-06T13:48:09.000Z
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
import re
class Appearance:
    """
    Represents the appearance of a term in a given document, along with the
    frequency of appearances in the same one.
    """

    def __init__(self, docId, frequency):
        self.docId = docId
        self.frequency = frequency

    def __repr__(self):
        """Debug representation: the instance attributes as a dict string."""
        return str(vars(self))
class Database:
    """
    In memory database representing the already indexed documents.
    """

    def __init__(self):
        self.db = {}

    def __repr__(self):
        """Debug representation of the whole database."""
        return str(self.__dict__)

    def get(self, id):
        """Return the document stored under ``id``, or None."""
        return self.db.get(id, None)

    def add(self, document):
        """
        Adds a document to the DB.
        """
        self.db[document['id']] = document

    def remove(self, document):
        """
        Removes document from DB.
        """
        return self.db.pop(document['id'], None)
class InvertedIndex:
    """
    Inverted Index class.
    """

    def __init__(self, db):
        self.index = {}
        self.db = db

    def __repr__(self):
        """Debug representation of the index mapping."""
        return str(self.index)

    def index_document(self, document):
        """
        Process a given document, save it to the DB and update the index.
        """
        # Strip punctuation, then tokenize on spaces and newlines.
        stripped = re.sub(r'[^\w\s]', '', document['text'])
        tokens = re.split(' |\n', stripped)
        # Count occurrences of each lower-cased term within this document.
        appearances = {}
        for token in tokens:
            token = token.lower()
            seen = appearances[token].frequency if token in appearances else 0
            appearances[token] = Appearance(document['id'], seen + 1)
        # Merge this document's postings into the global index.
        for term, appearance in appearances.items():
            existing = self.index.get(term)
            self.index[term] = [appearance] if existing is None else existing + [appearance]
        # Persist the raw document alongside the index.
        self.db.add(document)
        return document

    def lookup_query(self, query):
        """
        Return up to ten ``[frequency, docId]`` pairs for ``query``, sorted by
        frequency (descending). This is a very naive search: only the first
        ten postings for the term are considered.
        """
        if query not in self.index:
            return []
        first_ten = self.index[query][:10]
        return sorted([[a.frequency, a.docId] for a in first_ten], reverse=True)
# Global variables of the above created classes for storing inverted-indexes
db = Database()
index = InvertedIndex(db)
global_id = 0
@api_view(['GET', 'POST'])
def clear_indexes(request):
    # Reset the global id counter, document database and inverted index,
    # discarding everything indexed so far.
    global global_id, db, index
    global_id = 0
    db = Database()
    index = InvertedIndex(db)
    return Response({"mssg": "All the indexes has been cleared."})
@api_view(['GET', 'POST'])
def indexing_docs(request):
    # Split the posted text on blank lines and index each paragraph as a
    # separate document.  Responds with status 1 on success, 0 on any error.
    global global_id, index
    try:
        docs = request.data['data']
        docs = docs.split('\n\n')
        print(docs)
        for par in docs:
            document = {
                'id': global_id,
                'text': par
            }
            index.index_document(document)
            # Ids are assigned sequentially across requests.
            global_id = global_id + 1
    except Exception as e:
        # NOTE(review): the exception is swallowed and only signalled via
        # status 0; nothing is logged.
        return Response({"status": 0})
    return Response({"status": 1})
@api_view(['GET', 'POST'])
def search_word(request):
    # Look up the (lowercased) query word; each result entry is
    # [frequency, docId] and gets the matching document's text appended.
    global index, db
    res = index.lookup_query(request.data['word'].lower())
    print(res)
    for x in res:
        x.append(db.db[x[1]]['text'])
    return Response({"docs": res})
@api_view(['GET'])
def get_document(request, id):
    # Return the text of the document with the given id, or an empty string
    # if no such document has been indexed.
    global db
    if id not in db.db.keys():
        return Response({"text": ""})
    print(db.db[id])
    return Response({"text": db.db[id]['text']})
@api_view(['GET'])
def get_all(request):
    # Return [id, text] pairs for every indexed document.
    global db
    res = [[db.db[x]['id'], db.db[x]['text']] for x in db.db.keys()]
    return Response({"docs": res})
| 27.448485
| 96
| 0.571649
|
acfcfb31da45054af1be9e629bfa3df1de7e1884
| 3,209
|
py
|
Python
|
tests/test_decorators/test_version_check.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_decorators/test_version_check.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:21:02.000Z
|
2021-03-25T23:21:02.000Z
|
tests/test_decorators/test_version_check.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import unittest
import StringIO
from mock import patch
from click import Abort
from shellfoundry.decorators import shellfoundry_version_check
@shellfoundry_version_check(abort_if_major=False)
def test_me_do_not_abort():
    # Decorated probe: prints a marker (no newline) so the tests can verify
    # that the wrapped function ran and what the version check printed.
    print('vido', end='')
@shellfoundry_version_check(abort_if_major=True)
def test_me_abort_if_major():
    # Same probe, but configured to abort when a newer major version exists.
    print('vido', end='')
class TestShellFoundryVersionCheck(unittest.TestCase):
    # Each test patches the installed-version and index-version lookups seen
    # by the shellfoundry_version_check decorator, runs a decorated probe
    # function, and inspects captured stdout for the probe marker plus any
    # upgrade warning.
    @patch('sys.stdout', new_callable=StringIO.StringIO)
    def test_version_check_when_current_is_greater_than_index(self, stdout_mock):
        # Act
        with patch('shellfoundry.utilities.get_installed_version', return_value='5.0.0'), \
            patch('shellfoundry.utilities.max_version_from_index', return_value='0.2.9'):
            test_me_do_not_abort()
        # Assert: up to date (or ahead) prints no warning.
        self.assertEqual(stdout_mock.getvalue(), 'vido')
    @patch('sys.stdout', new_callable=StringIO.StringIO)
    def test_version_check_when_current_is_lower_than_index_but_not_major_version(self, stdout_mock):
        # Act
        with patch('shellfoundry.utilities.get_installed_version', return_value='0.2.7'), \
            patch('shellfoundry.utilities.max_version_from_index', return_value='0.3.0'):
            test_me_do_not_abort()
        # Assert: minor/patch lag only warns, the probe still runs.
        self.assertEqual(stdout_mock.getvalue(),
                         'vido\nThere is a new version of shellfoundry available, please upgrade by running: pip install shellfoundry --upgrade\n')
    @patch('sys.stdout', new_callable=StringIO.StringIO)
    def test_version_check_when_current_is_lower_than_index_and_its_major_version(self, stdout_mock):
        # Act
        with patch('shellfoundry.utilities.get_installed_version', return_value='0.2.7'), \
            patch('shellfoundry.utilities.max_version_from_index', return_value='1.0.0'):
            test_me_do_not_abort()
        # Assert: major lag prints the "not supported" warning (no abort with
        # abort_if_major=False).
        self.assertEqual(stdout_mock.getvalue(),
                         'vido\nThis version of shellfoundry is not supported anymore, please upgrade by running: pip install shellfoundry --upgrade\n')
    @patch('sys.stdout', new_callable=StringIO.StringIO)
    def test_version_check_when_current_is_equal_to_index(self, stdout_mock):
        # Act
        with patch('shellfoundry.utilities.get_installed_version', return_value='1.0.0'), \
            patch('shellfoundry.utilities.max_version_from_index', return_value='1.0.0'):
            test_me_do_not_abort()
        # Assert
        self.assertEqual(stdout_mock.getvalue(), 'vido')
    @patch('sys.stdout', new_callable=StringIO.StringIO)
    def test_version_check_when_current_is_lower_than_index_and_its_major_version_validate_abort(self, stdout_mock):
        # Act
        with patch('shellfoundry.utilities.get_installed_version', return_value='0.2.7'), \
            patch('shellfoundry.utilities.max_version_from_index', return_value='1.0.0'):
            self.assertRaises(Abort, test_me_abort_if_major)
        # Assert: abort_if_major=True raises before the probe prints 'vido'.
        self.assertEqual(stdout_mock.getvalue(),
                         'This version of shellfoundry is not supported anymore, please upgrade by running: pip install shellfoundry --upgrade\n\n')
| 43.364865
| 152
| 0.715488
|
acfcfc825d69cee3ad733800bc7c47b148c9fc52
| 886
|
py
|
Python
|
src/hub/dataload/sources/ensembl_protists/dump.py
|
mlebeur/mygene.info
|
e71ca89c2b1c546c260101286ad5419503fd6653
|
[
"Apache-2.0"
] | 78
|
2017-05-26T08:38:25.000Z
|
2022-02-25T08:55:31.000Z
|
src/hub/dataload/sources/ensembl_protists/dump.py
|
mlebeur/mygene.info
|
e71ca89c2b1c546c260101286ad5419503fd6653
|
[
"Apache-2.0"
] | 105
|
2017-05-18T21:57:13.000Z
|
2022-03-18T21:41:47.000Z
|
src/hub/dataload/sources/ensembl_protists/dump.py
|
mlebeur/mygene.info
|
e71ca89c2b1c546c260101286ad5419503fd6653
|
[
"Apache-2.0"
] | 19
|
2017-06-12T18:31:54.000Z
|
2021-11-10T00:04:43.000Z
|
import os
from ftplib import FTP
from config import DATA_ARCHIVE_ROOT, logger as logging
from biothings.utils.dataload import tab2list
from biothings.utils.common import is_int
from hub.dataload.sources.ensembl.dump import GenericBioMart, XML_QUERY_TEMPLATE
class EnsemblProtistsBioMart(GenericBioMart):
    # BioMart dumper specialised for the Ensembl Protists division; only the
    # endpoints and path templates differ from the generic base class.
    SRC_NAME = "ensembl_protists"
    SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)
    # used to get latest release number & list of available species
    ENSEMBL_FTP_HOST = "ftp.ensemblgenomes.org"
    MART_URL = "http://protists.ensembl.org/biomart/martservice"
    RELEASE_FOLDER = '/pub/protists'
    RELEASE_PREFIX = '/pub/protists/release-'
    def get_species_file(self):
        # Per-release dataset list on the Ensembl Genomes FTP site; the
        # release number appears twice in the path.
        return '/pub/protists/release-%s/mysql/protists_mart_%s/dataset_names.txt.gz' % (self.release, self.release)
    def get_virtual_schema(self):
        # BioMart virtual schema name for the protists division.
        return 'protists_mart'
| 35.44
| 116
| 0.767494
|
acfcfea44306c11e49bb966a943b41c90553d83a
| 20,736
|
py
|
Python
|
tensorflow_probability/python/distributions/quantized_distribution.py
|
mjul/tensorflow-probability
|
c733f06bccceb983f3e9db8e6e3c98b3bd4d23c9
|
[
"Apache-2.0"
] | 1
|
2020-04-13T12:31:12.000Z
|
2020-04-13T12:31:12.000Z
|
tensorflow_probability/python/distributions/quantized_distribution.py
|
mjul/tensorflow-probability
|
c733f06bccceb983f3e9db8e6e3c98b3bd4d23c9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/quantized_distribution.py
|
mjul/tensorflow-probability
|
c733f06bccceb983f3e9db8e6e3c98b3bd4d23c9
|
[
"Apache-2.0"
] | 1
|
2020-05-27T19:42:06.000Z
|
2020-05-27T19:42:06.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Quantized distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution as distributions
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
__all__ = ['QuantizedDistribution']
_prob_base_note = """
For whole numbers `y`,
```
P[Y = y] := P[X <= low], if y == low,
:= P[X > high - 1], y == high,
:= 0, if j < low or y > high,
:= P[y - 1 < X <= y], all other y.
```
"""
_prob_note = _prob_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`. If the
base distribution has a `survival_function` method, results will be more
accurate for large values of `y`, and in this case the `survival_function` must
also be defined on `y - 1`.
"""
_log_prob_note = _prob_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`. If the
base distribution has a `log_survival_function` method results will be more
accurate for large values of `y`, and in this case the `log_survival_function`
must also be defined on `y - 1`.
"""
_cdf_base_note = """
For whole numbers `y`,
```
cdf(y) := P[Y <= y]
= 1, if y >= high,
= 0, if y < low,
= P[X <= y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then above definition applies.
"""
_cdf_note = _cdf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_cdf_note = _cdf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
_sf_base_note = """
For whole numbers `y`,
```
survival_function(y) := P[Y > y]
= 0, if y >= high,
= 1, if y < low,
= P[X <= y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then above definition applies.
"""
_sf_note = _sf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_sf_note = _sf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
class QuantizedDistribution(distributions.Distribution):
  """Distribution representing the quantization `Y = ceiling(X)`.
  #### Definition in Terms of Sampling
  ```
  1. Draw X
  2. Set Y <-- ceiling(X)
  3. If Y < low, reset Y <-- low
  4. If Y > high, reset Y <-- high
  5. Return Y
  ```
  #### Definition in Terms of the Probability Mass Function
  Given scalar random variable `X`, we define a discrete random variable `Y`
  supported on the integers as follows:
  ```
  P[Y = j] := P[X <= low], if j == low,
           := P[X > high - 1], j == high,
           := 0, if j < low or j > high,
           := P[j - 1 < X <= j], all other j.
  ```
  Conceptually, without cutoffs, the quantization process partitions the real
  line `R` into half open intervals, and identifies an integer `j` with the
  right endpoints:
  ```
  R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ...
  j = ... -1 0 1 2 3 4 ...
  ```
  `P[Y = j]` is the mass of `X` within the `jth` interval.
  If `low = 0`, and `high = 2`, then the intervals are redrawn
  and `j` is re-assigned:
  ```
  R = (-infty, 0](0, 1](1, infty)
  j = 0 1 2
  ```
  `P[Y = j]` is still the mass of `X` within the `jth` interval.
  #### Examples
  We illustrate a mixture of discretized logistic distributions
  [(Salimans et al., 2017)][1]. This is used, for example, for capturing 16-bit
  audio in WaveNet [(van den Oord et al., 2017)][2]. The values range in
  a 1-D integer domain of `[0, 2**16-1]`, and the discretization captures
  `P(x - 0.5 < X <= x + 0.5)` for all `x` in the domain excluding the endpoints.
  The lowest value has probability `P(X <= 0.5)` and the highest value has
  probability `P(2**16 - 1.5 < X)`.
  Below we assume a `wavenet` function. It takes as `input` right-shifted audio
  samples of shape `[..., sequence_length]`. It returns a real-valued tensor of
  shape `[..., num_mixtures * 3]`, i.e., each mixture component has a `loc` and
  `scale` parameter belonging to the logistic distribution, and a `logits`
  parameter determining the unnormalized probability of that component.
  ```python
  tfd = tfp.distributions
  tfb = tfp.bijectors
  net = wavenet(inputs)
  loc, unconstrained_scale, logits = tf.split(net,
                                              num_or_size_splits=3,
                                              axis=-1)
  scale = tf.math.softplus(unconstrained_scale)
  # Form mixture of discretized logistic distributions. Note we shift the
  # logistic distribution by -0.5. This lets the quantization capture 'rounding'
  # intervals, `(x-0.5, x+0.5]`, and not 'ceiling' intervals, `(x-1, x]`.
  discretized_logistic_dist = tfd.QuantizedDistribution(
      distribution=tfd.TransformedDistribution(
          distribution=tfd.Logistic(loc=loc, scale=scale),
          bijector=tfb.AffineScalar(shift=-0.5)),
      low=0.,
      high=2**16 - 1.)
  mixture_dist = tfd.MixtureSameFamily(
      mixture_distribution=tfd.Categorical(logits=logits),
      components_distribution=discretized_logistic_dist)
  neg_log_likelihood = -tf.reduce_sum(mixture_dist.log_prob(targets))
  train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood)
  ```
  After instantiating `mixture_dist`, we illustrate maximum likelihood by
  calculating its log-probability of audio samples as `target` and optimizing.
  #### References
  [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.
       PixelCNN++: Improving the PixelCNN with discretized logistic mixture
       likelihood and other modifications.
       _International Conference on Learning Representations_, 2017.
       https://arxiv.org/abs/1701.05517
  [2]: Aaron van den Oord et al. Parallel WaveNet: Fast High-Fidelity Speech
       Synthesis. _arXiv preprint arXiv:1711.10433_, 2017.
       https://arxiv.org/abs/1711.10433
  """
  def __init__(self,
               distribution,
               low=None,
               high=None,
               validate_args=False,
               name='QuantizedDistribution'):
    """Construct a Quantized Distribution representing `Y = ceiling(X)`.
    Some properties are inherited from the distribution defining `X`. Example:
    `allow_nan_stats` is determined for this `QuantizedDistribution` by reading
    the `distribution`.
    Args:
      distribution: The base distribution class to transform. Typically an
        instance of `Distribution`.
      low: `Tensor` with same `dtype` as this distribution and shape
        that broadcasts to that of samples but does not result in additional
        batch dimensions after broadcasting. Should be a whole number. Default
        `None`. If provided, base distribution's `prob` should be defined at
        `low`.
      high: `Tensor` with same `dtype` as this distribution and shape
        that broadcasts to that of samples but does not result in additional
        batch dimensions after broadcasting. Should be a whole number. Default
        `None`. If provided, base distribution's `prob` should be defined at
        `high - 1`. `high` must be strictly greater than `low`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: If `dist_cls` is not a subclass of
          `Distribution` or continuous.
      NotImplementedError: If the base distribution does not implement `cdf`.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      # Infer a dtype common to the base distribution and both cutoffs.
      dtype = dtype_util.common_dtype([distribution, high, low],
                                      dtype_hint=tf.float32)
      self._dist = distribution
      self._low = tensor_util.convert_nonref_to_tensor(
          low, name='low', dtype=dtype)
      self._high = tensor_util.convert_nonref_to_tensor(
          high, name='high', dtype=dtype)
      # Quantization is discrete, hence NOT_REPARAMETERIZED.
      super(QuantizedDistribution, self).__init__(
          dtype=dtype,
          reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=self._dist.allow_nan_stats,
          parameters=parameters,
          name=name)
  @property
  def distribution(self):
    """Base distribution, p(x)."""
    return self._dist
  @property
  def low(self):
    """Lowest value that quantization returns."""
    return self._low
  @property
  def high(self):
    """Highest value that quantization returns."""
    return self._high
  # Shape information is delegated entirely to the base distribution.
  def _batch_shape_tensor(self):
    return self.distribution.batch_shape_tensor()
  def _batch_shape(self):
    return self.distribution.batch_shape
  def _event_shape_tensor(self):
    return self.distribution.event_shape_tensor()
  def _event_shape(self):
    return self.distribution.event_shape
  def _sample_n(self, n, seed=None):
    """Sample the base distribution, then ceil and clip into [low, high]."""
    with tf.name_scope('transform'):
      n = tf.convert_to_tensor(n, name='n')
      x_samps = self.distribution.sample(n, seed=seed)
      # Snap values to the intervals (j - 1, j].
      result_so_far = tf.math.ceil(x_samps)
      if self._low is not None:
        low = tf.convert_to_tensor(self._low)
        result_so_far = tf.where(result_so_far < low, low, result_so_far)
      if self._high is not None:
        high = tf.convert_to_tensor(self._high)
        result_so_far = tf.where(result_so_far > high, high, result_so_far)
      return result_so_far
  @distribution_util.AppendDocstring(_log_prob_note)
  def _log_prob(self, y):
    if not hasattr(self.distribution, '_log_cdf'):
      raise NotImplementedError(
          '`log_prob` not implemented unless the base distribution implements '
          '`log_cdf`')
    try:
      # Prefer the survival-function path (more accurate for large `y`, per
      # _log_prob_note); fall back to cdf differences if sf is unavailable.
      return self._log_prob_with_logsf_and_logcdf(y)
    except NotImplementedError:
      return self._log_prob_with_logcdf(y)
  def _log_prob_with_logcdf(self, y):
    # log P[Y = y] = log(cdf(y) - cdf(y - 1)), computed stably in log space.
    low = None if self._low is None else tf.convert_to_tensor(self._low)
    high = None if self._high is None else tf.convert_to_tensor(self._high)
    return _logsum_expbig_minus_expsmall(
        self.log_cdf(y, low=low, high=high),
        self.log_cdf(y - 1., low=low, high=high))
  def _log_prob_with_logsf_and_logcdf(self, y):
    """Compute log_prob(y) using log survival_function and cdf together."""
    # There are two options that would be equal if we had infinite precision:
    # Log[ sf(y - 1) - sf(y) ]
    # = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
    # Log[ cdf(y) - cdf(y - 1) ]
    # = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
    low = None if self._low is None else tf.convert_to_tensor(self._low)
    high = None if self._high is None else tf.convert_to_tensor(self._high)
    logsf_y = self._log_survival_function(y, low=low, high=high)
    logsf_y_minus_1 = self._log_survival_function(y - 1., low=low, high=high)
    logcdf_y = self._log_cdf(y, low=low, high=high)
    logcdf_y_minus_1 = self._log_cdf(y - 1., low=low, high=high)
    # Important: Here we use select in a way such that no input is inf, this
    # prevents the troublesome case where the output of select can be finite,
    # but the output of grad(select) will be NaN.
    # In either case, we are doing Log[ exp{big} - exp{small} ]
    # We want to use the sf items precisely when we are on the right side of the
    # median, which occurs when logsf_y < logcdf_y.
    big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
    small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
    return _logsum_expbig_minus_expsmall(big, small)
  @distribution_util.AppendDocstring(_prob_note)
  def _prob(self, y):
    if not hasattr(self.distribution, '_cdf'):
      raise NotImplementedError(
          '`prob` not implemented unless the base distribution implements '
          '`cdf`')
    try:
      # Prefer the survival-function path (more accurate for large `y`, per
      # _prob_note); fall back to cdf differences if sf is unavailable.
      return self._prob_with_sf_and_cdf(y)
    except NotImplementedError:
      return self._prob_with_cdf(y)
  def _prob_with_cdf(self, y):
    # P[Y = y] = cdf(y) - cdf(y - 1).
    low = None if self._low is None else tf.convert_to_tensor(self._low)
    high = None if self._high is None else tf.convert_to_tensor(self._high)
    return (self._cdf(y, low=low, high=high) -
            self._cdf(y - 1., low=low, high=high))
  def _prob_with_sf_and_cdf(self, y):
    # There are two options that would be equal if we had infinite precision:
    # sf(y - 1.) - sf(y)
    # cdf(y) - cdf(y - 1.)
    low = None if self._low is None else tf.convert_to_tensor(self._low)
    high = None if self._high is None else tf.convert_to_tensor(self._high)
    sf_y = self._survival_function(y, low=low, high=high)
    sf_y_minus_1 = self._survival_function(y - 1., low=low, high=high)
    cdf_y = self._cdf(y, low=low, high=high)
    cdf_y_minus_1 = self._cdf(y - 1., low=low, high=high)
    # sf_prob has greater precision iff we're on the right side of the median.
    return tf.where(
        sf_y < cdf_y,  # True iff we're on the right side of the median.
        sf_y_minus_1 - sf_y,
        cdf_y - cdf_y_minus_1)
  @distribution_util.AppendDocstring(_log_cdf_note)
  def _log_cdf(self, y, low=None, high=None):
    low = self._low if low is None else low
    high = self._high if high is None else high
    # Recall the promise:
    # cdf(y) := P[Y <= y]
    # = 1, if y >= high,
    # = 0, if y < low,
    # = P[X <= y], otherwise.
    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = tf.floor(y)
    result_so_far = self.distribution.log_cdf(j)
    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(
          j < low, tf.constant(-np.inf, self.dtype), result_so_far)
    if high is not None:
      result_so_far = tf.where(
          j < high, result_so_far, tf.zeros([], self.dtype))
    return result_so_far
  @distribution_util.AppendDocstring(_cdf_note)
  def _cdf(self, y, low=None, high=None):
    low = self._low if low is None else low
    high = self._high if high is None else high
    # Recall the promise:
    # cdf(y) := P[Y <= y]
    # = 1, if y >= high,
    # = 0, if y < low,
    # = P[X <= y], otherwise.
    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = tf.floor(y)
    # P[X <= j], used when low < X < high.
    result_so_far = self.distribution.cdf(j)
    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(
          j < low, tf.zeros([], self.dtype), result_so_far)
    if high is not None:
      result_so_far = tf.where(
          j < high, result_so_far, tf.ones([], self.dtype))
    return result_so_far
  @distribution_util.AppendDocstring(_log_sf_note)
  def _log_survival_function(self, y, low=None, high=None):
    low = self._low if low is None else low
    high = self._high if high is None else high
    # Recall the promise:
    # survival_function(y) := P[Y > y]
    # = 0, if y >= high,
    # = 1, if y < low,
    # = P[X > y], otherwise.
    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = tf.math.ceil(y)
    # P[X > j], used when low < X < high.
    result_so_far = self.distribution.log_survival_function(j)
    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(
          j < low, tf.zeros([], self.dtype), result_so_far)
    if high is not None:
      result_so_far = tf.where(
          j < high, result_so_far, tf.constant(-np.inf, self.dtype))
    return result_so_far
  @distribution_util.AppendDocstring(_sf_note)
  def _survival_function(self, y, low=None, high=None):
    low = self._low if low is None else low
    high = self._high if high is None else high
    # Recall the promise:
    # survival_function(y) := P[Y > y]
    # = 0, if y >= high,
    # = 1, if y < low,
    # = P[X > y], otherwise.
    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = tf.math.ceil(y)
    # P[X > j], used when low < X < high.
    result_so_far = self.distribution.survival_function(j)
    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(
          j < low, tf.ones([], self.dtype), result_so_far)
    if high is not None:
      result_so_far = tf.where(
          j < high, result_so_far, tf.zeros([], self.dtype))
    return result_so_far
  def _default_event_space_bijector(self):
    # No default bijector: the distribution is discrete-valued.
    return
  def _parameter_control_dependencies(self, is_init):
    """Assertions on `low`/`high`: whole-numbered, shape-safe, `low < high`."""
    if not self.validate_args:
      return []
    sample_shape = tf.concat(
        [self._batch_shape_tensor(), self._event_shape_tensor()], axis=0)
    low = None if self._low is None else tf.convert_to_tensor(self._low)
    high = None if self._high is None else tf.convert_to_tensor(self._high)
    assertions = []
    if self._low is not None and is_init != tensor_util.is_ref(self._low):
      low_shape = prefer_static.shape(low)
      broadcast_shape = prefer_static.broadcast_shape(sample_shape, low_shape)
      assertions.extend(
          [distribution_util.assert_integer_form(
              low, message='`low` has non-integer components.'),
           assert_util.assert_equal(
               tf.reduce_prod(broadcast_shape),
               tf.reduce_prod(sample_shape),
               message=('Shape of `low` adds extra batch dimensions to '
                        'sample shape.'))])
    if self._high is not None and is_init != tensor_util.is_ref(self._high):
      high_shape = prefer_static.shape(high)
      broadcast_shape = prefer_static.broadcast_shape(sample_shape, high_shape)
      assertions.extend(
          [distribution_util.assert_integer_form(
              high, message='`high` has non-integer components.'),
           assert_util.assert_equal(
               tf.reduce_prod(broadcast_shape),
               tf.reduce_prod(sample_shape),
               message=('Shape of `high` adds extra batch dimensions to '
                        'sample shape.'))])
    if (self._low is not None and self._high is not None and
        (is_init != (tensor_util.is_ref(self._low)
                     or tensor_util.is_ref(self._high)))):
      assertions.append(assert_util.assert_less(
          low, high,
          message='`low` must be strictly less than `high`.'))
    return assertions
  def _sample_control_dependencies(self, x):
    """Assertions that sampled values are whole numbers."""
    assertions = []
    if not self.validate_args:
      return assertions
    assertions.append(distribution_util.assert_integer_form(
        x, message='Sample has non-integer components.'))
    return assertions
def _logsum_expbig_minus_expsmall(big, small):
  """Numerically stable evaluation of `Log[exp{big} - exp{small}]`.
  Correct only under the pointwise assumption `small <= big`.
  Args:
    big: Floating-point `Tensor`
    small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
      shape.
  Returns:
    log_sub_exp: `Tensor` of same `dtype` of `big` and broadcast shape.
  """
  with tf.name_scope('logsum_expbig_minus_expsmall'):
    # log(e^big - e^small) = big + log(1 - e^(small - big)); log1p keeps
    # precision when exp(small - big) is near zero.
    log_ratio = small - big
    return big + tf.math.log1p(-tf.exp(log_ratio))
| 36
| 85
| 0.650125
|
acfd024b1292b1e82203f1b9c497af1e21cbd0e9
| 2,445
|
py
|
Python
|
unused/simple.py
|
philip-h-dye/docpt-parser
|
abb5ea6566dac4cb77851d0cff69ed4aca3b804b
|
[
"MIT"
] | 2
|
2021-05-01T02:34:05.000Z
|
2022-02-13T06:41:36.000Z
|
unused/simple.py
|
philip-h-dye/docpt-parser
|
abb5ea6566dac4cb77851d0cff69ed4aca3b804b
|
[
"MIT"
] | null | null | null |
unused/simple.py
|
philip-h-dye/docpt-parser
|
abb5ea6566dac4cb77851d0cff69ed4aca3b804b
|
[
"MIT"
] | null | null | null |
import parsimonious
#------------------------------------------------------------------------------
if False :
import sys
print(': import paths')
for path in sys.path :
print(path)
print(': - - - - -')
print('')
sys.stdout.flush()
#------------------------------------------------------------------------------
GRAMMAR_FILE = 'docopt.peg'
# USAGE_PATTERN = "hello = 5"
# USAGE_PATTERN = "Usage: hello = 5"
# USAGE_PATTERN = "Usage:"
# USAGE_PATTERN = "Usage: hello-world"
# USAGE_PATTERN = "Usage: hello-world FILE <another-argument>"
# USAGE_PATTERN = "Usage: convoy Move-Forward FILE <another-argument>"
# USAGE_PATTERN = "Usage: convoy --why"
# USAGE_PATTERN = "Usage: convoy -a --why -b --what"
# USAGE_PATTERN = "Usage: convoy Move-Forward --why FILE <another-argument>"
# USAGE_PATTERN = "Usage: convoy Move-Forward -a -b -c --why FILE <another-argument>"
# USAGE_PATTERN = "Usage: convoy [<another-argument>]"
# USAGE_PATTERN = "Usage: program FILE <dst>"
# New approach : model on SMILE 'Simplify' Stage
# 1. atoms
# 2 expressions of single atoms
# 3. expressions of sequences of atoms
# Starting from SMILE 'Simplify', USAGE_PATTERN ="3+4"
# USAGE_PATTERN =" FILE "
# USAGE_PATTERN =" FILE \n" # this \b is bell : works
# USAGE_PATTERN =" \bFILE \n" # this \b is bell : this fails !
# # bell isn't whitespace, not an issue
# USAGE_PATTERN = "<another-argument>"
# USAGE_PATTERN =" <another-argument> \n"
USAGE_PATTERN =" [ FILE ] "
# USAGE_PATTERN =" [FILE] "
# USAGE_PATTERN = "[<another-argument>]"
# USAGE_PATTERN = "[FILE + <another-argument>]"
# USAGE_PATTERN = "FILE <another-argument>"
# USAGE_PATTERN = "[ FILE <another-argument> ]"
#------------------------------------------------------------------------------
def to_str(node):
    """Flatten a parse-tree node into the concatenated text of its leaves."""
    if not node.children:
        return node.text
    return ''.join(to_str(child) for child in node)
#------------------------------------------------------------------------------
slurp = lambda fname : [(f.read(), f.close()) for f in [open(fname,'r')]][0][0]
if True :
grammar = slurp(GRAMMAR_FILE)
g = parsimonious.Grammar(grammar)
else :
import bootstrap
g = bootstrap.docopt_grammar
AST = g.parse(USAGE_PATTERN)
# print( ' ' + str(eval(to_str(AST))) )
# print( ' ' + to_str(AST) )
print( AST )
#------------------------------------------------------------------------------
| 30.949367
| 86
| 0.539059
|
acfd0252c322c56b38ed9581f0300b05eab1a400
| 52,730
|
py
|
Python
|
database/support/mysql-connector-python-2.1.3/tests/test_cursor.py
|
aprilsanchez/ictf-framework
|
2d37b5632b8ca8a4d90a9d84d689a023d19033cf
|
[
"BSD-2-Clause-FreeBSD"
] | 110
|
2020-04-01T02:27:38.000Z
|
2022-03-01T16:17:48.000Z
|
database/support/mysql-connector-python-2.1.3/tests/test_cursor.py
|
aprilsanchez/ictf-framework
|
2d37b5632b8ca8a4d90a9d84d689a023d19033cf
|
[
"BSD-2-Clause-FreeBSD"
] | 40
|
2020-03-31T23:30:47.000Z
|
2022-03-12T00:20:57.000Z
|
database/support/mysql-connector-python-2.1.3/tests/test_cursor.py
|
aprilsanchez/ictf-framework
|
2d37b5632b8ca8a4d90a9d84d689a023d19033cf
|
[
"BSD-2-Clause-FreeBSD"
] | 19
|
2020-04-12T19:17:22.000Z
|
2022-03-07T17:03:25.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Test module for bugs
Bug test cases specific to a particular Python (major) version are loaded
from py2.bugs or py3.bugs.
This module was originally located in python2/tests and python3/tests. It
should contain bug test cases which work for both Python v2 and v3.
Whenever a bug is bound to a specific Python version, put the test cases
in tests/py2/bugs.py or tests/py3/bugs.py. It might be that these files need
to be created first.
"""
import datetime
from collections import namedtuple
from decimal import Decimal
import re
import time
from . import PY2
import tests
from mysql.connector import (connection, cursor, errors)
class CursorModule(tests.MySQLConnectorTests):
    """
    Tests for the cursor module functions and attributes
    """
    def test_RE_SQL_INSERT_VALUES(self):
        # Each case pairs the text the regex must capture as group 1 (the
        # VALUES tuple) with a full INSERT statement to match against.
        regex = cursor.RE_SQL_INSERT_VALUES
        cases = [
            ("(%s, %s)",
             "INSERT INTO t1 VALUES (%s, %s)"),
            ("( %s, \n %s )",
             "INSERT INTO t1 VALUES ( %s, \n %s )"),
            ("(%(c1)s, %(c2)s)",
             "INSERT INTO t1 VALUES (%(c1)s, %(c2)s)"),
            ("(\n%(c1)s\n, \n%(c2)s\n)",
             "INSERT INTO t1 VALUES \n(\n%(c1)s\n, \n%(c2)s\n)"),
            ("( %(c1)s , %(c2)s )",
             "INSERT INTO t1 VALUES ( %(c1)s , %(c2)s ) ON DUPLICATE"),
            ("(%s, %s, NOW())",
             "INSERT INTO t1 VALUES (%s, %s, NOW())"),
            ("(%s, CONCAT('a', 'b'), %s, NOW())",
             "INSERT INTO t1 VALUES (%s, CONCAT('a', 'b'), %s, NOW())"),
            ("( NOW(), %s, \n, CONCAT('a', 'b'), %s )",
             "INSERT INTO t1 VALUES "
             " ( NOW(), %s, \n, CONCAT('a', 'b'), %s )"),
            ("(%(c1)s, NOW(6), %(c2)s)",
             "INSERT INTO t1 VALUES (%(c1)s, NOW(6), %(c2)s)"),
            ("(\n%(c1)s\n, \n%(c2)s, REPEAT('a', 20)\n)",
             "INSERT INTO t1 VALUES "
             "\n(\n%(c1)s\n, \n%(c2)s, REPEAT('a', 20)\n)"),
            ("( %(c1)s ,NOW(),REPEAT('a', 20)\n), %(c2)s )",
             "INSERT INTO t1 VALUES "
             " ( %(c1)s ,NOW(),REPEAT('a', 20)\n), %(c2)s ) ON DUPLICATE"),
            ("( %(c1)s, %(c2)s )",
             "INSERT INTO `values` VALUES "
             " ( %(c1)s, %(c2)s ) ON DUPLICATE"),
        ]
        for exp, stmt in cases:
            self.assertEqual(exp, re.search(regex, stmt).group(1))
class CursorBaseTests(tests.MySQLConnectorTests):
    """Tests for cursor.CursorBase, the DB API 2.0 base cursor class.

    The read-only property tests have both a modern path
    (assertRaises context manager) and a fallback for old unittest
    versions (tests.OLD_UNITTEST) using try/except.
    """
    def setUp(self):
        self.cur = cursor.CursorBase()
    def test___init__(self):
        """CursorBase attribute defaults after construction"""
        exp = {
            '_description': None,
            '_rowcount': -1,
            'arraysize': 1,
        }
        for key, value in exp.items():
            self.assertEqual(value, getattr(self.cur, key),
                             msg="Default for '%s' did not match." % key)
    def test_callproc(self):
        """CursorBase object callproc()-method"""
        self.check_method(self.cur, 'callproc')
        try:
            self.cur.callproc('foo', args=(1, 2, 3))
        except (SyntaxError, TypeError):
            self.fail("Cursor callproc(): wrong arguments")
    def test_close(self):
        """CursorBase object close()-method"""
        self.check_method(self.cur, 'close')
    def test_execute(self):
        """CursorBase object execute()-method"""
        self.check_method(self.cur, 'execute')
        try:
            self.cur.execute('select', params=(1, 2, 3))
        except (SyntaxError, TypeError):
            self.fail("Cursor execute(): wrong arguments")
    def test_executemany(self):
        """CursorBase object executemany()-method"""
        self.check_method(self.cur, 'executemany')
        try:
            self.cur.executemany('select', [()])
        except (SyntaxError, TypeError):
            self.fail("Cursor executemany(): wrong arguments")
    def test_fetchone(self):
        """CursorBase object fetchone()-method"""
        self.check_method(self.cur, 'fetchone')
    def test_fetchmany(self):
        """CursorBase object fetchmany()-method"""
        self.check_method(self.cur, 'fetchmany')
        try:
            self.cur.fetchmany(size=1)
        except (SyntaxError, TypeError):
            self.fail("Cursor fetchmany(): wrong arguments")
    def test_fetchall(self):
        """CursorBase object fetchall()-method"""
        self.check_method(self.cur, 'fetchall')
    def test_nextset(self):
        """CursorBase object nextset()-method"""
        self.check_method(self.cur, 'nextset')
    def test_setinputsizes(self):
        """CursorBase object setinputsizes()-method"""
        self.check_method(self.cur, 'setinputsizes')
        try:
            self.cur.setinputsizes((1,))
        except (SyntaxError, TypeError):
            self.fail("CursorBase setinputsizes(): wrong arguments")
    def test_setoutputsize(self):
        """CursorBase object setoutputsize()-method"""
        self.check_method(self.cur, 'setoutputsize')
        try:
            self.cur.setoutputsize(1, column=None)
        except (SyntaxError, TypeError):
            self.fail("CursorBase setoutputsize(): wrong arguments")
    def test_description(self):
        """description mirrors _description and is read-only"""
        self.assertEqual(None, self.cur.description)
        self.assertEqual(self.cur._description, self.cur.description)
        self.cur._description = 'ham'
        self.assertEqual('ham', self.cur.description)
        if tests.OLD_UNITTEST:
            try:
                self.cur.description = 'spam'
            except AttributeError as err:
                # Exception should be raised
                pass
            else:
                self.fail("AttributeError was not raised")
        else:
            with self.assertRaises(AttributeError):
                self.cur.description = 'spam'
    def test_rowcount(self):
        """rowcount mirrors _rowcount and is read-only"""
        self.assertEqual(-1, self.cur.rowcount)
        self.assertEqual(self.cur._rowcount, self.cur.rowcount)
        self.cur._rowcount = 2
        self.assertEqual(2, self.cur.rowcount)
        if tests.OLD_UNITTEST:
            try:
                # Bug fix: this branch previously assigned to 'description'
                # (copy-paste from test_description), so rowcount's
                # read-only behaviour was never tested under old unittest.
                self.cur.rowcount = 3
            except AttributeError as err:
                # Exception should be raised
                pass
            else:
                self.fail("AttributeError was not raised")
        else:
            with self.assertRaises(AttributeError):
                self.cur.rowcount = 3
    def test_last_insert_id(self):
        """lastrowid mirrors _last_insert_id and is read-only"""
        self.assertEqual(None, self.cur.lastrowid)
        self.assertEqual(self.cur._last_insert_id, self.cur.lastrowid)
        self.cur._last_insert_id = 2
        self.assertEqual(2, self.cur.lastrowid)
        if tests.OLD_UNITTEST:
            try:
                # Bug fix: this branch previously assigned to 'description'
                # (copy-paste from test_description), so lastrowid's
                # read-only behaviour was never tested under old unittest.
                self.cur.lastrowid = 3
            except AttributeError as err:
                # Exception should be raised
                pass
            else:
                self.fail("AttributeError was not raised")
        else:
            with self.assertRaises(AttributeError):
                self.cur.lastrowid = 3
class MySQLCursorTests(tests.TestsCursor):
    """Tests for cursor.MySQLCursor against a live MySQL server.

    setUp only creates an unconnected cursor; the individual tests open
    real connections via tests.get_mysql_config() as needed.
    """
    def setUp(self):
        self.cur = cursor.MySQLCursor(connection=None)
        self.cnx = None
    def test_init(self):
        """MySQLCursor object init"""
        try:
            cur = cursor.MySQLCursor(connection=None)
        except (SyntaxError, TypeError) as err:
            self.fail("Failed initializing MySQLCursor; {0}".format(err))
        exp = {
            '_connection': None,
            '_stored_results': [],
            '_nextrow': (None, None),
            '_warnings': None,
            '_warning_count': 0,
            '_executed': None,
            '_executed_list': [],
        }
        for key, value in exp.items():
            self.assertEqual(
                value, getattr(cur, key),
                msg="Default for '{0}' did not match.".format(key))
        # Anything that is not a connection object must be rejected
        self.assertRaises(errors.InterfaceError, cursor.MySQLCursor,
                          connection='foo')
    def test__set_connection(self):
        """MySQLCursor object _set_connection()-method"""
        self.check_method(self.cur, '_set_connection')
        self.assertRaises(errors.InterfaceError,
                          self.cur._set_connection, 'foo')
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur._set_connection(self.cnx)
        self.cur.close()
    def test__reset_result(self):
        """MySQLCursor object _reset_result()-method"""
        self.check_method(self.cur, '_reset_result')
        # Monkey-patch reset() onto the instance to verify it is invoked
        def reset(self):
            self._test = "Reset called"
        self.cur.reset = reset.__get__(self.cur, cursor.MySQLCursor)
        exp = {
            'rowcount': -1,
            '_stored_results': [],
            '_nextrow': (None, None),
            '_warnings': None,
            '_warning_count': 0,
            '_executed': None,
            '_executed_list': [],
        }
        self.cur._reset_result()
        for key, value in exp.items():
            self.assertEqual(value, getattr(self.cur, key),
                             msg="'{0}' was not reset.".format(key))
        # MySQLCursor._reset_result() must call MySQLCursor.reset()
        self.assertEqual('Reset called',
                         self.cur._test)  # pylint: disable=E1103
    def test__have_unread_result(self):
        """MySQLCursor object _have_unread_result()-method"""
        self.check_method(self.cur, '_have_unread_result')
        # Minimal stand-in for a connection exposing unread_result
        class FakeConnection(object):
            def __init__(self):
                self.unread_result = False
        self.cur = cursor.MySQLCursor()
        self.cur._connection = FakeConnection()
        self.cur._connection.unread_result = True
        self.assertTrue(self.cur._have_unread_result())
        self.cur._connection.unread_result = False
        self.assertFalse(self.cur._have_unread_result())
    def test_next(self):
        """MySQLCursor object next()-method"""
        self.check_method(self.cur, 'next')
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = cursor.MySQLCursor(self.cnx)
        # No statement executed yet: iteration must stop immediately
        self.assertRaises(StopIteration, self.cur.__next__)
        self.cur.execute("SELECT BINARY 'ham'")
        exp = (b'ham',)
        self.assertEqual(exp, next(self.cur))
        self.cur.close()
    def test_close(self):
        """MySQLCursor object close()-method"""
        self.check_method(self.cur, 'close')
        self.assertEqual(False, self.cur.close(),
                         "close() should return False with no connection")
        self.assertEqual(None, self.cur._connection)
    def test__process_params(self):
        """MySQLCursor object _process_params()-method"""
        self.check_method(self.cur, '_process_params')
        self.assertRaises(
            errors.ProgrammingError, self.cur._process_params, 'foo')
        self.assertRaises(errors.ProgrammingError, self.cur._process_params, ())
        st_now = time.localtime()
        # One Python value per supported conversion type
        data = (
            None,
            int(128),
            int(1281288),
            float(3.14),
            Decimal('3.14'),
            r'back\slash',
            'newline\n',
            'return\r',
            "'single'",
            '"double"',
            'windows\032',
            "Strings are sexy",
            u'\u82b1',
            datetime.datetime(2008, 5, 7, 20, 1, 23),
            datetime.date(2008, 5, 7),
            datetime.time(20, 3, 23),
            st_now,
            datetime.timedelta(hours=40, minutes=30, seconds=12),
        )
        # Expected MySQL-quoted byte strings, in the same order as data
        exp = (
            b'NULL',
            b'128',
            b'1281288',
            repr(float(3.14)) if PY2 else b'3.14',
            b"'3.14'",
            b"'back\\\\slash'",
            b"'newline\\n'",
            b"'return\\r'",
            b"'\\'single\\''",
            b'\'\\"double\\"\'',
            b"'windows\\\x1a'",
            b"'Strings are sexy'",
            b"'\xe8\x8a\xb1'",
            b"'2008-05-07 20:01:23'",
            b"'2008-05-07'",
            b"'20:03:23'",
            b"'" + time.strftime('%Y-%m-%d %H:%M:%S', st_now).encode('ascii')
            + b"'",
            b"'40:30:12'",
        )
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.assertEqual((), self.cur._process_params(()),
                         "_process_params() should return a tuple")
        res = self.cur._process_params(data)
        for (i, exped) in enumerate(exp):
            self.assertEqual(exped, res[i])
        self.cur.close()
    def test__process_params_dict(self):
        """MySQLCursor object _process_params_dict()-method"""
        self.check_method(self.cur, '_process_params')
        self.assertRaises(
            errors.ProgrammingError, self.cur._process_params, 'foo')
        self.assertRaises(errors.ProgrammingError, self.cur._process_params, ())
        st_now = time.localtime()
        # Same conversions as test__process_params, but as named parameters
        data = {
            'a': None,
            'b': int(128),
            'c': int(1281288),
            'd': float(3.14),
            'e': Decimal('3.14'),
            'f': 'back\slash',  # pylint: disable=W1401
            'g': 'newline\n',
            'h': 'return\r',
            'i': "'single'",
            'j': '"double"',
            'k': 'windows\032',
            'l': str("Strings are sexy"),
            'm': u'\u82b1',
            'n': datetime.datetime(2008, 5, 7, 20, 1, 23),
            'o': datetime.date(2008, 5, 7),
            'p': datetime.time(20, 3, 23),
            'q': st_now,
            'r': datetime.timedelta(hours=40, minutes=30, seconds=12),
        }
        exp = {
            b'%(a)s': b'NULL',
            b'%(b)s': b'128',
            b'%(c)s': b'1281288',
            b'%(d)s': repr(float(3.14)) if PY2 else b'3.14',
            b'%(e)s': b"'3.14'",
            b'%(f)s': b"'back\\\\slash'",
            b'%(g)s': b"'newline\\n'",
            b'%(h)s': b"'return\\r'",
            b'%(i)s': b"'\\'single\\''",
            b'%(j)s': b'\'\\"double\\"\'',
            b'%(k)s': b"'windows\\\x1a'",
            b'%(l)s': b"'Strings are sexy'",
            b'%(m)s': b"'\xe8\x8a\xb1'",
            b'%(n)s': b"'2008-05-07 20:01:23'",
            b'%(o)s': b"'2008-05-07'",
            b'%(p)s': b"'20:03:23'",
            b'%(q)s': b"'" +
                      time.strftime('%Y-%m-%d %H:%M:%S', st_now).encode('ascii')
                      + b"'",
            b'%(r)s': b"'40:30:12'",
        }
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.assertEqual({}, self.cur._process_params_dict({}),
                         "_process_params_dict() should return a dict")
        self.assertEqual(exp, self.cur._process_params_dict(data))
        self.cur.close()
    def test__fetch_warnings(self):
        """MySQLCursor object _fetch_warnings()-method"""
        self.check_method(self.cur, '_fetch_warnings')
        self.assertRaises(errors.InterfaceError, self.cur._fetch_warnings)
        config = tests.get_mysql_config()
        config['get_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        # 'a' + 'b' forces two truncation warnings on the server
        self.cur.execute("SELECT 'a' + 'b'")
        self.cur.fetchone()
        exp = [
            ('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
            ('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'")
        ]
        self.assertTrue(tests.cmp_result(exp, self.cur._fetch_warnings()))
        self.assertEqual(len(exp), self.cur._warning_count)
    def test__handle_noresultset(self):
        """MySQLCursor object _handle_noresultset()-method"""
        self.check_method(self.cur, '_handle_noresultset')
        self.assertRaises(errors.ProgrammingError,
                          self.cur._handle_noresultset, None)
        data = {
            'affected_rows': 1,
            'insert_id': 10,
            'warning_count': 100,
            'server_status': 8,
        }
        config = tests.get_mysql_config()
        config['get_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        self.cur._handle_noresultset(data)
        self.assertEqual(data['affected_rows'], self.cur.rowcount)
        self.assertEqual(data['insert_id'], self.cur.lastrowid)
        self.assertEqual(data['warning_count'], self.cur._warning_count)
    def test__handle_result(self):
        """MySQLCursor object _handle_result()-method"""
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.assertRaises(errors.InterfaceError, self.cur._handle_result, None)
        self.assertRaises(errors.InterfaceError, self.cur._handle_result,
                          'spam')
        self.assertRaises(errors.InterfaceError, self.cur._handle_result,
                          {'spam': 5})
        # cases[0]: OK-packet style dict; cases[1]: result-set style dict
        cases = [
            {'affected_rows': 99999,
             'insert_id': 10,
             'warning_count': 100,
             'server_status': 8,
             },
            {'eof': {'status_flag': 0, 'warning_count': 0},
             'columns': [('1', 8, None, None, None, None, 0, 129)]
             },
        ]
        self.cur._handle_result(cases[0])
        self.assertEqual(cases[0]['affected_rows'], self.cur.rowcount)
        self.assertFalse(self.cur._connection.unread_result)
        self.assertFalse(self.cur._have_unread_result())
        self.cur._handle_result(cases[1])
        self.assertEqual(cases[1]['columns'], self.cur.description)
        self.assertTrue(self.cur._connection.unread_result)
        self.assertTrue(self.cur._have_unread_result())
    def test_execute(self):
        """MySQLCursor object execute()-method"""
        self.check_method(self.cur, 'execute')
        self.assertEqual(None, self.cur.execute(None, None))
        config = tests.get_mysql_config()
        config['get_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        # Parameter count mismatch must raise
        self.assertRaises(errors.ProgrammingError, self.cur.execute,
                          'SELECT %s,%s,%s', ('foo', 'bar',))
        self.cur.execute("SELECT 'a' + 'b'")
        self.cur.fetchone()
        exp = [
            ('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
            ('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'")
        ]
        self.assertTrue(tests.cmp_result(exp, self.cur._warnings))
        self.cur.execute("SELECT BINARY 'ham'")
        exp = [(b'ham',)]
        self.assertEqual(exp, self.cur.fetchall())
        self.cur.close()
        tbl = 'myconnpy_cursor'
        self._test_execute_setup(self.cnx, tbl)
        stmt_insert = "INSERT INTO {0} (col1,col2) VALUES (%s,%s)".format(tbl)
        self.cur = self.cnx.cursor()
        res = self.cur.execute(stmt_insert, (1, 100))
        self.assertEqual(None, res, "Return value of execute() is wrong.")
        stmt_select = "SELECT col1,col2 FROM {0} ORDER BY col1".format(tbl)
        self.cur.execute(stmt_select)
        self.assertEqual([(1, '100')],
                         self.cur.fetchall(), "Insert test failed")
        data = {'id': 2}
        stmt = "SELECT col1,col2 FROM {0} WHERE col1 <= %(id)s".format(tbl)
        self.cur.execute(stmt, data)
        self.assertEqual([(1, '100')], self.cur.fetchall())
        self._test_execute_cleanup(self.cnx, tbl)
        self.cur.close()
        # multi=True: execute() returns an iterator of result cursors
        self.cur = self.cnx.cursor()
        self.cur.execute("DROP PROCEDURE IF EXISTS multi_results")
        procedure = (
            "CREATE PROCEDURE multi_results () "
            "BEGIN SELECT 1; SELECT 'ham'; END"
        )
        self.cur.execute(procedure)
        exp_stmt = "CALL multi_results()"
        if not PY2:
            exp_stmt = b"CALL multi_results()"
        exp_result = [[(1,)], [(u'ham',)]]
        results = []
        for result in self.cur.execute(exp_stmt, multi=True):
            if result.with_rows:
                self.assertEqual(exp_stmt, result._executed)
                results.append(result.fetchall())
        self.assertEqual(exp_result, results)
        self.cur.execute("DROP PROCEDURE multi_results")
    def test_executemany(self):
        """MySQLCursor object executemany()-method"""
        self.check_method(self.cur, 'executemany')
        self.assertEqual(None, self.cur.executemany(None, []))
        config = tests.get_mysql_config()
        config['get_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        self.assertRaises(errors.ProgrammingError, self.cur.executemany,
                          'programming error with string', 'foo')
        self.assertRaises(errors.ProgrammingError, self.cur.executemany,
                          'programming error with 1 element list', ['foo'])
        self.assertEqual(None, self.cur.executemany('empty params', []))
        self.assertEqual(None, self.cur.executemany('params is None', None))
        self.assertRaises(errors.ProgrammingError, self.cur.executemany,
                          'foo', ['foo'])
        self.assertRaises(errors.ProgrammingError, self.cur.executemany,
                          'SELECT %s', [('foo',), 'foo'])
        self.assertRaises(errors.ProgrammingError,
                          self.cur.executemany,
                          "INSERT INTO t1 1 %s", [(1,), (2,)])
        self.cur.executemany("SELECT SHA1(%s)", [('foo',), ('bar',)])
        self.assertEqual(None, self.cur.fetchone())
        self.cur.close()
        tbl = 'myconnpy_cursor'
        self._test_execute_setup(self.cnx, tbl)
        stmt_insert = "INSERT INTO {0} (col1,col2) VALUES (%s,%s)".format(tbl)
        stmt_select = "SELECT col1,col2 FROM {0} ORDER BY col1".format(tbl)
        self.cur = self.cnx.cursor()
        res = self.cur.executemany(stmt_insert, [(1, 100), (2, 200), (3, 300)])
        self.assertEqual(3, self.cur.rowcount)
        res = self.cur.executemany("SELECT %s", [('f',), ('o',), ('o',)])
        self.assertEqual(3, self.cur.rowcount)
        data = [{'id': 2}, {'id': 3}]
        stmt = "SELECT * FROM {0} WHERE col1 <= %(id)s".format(tbl)
        self.cur.executemany(stmt, data)
        self.assertEqual(5, self.cur.rowcount)
        self.cur.execute(stmt_select)
        self.assertEqual([(1, '100'), (2, '200'), (3, '300')],
                         self.cur.fetchall(), "Multi insert test failed")
        data = [{'id': 2}, {'id': 3}]
        stmt = "DELETE FROM {0} WHERE col1 = %(id)s".format(tbl)
        self.cur.executemany(stmt, data)
        self.assertEqual(2, self.cur.rowcount)
        stmt = "TRUNCATE TABLE {0}".format(tbl)
        self.cur.execute(stmt)
        # Statements with comments and ON DUPLICATE KEY must still be
        # recognized as multi-row INSERTs
        stmt = (
            "/*comment*/INSERT/*comment*/INTO/*comment*/{0}(col1,col2)VALUES"
            "/*comment*/(%s,%s/*comment*/)/*comment()*/ON DUPLICATE KEY UPDATE"
            " col1 = VALUES(col1)"
        ).format(tbl)
        self.cur.executemany(stmt, [(4, 100), (5, 200), (6, 300)])
        self.assertEqual(3, self.cur.rowcount)
        self.cur.execute(stmt_select)
        self.assertEqual([(4, '100'), (5, '200'), (6, '300')],
                         self.cur.fetchall(), "Multi insert test failed")
        stmt = "TRUNCATE TABLE {0}".format(tbl)
        self.cur.execute(stmt)
        stmt = (
            "INSERT INTO/*comment*/{0}(col1,col2)VALUES"
            "/*comment*/(%s,'/*100*/')/*comment()*/ON DUPLICATE KEY UPDATE "
            "col1 = VALUES(col1)"
        ).format(tbl)
        self.cur.executemany(stmt, [(4,), (5,)])
        self.assertEqual(2, self.cur.rowcount)
        self.cur.execute(stmt_select)
        self.assertEqual([(4, '/*100*/'), (5, '/*100*/')],
                         self.cur.fetchall(), "Multi insert test failed")
        self._test_execute_cleanup(self.cnx, tbl)
        self.cur.close()
    def test_fetchwarnings(self):
        """MySQLCursor object fetchwarnings()-method"""
        self.check_method(self.cur, 'fetchwarnings')
        self.assertEqual(
            None, self.cur.fetchwarnings(),
            "There should be no warnings after initiating cursor.")
        exp = ['A warning']
        self.cur._warnings = exp
        self.cur._warning_count = len(self.cur._warnings)
        self.assertEqual(exp, self.cur.fetchwarnings())
        self.cur.close()
    def test_stored_results(self):
        """MySQLCursor object stored_results()-method"""
        self.check_method(self.cur, 'stored_results')
        self.assertEqual([], self.cur._stored_results)
        self.assertTrue(hasattr(self.cur.stored_results(), '__iter__'))
        self.cur._stored_results.append('abc')
        self.assertEqual('abc', next(self.cur.stored_results()))
        try:
            _ = next(self.cur.stored_results())
        except StopIteration:
            pass
        except:
            self.fail("StopIteration not raised")
    def _test_callproc_setup(self, cnx):
        """Create the four stored procedures used by test_callproc()"""
        self._test_callproc_cleanup(cnx)
        stmt_create1 = (
            "CREATE PROCEDURE myconnpy_sp_1 "
            "(IN pFac1 INT, IN pFac2 INT, OUT pProd INT) "
            "BEGIN SET pProd := pFac1 * pFac2; END;")
        stmt_create2 = (
            "CREATE PROCEDURE myconnpy_sp_2 "
            "(IN pFac1 INT, IN pFac2 INT, OUT pProd INT) "
            "BEGIN SELECT 'abc'; SELECT 'def'; SET pProd := pFac1 * pFac2; "
            "END;")
        stmt_create3 = (
            "CREATE PROCEDURE myconnpy_sp_3"
            "(IN pStr1 VARCHAR(20), IN pStr2 VARCHAR(20), "
            "OUT pConCat VARCHAR(100)) "
            "BEGIN SET pConCat := CONCAT(pStr1, pStr2); END;")
        stmt_create4 = (
            "CREATE PROCEDURE myconnpy_sp_4"
            "(IN pStr1 VARCHAR(20), INOUT pStr2 VARCHAR(20), "
            "OUT pConCat VARCHAR(100)) "
            "BEGIN SET pConCat := CONCAT(pStr1, pStr2); END;")
        try:
            cur = cnx.cursor()
            cur.execute(stmt_create1)
            cur.execute(stmt_create2)
            cur.execute(stmt_create3)
            cur.execute(stmt_create4)
        except errors.Error as err:
            self.fail("Failed setting up test stored routine; {0}".format(err))
        cur.close()
    def _test_callproc_cleanup(self, cnx):
        """Drop the stored procedures created by _test_callproc_setup()"""
        sp_names = ('myconnpy_sp_1', 'myconnpy_sp_2', 'myconnpy_sp_3',
                    'myconnpy_sp_4')
        stmt_drop = "DROP PROCEDURE IF EXISTS {procname}"
        try:
            cur = cnx.cursor()
            for sp_name in sp_names:
                cur.execute(stmt_drop.format(procname=sp_name))
        except errors.Error as err:
            self.fail(
                "Failed cleaning up test stored routine; {0}".format(err))
        cur.close()
    def test_callproc(self):
        """MySQLCursor object callproc()-method"""
        self.check_method(self.cur, 'callproc')
        self.assertRaises(ValueError, self.cur.callproc, None)
        self.assertRaises(ValueError, self.cur.callproc, 'sp1', None)
        config = tests.get_mysql_config()
        config['get_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self._test_callproc_setup(self.cnx)
        self.cur = self.cnx.cursor()
        # Before MySQL 5.1 OUT parameters come back as strings
        if tests.MYSQL_VERSION < (5, 1):
            exp = ('5', '4', b'20')
        else:
            exp = (5, 4, 20)
        result = self.cur.callproc('myconnpy_sp_1', (exp[0], exp[1], 0))
        self.assertEqual([], self.cur._stored_results)
        self.assertEqual(exp, result)
        if tests.MYSQL_VERSION < (5, 1):
            exp = ('6', '5', b'30')
        else:
            exp = (6, 5, 30)
        result = self.cur.callproc('myconnpy_sp_2', (exp[0], exp[1], 0))
        self.assertTrue(isinstance(self.cur._stored_results, list))
        self.assertEqual(exp, result)
        exp_results = [
            ('abc',),
            ('def',)
        ]
        for result, exp in zip(self.cur.stored_results(),
                               iter(exp_results)):
            self.assertEqual(exp, result.fetchone())
        exp = ('ham', 'spam', 'hamspam')
        result = self.cur.callproc('myconnpy_sp_3', (exp[0], exp[1], 0))
        self.assertTrue(isinstance(self.cur._stored_results, list))
        self.assertEqual(exp, result)
        exp = ('ham', 'spam', 'hamspam')
        # (value, 'CHAR') tuples request an explicit parameter type
        result = self.cur.callproc('myconnpy_sp_4',
                                   (exp[0], (exp[1], 'CHAR'), (0, 'CHAR')))
        self.assertTrue(isinstance(self.cur._stored_results, list))
        self.assertEqual(exp, result)
        self._test_callproc_cleanup(self.cnx)
        self.cur.close()
    def test_fetchone(self):
        """MySQLCursor object fetchone()-method"""
        self.check_method(self.cur, 'fetchone')
        self.assertEqual(None, self.cur.fetchone())
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.cur.execute("SELECT BINARY 'ham'")
        exp = (b'ham',)
        self.assertEqual(exp, self.cur.fetchone())
        self.assertEqual(None, self.cur.fetchone())
        self.cur.close()
    def test_fetchmany(self):
        """MySQLCursor object fetchmany()-method"""
        self.check_method(self.cur, 'fetchmany')
        self.assertEqual([], self.cur.fetchmany())
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        tbl = 'myconnpy_fetch'
        self._test_execute_setup(self.cnx, tbl)
        stmt_insert = (
            "INSERT INTO {table} (col1,col2) "
            "VALUES (%s,%s)".format(table=tbl))
        stmt_select = (
            "SELECT col1,col2 FROM {table} "
            "ORDER BY col1 DESC".format(table=tbl))
        self.cur = self.cnx.cursor()
        nrrows = 10
        data = [(i, str(i * 100)) for i in range(0, nrrows)]
        self.cur.executemany(stmt_insert, data)
        self.cur.execute(stmt_select)
        exp = [(9, '900'), (8, '800'), (7, '700'), (6, '600')]
        rows = self.cur.fetchmany(4)
        self.assertTrue(tests.cmp_result(exp, rows),
                        "Fetching first 4 rows test failed.")
        exp = [(5, '500'), (4, '400'), (3, '300')]
        rows = self.cur.fetchmany(3)
        self.assertTrue(tests.cmp_result(exp, rows),
                        "Fetching next 3 rows test failed.")
        exp = [(2, '200'), (1, '100'), (0, '0')]
        rows = self.cur.fetchmany(3)
        self.assertTrue(tests.cmp_result(exp, rows),
                        "Fetching next 3 rows test failed.")
        self.assertEqual([], self.cur.fetchmany())
        self._test_execute_cleanup(self.cnx, tbl)
        self.cur.close()
    def test_fetchall(self):
        """MySQLCursor object fetchall()-method"""
        self.check_method(self.cur, 'fetchall')
        self.assertRaises(errors.InterfaceError, self.cur.fetchall)
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        tbl = 'myconnpy_fetch'
        self._test_execute_setup(self.cnx, tbl)
        stmt_insert = (
            "INSERT INTO {table} (col1,col2) "
            "VALUES (%s,%s)".format(table=tbl))
        stmt_select = (
            "SELECT col1,col2 FROM {table} "
            "ORDER BY col1 ASC".format(table=tbl))
        self.cur = self.cnx.cursor()
        self.cur.execute("SELECT * FROM {table}".format(table=tbl))
        self.assertEqual([], self.cur.fetchall(),
                         "fetchall() with empty result should return []")
        nrrows = 10
        data = [(i, str(i * 100)) for i in range(0, nrrows)]
        self.cur.executemany(stmt_insert, data)
        self.cur.execute(stmt_select)
        self.assertTrue(tests.cmp_result(data, self.cur.fetchall()),
                        "Fetching all rows failed.")
        self.assertEqual(None, self.cur.fetchone())
        self._test_execute_cleanup(self.cnx, tbl)
        self.cur.close()
    def test_raise_on_warning(self):
        """raise_on_warnings turns server warnings into exceptions"""
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cnx.raise_on_warnings = True
        self.cur = self.cnx.cursor()
        try:
            self.cur.execute("SELECT 'a' + 'b'")
            self.cur.fetchall()
        except errors.Error:
            pass
        else:
            self.fail("Did not get exception while raising warnings.")
    def test__str__(self):
        """MySQLCursor object __str__()-method"""
        self.assertEqual("MySQLCursor: (Nothing executed yet)",
                         self.cur.__str__())
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.cur.execute("SELECT VERSION()")
        self.cur.fetchone()
        self.assertEqual("MySQLCursor: SELECT VERSION()",
                         self.cur.__str__())
        # Long statements are truncated at 40 characters plus '..'
        stmt = "SELECT VERSION(),USER(),CURRENT_TIME(),NOW(),SHA1('myconnpy')"
        self.cur.execute(stmt)
        self.cur.fetchone()
        self.assertEqual("MySQLCursor: {0}..".format(stmt[:40]),
                         self.cur.__str__())
        self.cur.close()
    def test_column_names(self):
        """column_names property reflects the executed SELECT's columns"""
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        stmt = "SELECT NOW() as now, 'The time' as label, 123 FROM dual"
        exp = ('now', 'label', '123')
        self.cur.execute(stmt)
        self.cur.fetchone()
        self.assertEqual(exp, self.cur.column_names)
        self.cur.close()
    def test_statement(self):
        """statement property returns the last executed statement, stripped"""
        self.cur = cursor.MySQLCursor()
        exp = 'SELECT * FROM ham'
        self.cur._executed = exp
        self.assertEqual(exp, self.cur.statement)
        self.cur._executed = '  ' + exp + '  '
        self.assertEqual(exp, self.cur.statement)
        self.cur._executed = b'SELECT * FROM ham'
        self.assertEqual(exp, self.cur.statement)
    def test_with_rows(self):
        """with_rows is True once a result-set description is present"""
        self.cur = cursor.MySQLCursor()
        self.assertFalse(self.cur.with_rows)
        self.cur._description = ('ham', 'spam')
        self.assertTrue(self.cur.with_rows)
    def test_unicode(self):
        """Non-ASCII column names work in statements and named parameters"""
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        stmt = "DROP TABLE IF EXISTS test_unicode"
        self.cur.execute(stmt)
        stmt = (
            "CREATE TABLE test_unicode(`aé` INTEGER AUTO_INCREMENT, "
            "`測試` INTEGER, PRIMARY KEY (`aé`))ENGINE=InnoDB"
        )
        self.cur.execute(stmt)
        stmt = "INSERT INTO test_unicode(`aé`, `測試`) VALUES (%(aé)s, %(測試)s)"
        params = {'aé': 1, '測試': 2}
        self.cur.execute(stmt, params)
        stmt = "SELECT * FROM test_unicode"
        self.cur.execute(stmt)
        exp = [(1, 2)]
        self.assertEqual(exp, self.cur.fetchall())
        stmt = "DROP TABLE IF EXISTS test_unicode"
        self.cur.execute(stmt)
class MySQLCursorBufferedTests(tests.TestsCursor):
    """Tests for cursor.MySQLCursorBuffered, which fetches the whole
    result set into _rows at execute() time.
    """
    def setUp(self):
        self.cur = cursor.MySQLCursorBuffered(connection=None)
        self.cnx = None
    def tearDown(self):
        if self.cnx:
            self.cnx.close()
    def test_init(self):
        """MySQLCursorBuffered object init"""
        try:
            cur = cursor.MySQLCursorBuffered(connection=None)
        except (SyntaxError, TypeError) as err:
            self.fail("Failed initializing MySQLCursorBuffered; {0}".format(
                err))
        else:
            cur.close()
        self.assertRaises(errors.InterfaceError,
                          cursor.MySQLCursorBuffered, connection='foo')
    def test__next_row(self):
        """MySQLCursorBuffered object _next_row-attribute"""
        self.check_attr(self.cur, '_next_row', 0)
    def test__rows(self):
        """MySQLCursorBuffered object _rows-attribute"""
        self.check_attr(self.cur, '_rows', None)
    def test_execute(self):
        """MySQLCursorBuffered object execute()-method
        """
        self.check_method(self.cur, 'execute')
        self.assertEqual(None, self.cur.execute(None, None))
        config = tests.get_mysql_config()
        config['buffered'] = True
        config['get_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        self.assertEqual(True,
                         isinstance(self.cur, cursor.MySQLCursorBuffered))
        self.cur.execute("SELECT 1")
        # Rows are buffered immediately, before any fetch call
        self.assertEqual([(b'1',)], self.cur._rows)
    def test_raise_on_warning(self):
        """raise_on_warnings turns server warnings into exceptions"""
        config = tests.get_mysql_config()
        config['buffered'] = True
        config['raise_on_warnings'] = True
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        try:
            self.cur.execute("SELECT 'a' + 'b'")
        except errors.Error:
            pass
        else:
            self.fail("Did not get exception while raising warnings.")
    def test_with_rows(self):
        """with_rows is True once rows are buffered"""
        cur = cursor.MySQLCursorBuffered()
        self.assertFalse(cur.with_rows)
        cur._rows = [('ham',)]
        self.assertTrue(cur.with_rows)
class MySQLCursorRawTests(tests.TestsCursor):
    """Tests for the raw cursor: rows are returned as unconverted bytes."""
    def setUp(self):
        settings = tests.get_mysql_config()
        settings['raw'] = True
        self.cnx = connection.MySQLConnection(**settings)
        self.cur = self.cnx.cursor()
    def tearDown(self):
        self.cur.close()
        self.cnx.close()
    def test_fetchone(self):
        """Raw cursor fetchone() returns byte strings without conversion"""
        self.check_method(self.cur, 'fetchone')
        # Nothing executed yet: fetchone() yields None
        self.assertEqual(None, self.cur.fetchone())
        self.cur.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
        expected = (b'1', b'string', b'2010-12-31', b'2.5')
        row = self.cur.fetchone()
        self.assertEqual(expected, row)
class MySQLCursorRawBufferedTests(tests.TestsCursor):
    """Tests for the raw buffered cursor: byte rows, buffered at execute."""
    def setUp(self):
        settings = tests.get_mysql_config()
        settings['raw'] = True
        settings['buffered'] = True
        self.cnx = connection.MySQLConnection(**settings)
        self.cur = self.cnx.cursor()
    def tearDown(self):
        self.cur.close()
        self.cnx.close()
    def test_fetchone(self):
        """Raw buffered cursor fetchone() returns unconverted bytes"""
        self.check_method(self.cur, 'fetchone')
        # Nothing executed yet: fetchone() yields None
        self.assertEqual(None, self.cur.fetchone())
        self.cur.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
        expected = (b'1', b'string', b'2010-12-31', b'2.5')
        row = self.cur.fetchone()
        self.assertEqual(expected, row)
    def test_fetchall(self):
        """Raw buffered cursor fetchall() returns all rows as bytes"""
        self.check_method(self.cur, 'fetchall')
        # fetchall() before any execute must raise
        self.assertRaises(errors.InterfaceError, self.cur.fetchall)
        self.cur.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
        expected = [(b'1', b'string', b'2010-12-31', b'2.5')]
        rows = self.cur.fetchall()
        self.assertEqual(expected, rows)
class MySQLCursorPreparedTests(tests.TestsCursor):
def setUp(self):
config = tests.get_mysql_config()
config['raw'] = True
config['buffered'] = True
self.cnx = connection.MySQLConnection(**config)
def test_callproc(self):
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
self.assertRaises(errors.NotSupportedError, cur.callproc)
def test_close(self):
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
cur.close()
self.assertEqual(None, cur._prepared)
def test_fetch_row(self):
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
self.assertEqual(None, cur._fetch_row())
cur._description = [('c1', 5, None, None, None, None, 1, 128)]
# Monkey patch the get_row method of the connection for testing
def _get_row(binary, columns): # pylint: disable=W0613
try:
row = self.cnx._test_fetch_row[0]
self.cnx._test_fetch_row = self.cnx._test_fetch_row[1:]
except IndexError:
return None
return row
self.cnx.get_row = _get_row
eof_info = {'status_flag': 0, 'warning_count': 2}
self.cnx.unread_result = True
self.cnx._test_fetch_row = [(b'1', None), (None, eof_info)]
self.assertEqual(b'1', cur._fetch_row())
self.assertEqual((None, None), cur._nextrow)
self.assertEqual(eof_info['warning_count'], cur._warning_count)
cur._reset_result()
self.cnx.unread_result = True
self.cnx._test_fetch_row = [(None, eof_info)]
self.assertEqual(None, cur._fetch_row())
self.assertEqual((None, None), cur._nextrow)
self.assertEqual(eof_info['warning_count'], cur._warning_count)
def test_execute(self):
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
cur2 = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
# No 1
stmt = "SELECT (? * 2) AS c1"
cur.execute(stmt, (5,))
self.assertEqual(stmt, cur._executed)
exp = {
'num_params': 1, 'statement_id': 1,
'parameters': [('?', 253, None, None, None, None, 1, 128)],
'warning_count': 0, 'num_columns': 1,
'columns': [('c1', 5, None, None, None, None, 1, 128)]
}
self.assertEqual(exp, cur._prepared)
# No 2
stmt = "SELECT (? * 3) AS c2"
# first, execute should fail, because unread results of No 1
self.assertRaises(errors.InternalError, cur2.execute, stmt)
cur.fetchall()
# We call with wrong number of values for paramaters
self.assertRaises(errors.ProgrammingError, cur2.execute, stmt, (1, 3))
cur2.execute(stmt, (5,))
self.assertEqual(stmt, cur2._executed)
exp = {
'num_params': 1, 'statement_id': 2,
'parameters': [('?', 253, None, None, None, None, 1, 128)],
'warning_count': 0, 'num_columns': 1,
'columns': [('c2', 5, None, None, None, None, 1, 128)]
}
self.assertEqual(exp, cur2._prepared)
self.assertEqual([(15,)], cur2.fetchall())
# No 3
data = (3, 4)
exp = [(5.0,)]
stmt = "SELECT SQRT(POW(?, 2) + POW(?, 2)) AS hypotenuse"
cur.execute(stmt, data)
self.assertEqual(3, cur._prepared['statement_id'])
self.assertEqual(exp, cur.fetchall())
# Execute the already prepared statement
data = (4, 5)
exp = (6.4031242374328485,)
cur.execute(stmt, data)
self.assertEqual(3, cur._prepared['statement_id'])
self.assertEqual(exp, cur.fetchone())
def test_executemany(self):
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
self.assertEqual(None, cur.executemany(None, []))
self.assertRaises(errors.InterfaceError, cur.executemany,
'ham', None)
self.assertRaises(errors.ProgrammingError, cur.executemany,
'ham', 'ham')
self.assertEqual(None, cur.executemany('ham', []))
self.assertRaises(errors.ProgrammingError, cur.executemany,
'ham', ['ham'])
cur.executemany("SELECT SHA1(%s)", [('ham',), ('bar',)])
self.assertEqual(None, cur.fetchone())
cur.close()
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
tbl = 'myconnpy_cursor'
self._test_execute_setup(self.cnx, tbl)
stmt_insert = "INSERT INTO {table} (col1,col2) VALUES (%s, %s)".format(
table=tbl)
stmt_select = "SELECT col1,col2 FROM {table} ORDER BY col1".format(
table=tbl)
cur.executemany(stmt_insert, [(1, 100), (2, 200), (3, 300)])
self.assertEqual(3, cur.rowcount)
cur.executemany("SELECT %s", [('h',), ('a',), ('m',)])
self.assertEqual(3, cur.rowcount)
cur.execute(stmt_select)
self.assertEqual([(1, b'100'), (2, b'200'), (3, b'300')],
cur.fetchall(), "Multi insert test failed")
data = [(2,), (3,)]
stmt = "DELETE FROM {table} WHERE col1 = %s".format(table=tbl)
cur.executemany(stmt, data)
self.assertEqual(2, cur.rowcount)
self._test_execute_cleanup(self.cnx, tbl)
cur.close()
    def test_fetchone(self):
        """fetchone() pops rows one at a time via _fetch_row, then yields None."""
        cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
        def _fetch_row():
            # Stand-in for the cursor's row fetcher: consume the staged rows.
            try:
                row = cur._test_fetch_row[0]
                cur._test_fetch_row = cur._test_fetch_row[1:]
            except IndexError:
                return None
            return row
        cur._fetch_row = _fetch_row
        cur._test_fetch_row = [(b'ham',)]
        self.assertEqual((b'ham',), cur.fetchone())
        # Staged rows exhausted, so the next call returns None.
        self.assertEqual(None, cur.fetchone())
    def test_fetchmany(self):
        """fetchmany(n) returns up to n rows, then an empty list when drained."""
        cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
        def _fetch_row():
            # Stand-in row fetcher backed by a staged list of rows.
            try:
                row = cur._test_fetch_row[0]
                cur._test_fetch_row = cur._test_fetch_row[1:]
            except IndexError:
                return None
            return row
        cur._fetch_row = _fetch_row
        rows = [(1, b'100'), (2, b'200'), (3, b'300')]
        cur._test_fetch_row = rows
        # unread_result must be set or the cursor refuses to fetch.
        self.cnx.unread_result = True
        self.assertEqual(rows[0:2], cur.fetchmany(2))
        self.assertEqual([rows[2]], cur.fetchmany(2))
        self.assertEqual([], cur.fetchmany())
    def test_fetchall(self):
        """fetchall() drains all rows and records rowcount and warning count."""
        cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
        def _get_rows(binary, columns):  # pylint: disable=W0613
            # Stand-in for connection.get_rows(): staged rows plus EOF packet info.
            self.unread_result = False  # pylint: disable=W0201
            return (
                self.cnx._test_fetch_row,
                {'status_flag': 0, 'warning_count': 3}
            )
        self.cnx.get_rows = _get_rows
        rows = [(1, 100), (2, 200), (3, 300)]
        self.cnx._test_fetch_row = rows
        self.cnx.unread_result = True
        self.assertEqual(rows, cur.fetchall())
        self.assertEqual(len(rows), cur._rowcount)
        self.assertEqual(3, cur._warning_count)
        # With no pending result set, fetchall() raises InterfaceError.
        self.assertRaises(errors.InterfaceError, cur.fetchall)
class MySQLCursorDictTests(tests.TestsCursor):
    """Tests for MySQLCursorDict: rows are returned as dictionaries."""
    def setUp(self):
        # Fresh connection and a dictionary cursor backed by a scratch table.
        config = tests.get_mysql_config()
        self.connection = connection.MySQLConnection(**config)
        self.cur = self.connection.cursor(dictionary=True)
        self.cur.execute('DROP TABLE IF EXISTS MySQLCursorDictTests')
        self.cur.execute('CREATE TABLE MySQLCursorDictTests(id INT(10), name '
                         'VARCHAR(20), city VARCHAR(20))')
    def tearDown(self):
        self.cur.execute('DROP TABLE IF EXISTS MySQLCursorDictTests')
        self.cur.close()
        self.connection.close()
    def test_fetchone(self):
        """fetchone() maps column names to values in a dict."""
        self.check_method(self.cur, 'fetchone')
        self.assertEqual(None, self.cur.fetchone())
        self.cur.execute("INSERT INTO MySQLCursorDictTests VALUES(%s, %s, %s)",
                         (1, 'ham', 'spam'))
        self.cur.execute("SELECT * FROM MySQLCursorDictTests")
        exp = {u'id': 1, u'name': u'ham', u'city': u'spam'}
        self.assertEqual(exp, self.cur.fetchone())
class MySQLCursorBufferedDictTests(tests.TestsCursor):
    """Tests for MySQLCursorBufferedDict: buffered rows returned as dicts."""
    def setUp(self):
        # Fresh connection and a buffered dictionary cursor with a scratch table.
        config = tests.get_mysql_config()
        self.connection = connection.MySQLConnection(**config)
        self.cur = self.connection.cursor(dictionary=True, buffered=True)
        self.cur.execute('DROP TABLE IF EXISTS MySQLCursorBufferedDictTests')
        self.cur.execute('CREATE TABLE MySQLCursorBufferedDictTests(id INT(10),'
                         'name VARCHAR(20), city VARCHAR(20))')
    def tearDown(self):
        # Drop the scratch table as well, consistent with the other cursor test
        # classes; previously the table was left behind after the test run.
        self.cur.execute('DROP TABLE IF EXISTS MySQLCursorBufferedDictTests')
        self.cur.close()
        self.connection.close()
    def test_fetchone(self):
        """fetchone() on a buffered dict cursor returns one dict per row."""
        self.check_method(self.cur, 'fetchone')
        self.assertEqual(None, self.cur.fetchone())
        self.cur.execute("INSERT INTO MySQLCursorBufferedDictTests VALUE"
                         "(%s, %s, %s)", (1, 'ham', 'spam'))
        self.cur.execute("SELECT * FROM MySQLCursorBufferedDictTests")
        exp = {u'id': 1, u'name': u'ham', u'city': u'spam'}
        self.assertEqual(exp, self.cur.fetchone())
    def test_fetchall(self):
        """fetchall() returns a list of dicts; raises without a result set."""
        self.check_method(self.cur, 'fetchall')
        self.assertRaises(errors.InterfaceError, self.cur.fetchall)
        self.cur.execute("INSERT INTO MySQLCursorBufferedDictTests VALUE"
                         "(%s, %s, %s)", (1, 'ham', 'spam'))
        self.cur.execute("SELECT * FROM MySQLCursorBufferedDictTests")
        exp = [{u'id': 1, u'name': u'ham', u'city': u'spam'}]
        self.assertEqual(exp, self.cur.fetchall())
class MySQLCursorNamedTupleTests(tests.TestsCursor):
    """Tests for MySQLCursorNamedTuple: rows are returned as named tuples."""
    def setUp(self):
        # Fresh connection and a named-tuple cursor backed by a scratch table.
        config = tests.get_mysql_config()
        self.connection = connection.MySQLConnection(**config)
        self.cur = self.connection.cursor(named_tuple=True)
        self.cur.execute('DROP TABLE IF EXISTS MySQLCursorNamedTupleTests')
        self.cur.execute('CREATE TABLE MySQLCursorNamedTupleTests(id INT(10),'
                         'name VARCHAR(20), city VARCHAR(20))')
    def tearDown(self):
        self.cur.execute('DROP TABLE IF EXISTS MySQLCursorNamedTupleTests')
        self.cur.close()
        self.connection.close()
    def test_fetchone(self):
        """fetchone() exposes column values as named-tuple attributes."""
        self.check_method(self.cur, 'fetchone')
        self.assertEqual(None, self.cur.fetchone())
        self.cur.execute("INSERT INTO MySQLCursorNamedTupleTests VALUES"
                         "(%s, %s, %s)", (1, 'ham', 'spam'))
        self.cur.execute("SELECT * FROM MySQLCursorNamedTupleTests")
        named_tuple = namedtuple('Row', ['id', 'name', 'city'])
        exp = named_tuple(1, u'ham', u'spam')
        row = self.cur.fetchone()
        self.assertEqual(exp.id, row.id)
        self.assertEqual(exp.name, row.name)
        self.assertEqual(exp.city, row.city)
class MySQLCursorBufferedNamedTupleTests(tests.TestsCursor):
    """Tests for MySQLCursorBufferedNamedTuple: buffered named-tuple rows."""
    def setUp(self):
        # Fresh connection and a buffered named-tuple cursor with a scratch table.
        config = tests.get_mysql_config()
        self.connection = connection.MySQLConnection(**config)
        self.cur = self.connection.cursor(named_tuple=True, buffered=True)
        self.cur.execute('DROP TABLE IF EXISTS '
                         'MySQLCursorBufferedNamedTupleTests')
        self.cur.execute('CREATE TABLE MySQLCursorBufferedNamedTupleTests('
                         'id INT(10), name VARCHAR(20), city VARCHAR(20))')
    def tearDown(self):
        # Drop the scratch table as well, consistent with the other cursor test
        # classes; previously the table was left behind after the test run.
        self.cur.execute('DROP TABLE IF EXISTS '
                         'MySQLCursorBufferedNamedTupleTests')
        self.cur.close()
        self.connection.close()
    def test_fetchone(self):
        """fetchone() exposes column values as named-tuple attributes."""
        self.check_method(self.cur, 'fetchone')
        self.assertEqual(None, self.cur.fetchone())
        self.cur.execute("INSERT INTO MySQLCursorBufferedNamedTupleTests VALUES"
                         "(%s, %s, %s)", (1, 'ham', 'spam'))
        self.cur.execute("SELECT * FROM MySQLCursorBufferedNamedTupleTests")
        named_tuple = namedtuple('Row', ['id', 'name', 'city'])
        exp = named_tuple(1, u'ham', u'spam')
        row = self.cur.fetchone()
        self.assertEqual(exp.id, row.id)
        self.assertEqual(exp.name, row.name)
        self.assertEqual(exp.city, row.city)
    def test_fetchall(self):
        """fetchall() returns a list of named tuples; raises without results."""
        self.check_method(self.cur, 'fetchall')
        self.assertRaises(errors.InterfaceError, self.cur.fetchall)
        self.cur.execute("INSERT INTO MySQLCursorBufferedNamedTupleTests VALUES"
                         "(%s, %s, %s)", (1, 'ham', 'spam'))
        self.cur.execute("SELECT * FROM MySQLCursorBufferedNamedTupleTests")
        named_tuple = namedtuple('Row', ['id', 'name', 'city'])
        exp = named_tuple(1, u'ham', u'spam')
        row = self.cur.fetchall()
        self.assertEqual(exp.id, row[0].id)
        self.assertEqual(exp.name, row[0].name)
        self.assertEqual(exp.city, row[0].city)
| 36.491349
| 80
| 0.574303
|
acfd033cbdfce94e043baacb829129a1c9bbb735
| 8,872
|
py
|
Python
|
server/tests/util/test_file_parser.py
|
drankye/recordservice
|
ced33a1565b7ab3a25f6cb7cdcf623a26e7b3ec0
|
[
"Apache-2.0"
] | null | null | null |
server/tests/util/test_file_parser.py
|
drankye/recordservice
|
ced33a1565b7ab3a25f6cb7cdcf623a26e7b3ec0
|
[
"Apache-2.0"
] | null | null | null |
server/tests/util/test_file_parser.py
|
drankye/recordservice
|
ced33a1565b7ab3a25f6cb7cdcf623a26e7b3ec0
|
[
"Apache-2.0"
] | 2
|
2019-09-22T07:59:28.000Z
|
2021-02-25T21:56:07.000Z
|
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This module is used for common utilities related to parsing test files
import collections
import codecs
import logging
import re
from collections import defaultdict
from os.path import isfile, isdir
from tests.common.test_dimensions import TableFormatInfo
LOG = logging.getLogger('impala_test_suite')
# constants
SECTION_DELIMITER = "===="
SUBSECTION_DELIMITER = "----"
# The QueryTestSectionReader provides utility functions that help to parse content
# from a query test file
class QueryTestSectionReader(object):
  """Helpers for turning query-test sections into runnable queries and for
  deriving database/table names from a table format and scale factor."""

  @staticmethod
  def build_query(query_section_text):
    """Return the query text with comment lines and trailing semicolons removed."""
    stripped = remove_comments(query_section_text)
    return stripped.rstrip(';')

  @staticmethod
  def get_table_name_components(table_format, table_name, scale_factor=''):
    """Return a (db_name, tbl_name) pair.

    A fully qualified table name keeps its own database prefix; otherwise the
    default database name is derived from the table format and scale factor.
    """
    parts = table_name.split('.')
    assert len(parts) <= 2, 'Unexpected table format: %s' % table_name
    if len(parts) == 2:
      db_name = parts[0]
    else:
      db_name = QueryTestSectionReader.get_db_name(table_format, scale_factor)
    return (db_name, parts[-1])

  @staticmethod
  def get_db_name(table_format, scale_factor=''):
    """Return the database name for the given table format and scale factor.

    Database names depend on the file format, compression type and codec:
    uncompressed text gets no suffix, other uncompressed formats append the
    file format, and compressed formats append the codec as well (with an
    extra 'record' marker for record-compressed tables).
    """
    file_format = table_format.file_format
    codec = table_format.compression_codec
    if file_format == 'text' and codec == 'none':
      suffix = ''
    elif codec == 'none':
      suffix = '_%s' % file_format
    elif table_format.compression_type == 'record':
      suffix = '_%s_record_%s' % (file_format, codec)
    else:
      suffix = '_%s_%s' % (file_format, codec)
    return table_format.dataset.replace('-', '') + scale_factor + suffix
def remove_comments(section_text):
  """Return *section_text* with all comment lines (starting with '#') removed."""
  kept_lines = []
  for line in section_text.split('\n'):
    if not line.strip().startswith('#'):
      kept_lines.append(line)
  return '\n'.join(kept_lines)
def parse_query_test_file(file_name, valid_section_names=None, encoding=None):
  """Parse a query test file into a list of test-case dictionaries.

  Each dictionary corresponds to one test case and maps a section name to
  that section's text. When valid_section_names is None a default list of
  known section names is used. Unknown sections are treated as errors
  (skip_unknown_sections=False).
  """
  # TODO: extend the defaults as other test types (planner, data error) appear.
  if valid_section_names is not None:
    section_names = valid_section_names
  else:
    section_names = ['QUERY', 'RESULTS', 'TYPES', 'LABELS', 'SETUP', 'CATCH',
                     'ERRORS', 'USER']
  return parse_test_file(file_name, section_names, encoding=encoding,
                         skip_unknown_sections=False)
def parse_table_constraints(constraints_file):
  """Read a table constraints file, if one exists, and return the pair
  (schema_include, schema_exclude): maps of lower-cased table name to lists
  of table-format constraints."""
  schema_include = defaultdict(list)
  schema_exclude = defaultdict(list)
  if not isfile(constraints_file):
    LOG.info('No schema constraints file file found')
  else:
    with open(constraints_file, 'rb') as constraints_file:
      for line in constraints_file.readlines():
        line = line.strip()
        # Skip blank lines and comments.
        if not line or line.startswith('#'):
          continue
        # Format: table_name:<name>, constraint_type:<type>, table_format:<t1>,<t2>,...
        table_name, constraint_type, table_formats =\
            [value.split(':')[1].strip() for value in line.split(',', 2)]
        if constraint_type == 'restrict_to':
          schema_include[table_name.lower()] +=\
              map(parse_table_format_constraint, table_formats.split(','))
        elif constraint_type == 'exclude':
          schema_exclude[table_name.lower()] +=\
              map(parse_table_format_constraint, table_formats.split(','))
        else:
          raise ValueError, 'Unknown constraint type: %s' % constraint_type
  return schema_include, schema_exclude
def parse_table_format_constraint(table_format_constraint):
  """Return the table-format constraint unchanged (no real parsing yet)."""
  # TODO: Expand how we parse table format constraints to support syntax such as
  # a table format string with a wildcard character. Right now we don't do anything.
  return table_format_constraint
def parse_test_file(test_file_name, valid_section_names, skip_unknown_sections=True,
                    encoding=None):
  """Parse an Impala test file into a list of section dictionaries.

  Test files have the format:
  ====               <- Section
  ---- [Name]        <- Named subsection
  // some text
  ---- [Name2]       <- Named subsection
  ...
  ====

  The valid section names are passed in to this function. The encoding to use
  when reading the data can be specified with the 'encoding' flag.
  """
  with open(test_file_name, 'rb') as test_file:
    file_data = test_file.read()
  # Decode outside the 'with' -- the file handle is no longer needed.
  if encoding:
    file_data = file_data.decode(encoding)
  return parse_test_file_text(file_data, valid_section_names, skip_unknown_sections)
def parse_test_file_text(text, valid_section_names, skip_unknown_sections=True):
  """Parse test-file text into a list of per-test-case section dictionaries.

  Test cases are delimited by SECTION_DELIMITER ('====') lines; within a case,
  SUBSECTION_DELIMITER ('----') lines introduce named subsections. Unknown
  subsection names are printed and skipped when skip_unknown_sections is True,
  otherwise they raise a RuntimeError.
  """
  sections = list()
  section_start_regex = re.compile(r'(?m)^%s' % SECTION_DELIMITER)
  match = section_start_regex.search(text)
  if match is not None:
    # Assume anything before the first section (==== tag) is a header and ignore it
    text = text[match.start():]
  # Split the test file up into sections. For each section, parse all subsections.
  for section in section_start_regex.split(text):
    # section[1:] skips the character left over from the delimiter line
    # (presumably the newline after '====').
    for sub_section in re.split(r'(?m)^%s' % SUBSECTION_DELIMITER, section[1:]):
      # Skip empty subsections
      if not sub_section:
        continue
      lines = sub_section.split('\n')
      # The first line of a subsection is its header: "NAME" or "NAME: COMMENT".
      subsection_name = lines[0].strip()
      subsection_comment = None
      subsection_info = [s.strip() for s in subsection_name.split(':')]
      if(len(subsection_info) == 2):
        subsection_name, subsection_comment = subsection_info
      if subsection_name not in valid_section_names:
        if skip_unknown_sections or not subsection_name:
          print sub_section
          print 'Unknown section %s' % subsection_name
          continue
        else:
          raise RuntimeError, 'Unknown subsection: %s' % subsection_name
      # A QUERY header comment names the query; RESULTS header comments select
      # a result verifier and/or multi-line mode.
      if subsection_name == 'QUERY' and subsection_comment:
        parsed_sections['QUERY_NAME'] = subsection_comment
      if subsection_name == 'RESULTS' and subsection_comment:
        for comment in subsection_comment.split(','):
          if comment == 'MULTI_LINE':
            parsed_sections['MULTI_LINE'] = comment
          elif comment.startswith('VERIFY'):
            parsed_sections['VERIFIER'] = comment
          else:
            raise RuntimeError, 'Unknown subsection comment: %s' % comment
      # Keep everything between the header line and the trailing delimiter line.
      parsed_sections[subsection_name] = '\n'.join([line for line in lines[1:-1]])
    if parsed_sections:
      sections.append(parsed_sections)
  return sections
def write_test_file(test_file_name, test_file_sections, encoding=None):
  """Write a list of test-file section dictionaries back out as a test file.

  This is useful when updating the expected results of a test. The file
  encoding can be given via 'encoding'; otherwise the default system
  encoding is used.
  """
  with codecs.open(test_file_name, 'w', encoding=encoding) as test_file:
    out_lines = list()
    for test_case in test_file_sections:
      out_lines.append(SECTION_DELIMITER)
      for section_name, section_value in test_case.items():
        # QUERY_NAME and VERIFIER are annotations written as part of the
        # QUERY/RESULTS subsection headers, not as subsections of their own.
        if section_name in ['QUERY_NAME', 'VERIFIER']:
          continue
        # TODO: We need a more generic way of persisting the old test file.
        # Special casing will blow up.
        full_section_name = section_name
        if section_name == 'QUERY' and test_case.get('QUERY_NAME'):
          full_section_name = '%s: %s' % (section_name, test_case['QUERY_NAME'])
        if section_name == 'RESULTS' and test_case.get('VERIFIER'):
          full_section_name = '%s: %s' % (section_name, test_case['VERIFIER'])
        out_lines.append("%s %s" % (SUBSECTION_DELIMITER, full_section_name))
        if test_case[section_name].strip():
          out_lines.append(test_case[section_name])
    out_lines.append(SECTION_DELIMITER)
    test_file.write(('\n').join(out_lines))
| 40.884793
| 90
| 0.701533
|
acfd043094feadc50954a4bbb4c2030bbf002435
| 12,797
|
py
|
Python
|
swagger_client/models/post_ont.py
|
parzingis/corpy
|
638dedb3eaa619046d3c3fb9652f9e82800a8557
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/models/post_ont.py
|
parzingis/corpy
|
638dedb3eaa619046d3c3fb9652f9e82800a8557
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/models/post_ont.py
|
parzingis/corpy
|
638dedb3eaa619046d3c3fb9652f9e82800a8557
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
ORR API Documentation
The main ORR documentation is located at: https://mmisw.org/orrdoc/ __Please note__: - The ORR API is approaching a stable version but is still work in progress. Please [let us know](https://github.com/mmisw/mmiorr-docs/issues) if you have any questions or suggestions. - Besides the documentation itself, this page lets you directly exercise and test the API. Click on any operation header below to learn more details about it, and see a \"Try it out\" button. - You can click on the \"Authorize\" button at the top right of this page (or the `!` icon under the particular operation) to retrieve an authentication token corresponding to your ORR instance credentials (username and password). Once authorized, the authentication token will be automatically included in the corresponding request. You will be able to not only perform the basic `GET` operations, but also see expanded responses according to your access privileges as well as perform other operations. - The \"Try it out\" button will also show the corresponding API call that you can submit from the command line using [`curl`](https://curl.haxx.se/). - This API includes administrative operations related with the triple store. The SPARQL endpoint itself (located at `http://cor.esipfed.org/sparql` for the MMI ORR instance) is not described here. (General SPARQL information can be found [here](https://en.wikipedia.org/wiki/SPARQL), and regarding the current service used by the ORR to support the SPARQL interface [here](http://franz.com/agraph/support/documentation/current/http-protocol.html).) - Actual requests from this page are against the specific endpoint at `http://cor.esipfed.org/ont`.
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PostOnt(object):
    """Request-body model for registering an ontology (`POST /ont`).

    NOTE: Originally auto-generated by the swagger code generator. The
    repetitive per-attribute getter/setter pairs are now created
    programmatically right after the class definition, and the Python-2-only
    ``six.iteritems`` call was replaced with ``dict.items()`` (works on both
    Python 2 and 3). The public interface -- constructor keywords, property
    names, ``swagger_types``/``attribute_map`` and the ``to_dict``/``to_str``/
    comparison methods -- is unchanged.
    """

    # Attribute name -> declared Swagger type.
    swagger_types = {
        'iri': 'str',
        'original_iri': 'str',
        'name': 'str',
        'org_name': 'str',
        'visibility': 'str',
        'status': 'str',
        'user_name': 'str',
        'uploaded_filename': 'str',
        'uploaded_format': 'str',
        'contents': 'str',
        'format': 'str'
    }

    # Attribute name -> JSON key used in the API definition.
    attribute_map = {
        'iri': 'iri',
        'original_iri': 'originalIri',
        'name': 'name',
        'org_name': 'orgName',
        'visibility': 'visibility',
        'status': 'status',
        'user_name': 'userName',
        'uploaded_filename': 'uploadedFilename',
        'uploaded_format': 'uploadedFormat',
        'contents': 'contents',
        'format': 'format'
    }

    # Attribute name -> human-readable description; used as the docstring of
    # the generated property (kept from the original generated code).
    _attribute_docs = {
        'iri': 'The IRI of the ontology.',
        'original_iri': 'For a fully-hosted registration, the original IRI in '
                        'the provided contents whose entities are to be '
                        '"migrated" to the new IRI.',
        'name': 'The name for the ontology. If omitted, the ORR will try to '
                'get this information from standard metadata in the submitted '
                'ontology contents.',
        'org_name': 'ID of the organization that will own the ontology '
                    'registration. If omitted, the owner will be the '
                    'submitting user.',
        'visibility': 'One of: `owner` or `public`. The default visibility '
                      'is `owner`.',
        'status': 'One of: `draft`, `unstable`, `testing`, `stable`, '
                  "`deprecated`, `archaic`. There's no default value.",
        'user_name': 'Registered user making the request.',
        'uploaded_filename': 'Name of file previously uploaded via prior '
                             '`POST /ont/upload` request.',
        'uploaded_format': 'Format of the file previously uploaded via prior '
                           '`POST /ont/upload` request.',
        'contents': 'Direct contents of the ontology.',
        'format': 'Format of the `contents`.'
    }

    def __init__(self, iri=None, original_iri=None, name=None, org_name=None,
                 visibility=None, status=None, user_name=None,
                 uploaded_filename=None, uploaded_format=None, contents=None,
                 format=None):
        """PostOnt - a model defined in Swagger.

        All attributes default to None; only explicitly supplied (non-None)
        values are assigned through the corresponding property setters,
        mirroring the generated code's behaviour.
        """
        supplied = {
            'iri': iri,
            'original_iri': original_iri,
            'name': name,
            'org_name': org_name,
            'visibility': visibility,
            'status': status,
            'user_name': user_name,
            'uploaded_filename': uploaded_filename,
            'uploaded_format': uploaded_format,
            'contents': contents,
            'format': format
        }
        for attr in self.swagger_types:
            # Every backing slot exists even when no value was supplied.
            setattr(self, '_' + attr, None)
            if supplied[attr] is not None:
                setattr(self, attr, supplied[attr])

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                )
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, PostOnt):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other


def _postont_property(attr, doc):
    """Build the trivial get/set property pair for *attr*."""
    def _get(self):
        return getattr(self, '_' + attr)

    def _set(self, value):
        setattr(self, '_' + attr, value)

    return property(_get, _set, doc=doc)


# Install one simple property per swagger attribute, then remove the helper
# names so the module namespace is left unchanged.
for _attr in PostOnt.swagger_types:
    setattr(PostOnt, _attr, _postont_property(_attr, PostOnt._attribute_docs[_attr]))
del _attr
del _postont_property
| 31.519704
| 1,710
| 0.600375
|
acfd04f5ca72966fe3d7f94292c4f47d996e6ecc
| 6,855
|
py
|
Python
|
code/Python/poisson.py
|
anemptyarchive/Probability-Distribution
|
c44d1079051c0e079c4f009e6fb2d7e1d7b61011
|
[
"MIT"
] | null | null | null |
code/Python/poisson.py
|
anemptyarchive/Probability-Distribution
|
c44d1079051c0e079c4f009e6fb2d7e1d7b61011
|
[
"MIT"
] | null | null | null |
code/Python/poisson.py
|
anemptyarchive/Probability-Distribution
|
c44d1079051c0e079c4f009e6fb2d7e1d7b61011
|
[
"MIT"
] | null | null | null |
# Poisson distribution
# Libraries used
import numpy as np
from scipy.stats import poisson  # Poisson distribution
from scipy.special import gamma, loggamma  # gamma function, log-gamma function
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
#%%
### Computing probabilities
# Specify the parameter
lmd = 4.0
# Specify the value of the random variable
x = 2.0
# Compute the probability directly from the definition
prob = lmd**x / gamma(x + 1.0) * np.exp(-lmd)
print(prob)
# Compute the probability via the log of the definition (numerically safer)
log_prob = x * np.log(lmd) - loggamma(x + 1.0) - lmd
prob = np.exp(log_prob)
print(prob, log_prob)
# Compute the probability with the Poisson pmf function
prob = poisson.pmf(k=x, mu=lmd)
print(prob)
# Compute the probability with the log version of the Poisson pmf
log_prob = poisson.logpmf(k=x, mu=lmd)
prob = np.exp(log_prob)
print(prob, log_prob)
#%%
### Computing summary statistics
# Specify the parameter
lmd = 4.0
# Mean from the closed-form expression (E[x] = lambda)
E_x = lmd
print(E_x)
# Variance from the closed-form expression (V[x] = lambda)
V_x = lmd
print(V_x)
# Mean via the Poisson distribution function
E_x = poisson.mean(mu=lmd)
print(E_x)
# Variance via the Poisson distribution function
V_x = poisson.var(mu=lmd)
print(V_x)
#%%
### Visualizing the distribution
## Computing the distribution
# Specify the parameter
lmd = 4.0
# Create the grid of x points for plotting
x_vals = np.arange(np.ceil(lmd) * 4.0)
# Compute the Poisson distribution
probability = poisson.pmf(k=x_vals, mu=lmd)
#%%
## Plotting the distribution
# Plot the Poisson distribution
plt.figure(figsize=(12, 9))  # figure setup
plt.bar(x=x_vals, height=probability, color='#00A968')  # bar chart
plt.xlabel('x')  # x-axis label
plt.ylabel('probability')  # y-axis label
plt.suptitle('Poisson Distribution', fontsize=20)  # overall title
plt.title('$\lambda=' + str(lmd) + '$', loc='left')  # subplot title
plt.xticks(ticks=x_vals)  # x-axis ticks
plt.grid()  # grid lines
plt.show()  # draw
#%%
# Compute the statistics (mean and standard deviation)
E_x = lmd
s_x = np.sqrt(lmd)
# Plot the distribution with the statistics overlaid
plt.figure(figsize=(12, 9))  # figure setup
plt.bar(x=x_vals, height=probability, color='#00A968')  # distribution
plt.vlines(x=E_x, ymin=0.0, ymax=probability.max(), color='orange', linewidth=2.5, linestyle='--', label='$E[x]$')  # mean
plt.vlines(x=E_x - s_x, ymin=0.0, ymax=probability.max(), color='orange', linewidth=2.5, linestyle=':', label='$E[x] \pm \\sqrt{V[x]}$')  # mean - sd
plt.vlines(x=E_x + s_x, ymin=0.0, ymax=probability.max(), color='orange', linewidth=2.5, linestyle=':')  # mean + sd
plt.xlabel('x')  # x-axis label
plt.ylabel('probability')  # y-axis label
plt.suptitle('Poisson Distribution', fontsize=20)  # overall title
plt.title('$\lambda=' + str(lmd) + '$', loc='left')  # subplot title
plt.xticks(ticks=x_vals)  # x-axis ticks
plt.legend()  # legend
plt.grid()  # grid lines
plt.show()  # draw
#%%
### How the parameter shapes the distribution
# Values of lambda to animate over
lambda_vals = np.arange(start=0.0, stop=10.1, step=0.1)
print(len(lambda_vals))  # number of frames
# Create the grid of x points for plotting
x_vals = np.arange(np.ceil(lambda_vals.max()) * 2.0)
# Fix the y-axis (probability) upper limit across all frames
prob_max = np.max(poisson.pmf(k=x_vals, mu=lambda_vals.min())) + 0.1
# prob_max = 0.5  # (alternative: hard-coded upper limit)
# Initialize the figure
fig = plt.figure(figsize=(12, 9))  # figure setup
fig.suptitle('Poisson Distribution', fontsize=20)  # overall title
# Define the per-frame drawing routine
def update(i):
    """Draw frame i: the Poisson pmf for lambda_vals[i]."""
    # Clear the previous frame
    plt.cla()
    # Fetch the i-th parameter value
    lmd = lambda_vals[i]
    # Compute the Poisson distribution
    probability = poisson.pmf(k=x_vals, mu=lmd)
    # Plot the Poisson distribution
    plt.bar(x=x_vals, height=probability, color='#00A968')  # bar chart
    plt.xlabel('x')  # x-axis label
    plt.ylabel('probability')  # y-axis label
    plt.title('$\lambda=' + str(np.round(lmd, 1)) + '$', loc='left')  # title
    plt.xticks(ticks=x_vals)  # x-axis ticks
    plt.grid()  # grid lines
    plt.ylim(ymin=0.0, ymax=prob_max)  # y-axis range
# Build the gif animation
anime_prob = FuncAnimation(fig, update, frames=len(lambda_vals), interval=100)
# Save the gif
anime_prob.save('ProbabilityDistribution/Poisson_prob.gif')
#%%
### Generating random samples
## Draw the samples
# Specify the parameter
lmd = 4.0
# Specify the number of data points (sample size)
N = 1000
# Draw random numbers from the Poisson distribution
x_n = np.random.poisson(lam=lmd, size=N)
# Create the grid of x points for plotting
x_vals = np.arange(x_n.max() + 5.0)
# Tally the samples per value of x
frequency = np.array([np.sum(x_n == m) for m in x_vals])
# Compute the Poisson distribution
probability = poisson.pmf(k=x_vals, mu=lmd)
#%%
## Visualizing the samples
# Histogram of the samples
plt.figure(figsize=(12, 9))  # figure setup
plt.bar(x=x_vals, height=frequency, color='#00A968')  # histogram
plt.xlabel('x')  # x-axis label
plt.ylabel('frequency')  # y-axis label
plt.suptitle('Poisson Distribution', fontsize=20)  # overall title
plt.title('$\lambda=' + str(lmd) + ', N=' + str(N) +
          '=(' + ', '.join([str(f) for f in frequency]) + ')$', loc='left')  # title
plt.xticks(ticks=x_vals)  # x-axis ticks
plt.grid()  # grid lines
plt.show()  # draw
#%%
# Plot the sample proportions against the true distribution
plt.figure(figsize=(12, 9))  # figure setup
plt.bar(x=x_vals, height=probability, color='white', edgecolor='green', linestyle='--')  # true distribution
plt.bar(x=x_vals, height=frequency / N, color='#00A968', alpha=0.8)  # sample proportions
plt.xlabel('x')  # x-axis label
plt.ylabel('proportion')  # y-axis label
plt.suptitle('Poisson Distribution', fontsize=20)  # overall title
plt.title('$\lambda=' + str(lmd) + ', N=' + str(N) +
          '=(' + ', '.join([str(f) for f in frequency]) + ')$', loc='left')  # title
plt.xticks(ticks=x_vals)  # x-axis ticks
plt.grid()  # grid lines
plt.show()  # draw
#%%
## Animated visualization: frequencies
# Number of frames
N_frame = 100
# Initialize the figure
fig = plt.figure(figsize=(12, 9))  # figure setup
fig.suptitle('Poisson Distribution', fontsize=20)  # overall title
# Fix the y-axis (frequency) upper limit across all frames
freq_max = np.max([np.sum(x_n[:N_frame] == m) for m in x_vals]) + 1.0
# Define the per-frame drawing routine
def update(n):
    """Draw frame n: histogram of the first n+1 samples."""
    # Clear the previous frame
    plt.cla()
    # Tally the first n+1 random numbers
    frequency = np.array([np.sum(x_n[:(n+1)] == m) for m in x_vals])
    # Histogram of the samples drawn so far
    plt.bar(x=x_vals, height=frequency, color='#00A968', zorder=1)  # histogram
    plt.scatter(x=x_n[n], y=0.0, s=100, c='orange', zorder=2)  # newest sample
    plt.xlabel('x')  # x-axis label
    plt.ylabel('frequency')  # y-axis label
    plt.title('$\lambda=' + str(lmd) + ', N=' + str(n + 1) +
              '=(' + ', '.join([str(f) for f in frequency]) + ')$', loc='left')  # title
    plt.xticks(ticks=x_vals)  # x-axis ticks
    plt.grid()  # grid lines
    plt.ylim(ymin=-0.5, ymax=freq_max)  # y-axis range
# Build the gif animation
anime_freq = FuncAnimation(fig, update, frames=N_frame, interval=100)
# Save the gif
anime_freq.save('ProbabilityDistribution/Poisson_freq.gif')
#%%
## Animated visualization: proportions
# Number of frames
N_frame = 100
# Initialize the figure
fig = plt.figure(figsize=(12, 9))  # figure setup
fig.suptitle('Poisson Distribution', fontsize=20)  # overall title
# Fix the y-axis (proportion) upper limit across all frames
prop_max = np.max([np.sum(x_n[:N_frame] == m) for m in x_vals]) / N_frame + 0.1
# Define the per-frame drawing routine
def update(n):
    """Draw frame n: sample proportions of the first n+1 samples vs the pmf."""
    # Clear the previous frame
    plt.cla()
    # Tally the first n+1 random numbers
    frequency = np.array([np.sum(x_n[:(n+1)] == m) for m in x_vals])
    # Plot the sample proportions against the true distribution
    plt.bar(x=x_vals, height=probability, color='white', edgecolor='green', linestyle='--', zorder=1)  # true distribution
    plt.bar(x=x_vals, height=frequency / (n + 1), color='#00A968', alpha=0.8, zorder=2)  # sample proportions
    plt.scatter(x=x_n[n], y=0.0, s=100, c='orange', zorder=3)  # newest sample
    plt.xlabel('x')  # x-axis label
    plt.ylabel('proportion')  # y-axis label
    plt.title('$\lambda=' + str(lmd) + ', N=' + str(n + 1) +
              '=(' + ', '.join([str(f) for f in frequency]) + ')$', loc='left')  # title
    plt.xticks(ticks=x_vals)  # x-axis ticks
    plt.grid()  # grid lines
    plt.ylim(ymin=-0.01, ymax=prop_max)  # y-axis range
# Build the gif animation
anime_prop = FuncAnimation(fig, update, frames=N_frame, interval=100)
# Save the gif
anime_prop.save('ProbabilityDistribution/Poisson_prop.gif')
#%%
| 23.237288
| 148
| 0.646389
|
acfd05866abfa666181cc752925739a0dea67dc9
| 2,045
|
py
|
Python
|
V2/adb.py
|
seb97979797/Rok
|
767b8d8a4f5a9df2d11007732b01e41008e38c59
|
[
"MIT"
] | 63
|
2021-02-25T13:00:19.000Z
|
2022-03-29T21:18:47.000Z
|
adb.py
|
aDwCarrazzone/Rise-of-Kingdoms-Bot
|
51c39501ea038037df7a63067cfb3b65b5f67847
|
[
"MIT"
] | 93
|
2021-02-27T10:03:15.000Z
|
2022-03-30T07:29:34.000Z
|
adb.py
|
aDwCarrazzone/Rise-of-Kingdoms-Bot
|
51c39501ea038037df7a63067cfb3b65b5f67847
|
[
"MIT"
] | 35
|
2021-03-12T07:23:29.000Z
|
2022-03-13T15:04:06.000Z
|
from ppadb.client import Client as PPADBClient
from utils import resource_path
from utils import build_command
from filepath.file_relative_paths import FilePaths
import subprocess
import traceback
bridge = None
class Adb:
def __init__(self, host='127.0.0.1', port=5037):
self.client = PPADBClient(host, port)
def connect_to_device(self, host='127.0.0.1', port=5555):
adb_path = resource_path(FilePaths.ADB_EXE_PATH.value)
cmd = build_command(adb_path, 'connect', "{}:{}".format(host, port))
ret = subprocess.check_output(cmd, shell=True, stderr=subprocess.PIPE, encoding="utf-8", timeout=2)
return self.get_device(host, port)
def get_client_devices(self):
return self.client.devices()
def get_device(self, host='127.0.0.1', port=5555):
device = self.client.device('{}:{}'.format(host, port))
try:
if device is None:
self.connect_to_device(host, port)
device = self.client.device('{}:{}'.format(host, port))
except Exception as e:
traceback.print_exc()
return None
return device
def enable_adb(host='127.0.0.1', port=5037):
adb = None
try:
adb = Adb(host, port)
version = adb.client.version()
if version != 41:
raise RuntimeError('Error: require adb version 41, but version is {}'.format(version))
except RuntimeError as err:
adb_path = resource_path(FilePaths.ADB_EXE_PATH.value)
ret = subprocess.run(build_command(adb_path, '-P', str(port), 'kill-server', host), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
ret = subprocess.run(build_command(adb_path, '-P', str(port), 'connect', host), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if ret.returncode != 0:
raise RuntimeError('Error: fail to start adb server. \n({})'.format(ret))
return adb
| 32.983871
| 107
| 0.630807
|
acfd05975854b10643e07f66d8e0b7caae2d1bd4
| 4,907
|
py
|
Python
|
PYTHON/python-datastructures/algorithms/algorithms/graph/maximum_flow.py
|
Web-Dev-Collaborative/DS-ALGO-OFFICIAL
|
6d7195d33c28a0fe22f12231efffb39f4bf05c97
|
[
"Apache-2.0"
] | 11
|
2021-02-18T04:53:44.000Z
|
2022-01-16T10:57:39.000Z
|
PYTHON/python-datastructures/algorithms/algorithms/graph/maximum_flow.py
|
Web-Dev-Collaborative/DS-ALGO-OFFICIAL
|
6d7195d33c28a0fe22f12231efffb39f4bf05c97
|
[
"Apache-2.0"
] | 162
|
2021-03-09T01:52:11.000Z
|
2022-03-12T01:09:07.000Z
|
PYTHON/python-datastructures/algorithms/algorithms/graph/maximum_flow.py
|
Web-Dev-Collaborative/DS-ALGO-OFFICIAL
|
6d7195d33c28a0fe22f12231efffb39f4bf05c97
|
[
"Apache-2.0"
] | 8
|
2021-02-18T05:12:34.000Z
|
2022-03-06T19:02:14.000Z
|
"""
Given the capacity, source and sink of a graph,
computes the maximum flow from source to sink.
Input : capacity, source, sink
Output : maximum flow from source to sink
Capacity is a two-dimensional array that is v*v.
capacity[i][j] implies the capacity of the edge from i to j.
If there is no edge from i to j, capacity[i][j] should be zero.
"""
import queue
def dfs(capacity, flow, visit, vertices, idx, sink, current_flow=1 << 63):
# DFS function for ford_fulkerson algorithm.
if idx == sink:
return current_flow
visit[idx] = True
for nxt in range(vertices):
if not visit[nxt] and flow[idx][nxt] < capacity[idx][nxt]:
tmp = dfs(
capacity,
flow,
visit,
vertices,
nxt,
sink,
min(current_flow, capacity[idx][nxt] - flow[idx][nxt]),
)
if tmp:
flow[idx][nxt] += tmp
flow[nxt][idx] -= tmp
return tmp
return 0
def ford_fulkerson(capacity, source, sink):
# Computes maximum flow from source to sink using DFS.
# Time Complexity : O(Ef)
# E is the number of edges and f is the maximum flow in the graph.
vertices = len(capacity)
ret = 0
flow = [[0] * vertices for i in range(vertices)]
while True:
visit = [False for i in range(vertices)]
tmp = dfs(capacity, flow, visit, vertices, source, sink)
if tmp:
ret += tmp
else:
break
return ret
def edmonds_karp(capacity, source, sink):
# Computes maximum flow from source to sink using BFS.
# Time complexity : O(V*E^2)
# V is the number of vertices and E is the number of edges.
vertices = len(capacity)
ret = 0
flow = [[0] * vertices for i in range(vertices)]
while True:
tmp = 0
q = queue.Queue()
visit = [False for i in range(vertices)]
par = [-1 for i in range(vertices)]
visit[source] = True
q.put((source, 1 << 63))
# Finds new flow using BFS.
while q.qsize():
front = q.get()
idx, current_flow = front
if idx == sink:
tmp = current_flow
break
for nxt in range(vertices):
if not visit[nxt] and flow[idx][nxt] < capacity[idx][nxt]:
visit[nxt] = True
par[nxt] = idx
q.put((nxt, min(current_flow, capacity[idx][nxt] - flow[idx][nxt])))
if par[sink] == -1:
break
ret += tmp
parent = par[sink]
idx = sink
# Update flow array following parent starting from sink.
while parent != -1:
flow[parent][idx] += tmp
flow[idx][parent] -= tmp
idx = parent
parent = par[parent]
return ret
def dinic_bfs(capacity, flow, level, source, sink):
# BFS function for Dinic algorithm.
# Check whether sink is reachable only using edges that is not full.
vertices = len(capacity)
q = queue.Queue()
q.put(source)
level[source] = 0
while q.qsize():
front = q.get()
for nxt in range(vertices):
if level[nxt] == -1 and flow[front][nxt] < capacity[front][nxt]:
level[nxt] = level[front] + 1
q.put(nxt)
return level[sink] != -1
def dinic_dfs(capacity, flow, level, idx, sink, work, current_flow=1 << 63):
# DFS function for Dinic algorithm.
# Finds new flow using edges that is not full.
if idx == sink:
return current_flow
vertices = len(capacity)
while work[idx] < vertices:
nxt = work[idx]
if level[nxt] == level[idx] + 1 and flow[idx][nxt] < capacity[idx][nxt]:
tmp = dinic_dfs(
capacity,
flow,
level,
nxt,
sink,
work,
min(current_flow, capacity[idx][nxt] - flow[idx][nxt]),
)
if tmp > 0:
flow[idx][nxt] += tmp
flow[nxt][idx] -= tmp
return tmp
work[idx] += 1
return 0
def dinic(capacity, source, sink):
# Computes maximum flow from source to sink using Dinic algorithm.
# Time complexity : O(V^2*E)
# V is the number of vertices and E is the number of edges.
vertices = len(capacity)
flow = [[0] * vertices for i in range(vertices)]
ret = 0
while True:
level = [-1 for i in range(vertices)]
work = [0 for i in range(vertices)]
if not dinic_bfs(capacity, flow, level, source, sink):
break
while True:
tmp = dinic_dfs(capacity, flow, level, source, sink, work)
if tmp > 0:
ret += tmp
else:
break
return ret
| 31.455128
| 88
| 0.53495
|
acfd059cdca7a6de4f1774f9dba8b62de69e7097
| 14,659
|
py
|
Python
|
lib/flows/general/registry_test.py
|
darrenbilby/grr
|
aa8628b15d197f8c4541d1d24b76715bdddcac44
|
[
"Apache-2.0"
] | 1
|
2015-01-07T05:29:57.000Z
|
2015-01-07T05:29:57.000Z
|
lib/flows/general/registry_test.py
|
darrenbilby/grr
|
aa8628b15d197f8c4541d1d24b76715bdddcac44
|
[
"Apache-2.0"
] | null | null | null |
lib/flows/general/registry_test.py
|
darrenbilby/grr
|
aa8628b15d197f8c4541d1d24b76715bdddcac44
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for the registry flows."""
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.flows.general import file_finder
# pylint: disable=unused-import
from grr.lib.flows.general import registry
# pylint: enable=unused-import
from grr.lib.flows.general import transfer
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class RegistryFlowTest(test_lib.FlowTestsBaseclass):
def setUp(self):
super(RegistryFlowTest, self).setUp()
self.vfs_overrider = test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.REGISTRY, test_lib.FakeRegistryVFSHandler)
self.vfs_overrider.Start()
def tearDown(self):
super(RegistryFlowTest, self).tearDown()
self.vfs_overrider.Stop()
class TestRegistryFinderFlow(RegistryFlowTest):
"""Tests for the RegistryFinder flow."""
def setUp(self):
super(TestRegistryFinderFlow, self).setUp()
self.output_path = "analysis/file_finder"
self.client_mock = action_mocks.ActionMock(
"Find", "TransferBuffer", "HashBuffer", "FingerprintFile",
"FingerprintFile", "Grep", "StatFile")
def RunFlow(self, keys_paths=None, conditions=None):
if keys_paths is None:
keys_paths = ["HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"]
if conditions is None:
conditions = []
for _ in test_lib.TestFlowHelper(
"RegistryFinder", self.client_mock, client_id=self.client_id,
keys_paths=keys_paths, conditions=conditions,
token=self.token, output=self.output_path):
pass
def AssertNoResults(self):
self.assertRaises(aff4.InstantiationError, aff4.FACTORY.Open,
self.client_id.Add(self.output_path),
aff4_type="RDFValueCollection",
token=self.token)
def GetResults(self):
fd = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
aff4_type="RDFValueCollection",
token=self.token)
return list(fd)
def testFindsNothingIfNothingMatchesTheGlob(self):
self.RunFlow(["HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/NonMatch*"])
self.AssertNoResults()
def testFindsKeysWithSingleGlobWithoutConditions(self):
self.RunFlow(["HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"])
results = self.GetResults()
self.assertEqual(len(results), 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
self.assertTrue([r for r in results
if r.stat_entry.aff4path.Basename() == "Sidebar"])
self.assertTrue([r for r in results
if r.stat_entry.aff4path.Basename() == "MctAdmin"])
def testFindsKeysWithTwoGlobsWithoutConditions(self):
self.RunFlow(["HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Side*",
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Mct*"])
results = self.GetResults()
self.assertEqual(len(results), 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
self.assertTrue([r for r in results
if r.stat_entry.aff4path.Basename() == "Sidebar"])
self.assertTrue([r for r in results
if r.stat_entry.aff4path.Basename() == "MctAdmin"])
def testFindsKeyWithInterpolatedGlobWithoutConditions(self):
# Initialize client's knowledge base in order for the interpolation
# to work.
user = rdf_client.KnowledgeBaseUser(
sid="S-1-5-21-2911950750-476812067-1487428992-1001")
kb = rdf_client.KnowledgeBase(users=[user])
with aff4.FACTORY.Open(
self.client_id, mode="rw", token=self.token) as client:
client.Set(client.Schema.KNOWLEDGE_BASE, kb)
self.RunFlow(["HKEY_USERS/%%users.sid%%/Software/Microsoft/Windows/"
"CurrentVersion/*"])
results = self.GetResults()
self.assertEqual(len(results), 1)
key = ("/HKEY_USERS/S-1-5-21-2911950750-476812067-1487428992-1001/"
"Software/Microsoft/Windows/CurrentVersion/Explorer")
self.assertEqual(results[0].stat_entry.aff4path,
"aff4:/C.1000000000000000/registry" + key)
self.assertEqual(results[0].stat_entry.pathspec.path, key)
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfNothingMatchesLiteralMatchCondition(self):
value_literal_match = file_finder.FileFinderContentsLiteralMatchCondition(
literal="CanNotFindMe")
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.VALUE_LITERAL_MATCH,
value_literal_match=value_literal_match)])
self.AssertNoResults()
def testFindsKeyIfItMatchesLiteralMatchCondition(self):
value_literal_match = file_finder.FileFinderContentsLiteralMatchCondition(
literal="Windows Sidebar\\Sidebar.exe")
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.VALUE_LITERAL_MATCH,
value_literal_match=value_literal_match)])
results = self.GetResults()
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].matches), 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(results[0].stat_entry.aff4path,
"aff4:/C.1000000000000000/registry/HKEY_USERS/S-1-5-20/"
"Software/Microsoft/Windows/CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.path,
"/HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/"
"CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfRegexMatchesNothing(self):
value_regex_match = file_finder.FileFinderContentsRegexMatchCondition(
regex=".*CanNotFindMe.*")
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)])
self.AssertNoResults()
def testFindsKeyIfItMatchesRegexMatchCondition(self):
value_regex_match = file_finder.FileFinderContentsRegexMatchCondition(
regex="Windows.+\\.exe")
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)])
results = self.GetResults()
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].matches), 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(results[0].stat_entry.aff4path,
"aff4:/C.1000000000000000/registry/HKEY_USERS/S-1-5-20/"
"Software/Microsoft/Windows/CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.path,
"/HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/"
"CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfModiciationTimeConditionMatchesNothing(self):
modification_time = file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime().FromSecondsFromEpoch(0),
max_last_modified_time=rdfvalue.RDFDatetime().FromSecondsFromEpoch(1))
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.MODIFICATION_TIME,
modification_time=modification_time)])
self.AssertNoResults()
def testFindsKeysIfModificationTimeConditionMatches(self):
modification_time = file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=
rdfvalue.RDFDatetime().FromSecondsFromEpoch(1247546054 - 1),
max_last_modified_time=
rdfvalue.RDFDatetime().FromSecondsFromEpoch(1247546054 + 1))
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.MODIFICATION_TIME,
modification_time=modification_time)])
results = self.GetResults()
self.assertEqual(len(results), 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
self.assertTrue([r for r in results
if r.stat_entry.aff4path.Basename() == "Sidebar"])
self.assertTrue([r for r in results
if r.stat_entry.aff4path.Basename() == "MctAdmin"])
def testFindsKeyWithLiteralAndModificaitonTimeConditions(self):
modification_time = file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=
rdfvalue.RDFDatetime().FromSecondsFromEpoch(1247546054 - 1),
max_last_modified_time=
rdfvalue.RDFDatetime().FromSecondsFromEpoch(1247546054 + 1))
value_literal_match = file_finder.FileFinderContentsLiteralMatchCondition(
literal="Windows Sidebar\\Sidebar.exe")
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.MODIFICATION_TIME,
modification_time=modification_time),
registry.RegistryFinderCondition(
condition_type=
registry.RegistryFinderCondition.Type.VALUE_LITERAL_MATCH,
value_literal_match=value_literal_match)])
results = self.GetResults()
self.assertEqual(len(results), 1)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
self.assertEqual(results[0].stat_entry.aff4path,
"aff4:/C.1000000000000000/registry/HKEY_USERS/S-1-5-20/"
"Software/Microsoft/Windows/CurrentVersion/Run/Sidebar")
def testSizeCondition(self):
# There are two values, one is 20 bytes, the other 53.
self.RunFlow(
["HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"],
[registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type.SIZE,
size=file_finder.FileFinderSizeCondition(min_file_size=50))])
results = self.GetResults()
self.assertEqual(len(results), 1)
self.assertGreater(results[0].stat_entry.st_size, 50)
class TestRegistryFlows(RegistryFlowTest):
"""Test the Run Key and MRU registry flows."""
def testRegistryMRU(self):
"""Test that the MRU discovery flow. Flow is a work in Progress."""
# Mock out the Find client action.
client_mock = action_mocks.ActionMock("Find")
# Add some user accounts to this client.
fd = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
users = fd.Schema.USER()
users.Append(rdf_client.User(
username="testing", domain="testing-PC",
homedir=r"C:\Users\testing", sid="S-1-5-21-2911950750-476812067-"
"1487428992-1001"))
fd.Set(users)
fd.Close()
# Run the flow in the emulated way.
for _ in test_lib.TestFlowHelper("GetMRU", client_mock,
client_id=self.client_id,
token=self.token):
pass
# Check that the key was read.
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(
"registry/HKEY_USERS/S-1-5-21-2911950750-476812067-1487428992-1001/"
"Software/Microsoft/Windows/CurrentVersion/Explorer/"
"ComDlg32/OpenSavePidlMRU/dd/0"), token=self.token)
self.assertEqual(fd.__class__.__name__, "VFSFile")
s = fd.Get(fd.Schema.STAT)
# TODO(user): Make this test better when the MRU flow is complete.
self.assertTrue(s.registry_data)
def testCollectRunKeyBinaries(self):
"""Read Run key from the client_fixtures to test parsing and storage."""
test_lib.ClientFixture(self.client_id, token=self.token)
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(
"TransferBuffer", "StatFile", "Find",
"HashBuffer", "FingerprintFile", "ListDirectory")
# Get KB initialized
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
with test_lib.Instrument(
transfer.MultiGetFile, "Start") as getfile_instrument:
# Run the flow in the emulated way.
for _ in test_lib.TestFlowHelper(
"CollectRunKeyBinaries", client_mock, client_id=self.client_id,
token=self.token):
pass
# Check MultiGetFile got called for our runkey file
download_requested = False
for pathspec in getfile_instrument.args[0][0].args.pathspecs:
if pathspec.path == u"C:\\Windows\\TEMP\\A.exe":
download_requested = True
self.assertTrue(download_requested)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 40.271978
| 80
| 0.684426
|
acfd06aaf00363ad811792f888488cc57ca24ff2
| 3,231
|
py
|
Python
|
BiomechData.py
|
DrDanParker/Biomechanical-Pandas
|
627fef4481e5af3e05894bd0096fc1862697a906
|
[
"MIT"
] | 1
|
2018-12-31T11:59:55.000Z
|
2018-12-31T11:59:55.000Z
|
BiomechData.py
|
DrDanParker/Biomechanical-Pandas
|
627fef4481e5af3e05894bd0096fc1862697a906
|
[
"MIT"
] | null | null | null |
BiomechData.py
|
DrDanParker/Biomechanical-Pandas
|
627fef4481e5af3e05894bd0096fc1862697a906
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""############################################################################
###
### Biomechanical Pandas Data Handling
### This file is part of Biomechanical-Pandas
### This file was created by Dr Daniel Parker on 23/12/18
### Twitter: @DrDanParker GitHub:https://github.com/DrDanParker
###
### Copyright (C) 2018 University of Salford - All Rights Reserved
### You may use, distribute and modify this code under the terms of MIT Licence
### See LICENSE or go to https://tldrlegal.com/license/mit-license for full licence details
###
### Based on/Learnt from the following:
### General panda docs and online tutorials
### Learning the Pandas Library - Matt Harrison 2016
###
############################################################################"""
import os
import pandas as pd
class BiomechData:
# global bld_flist
def __init__(self, fpath):
self.fpath = fpath
# self.data = pd.read_csv(self.fname,index_col=0)
def openPD(self,fname):
fdat = pd.read_csv(fname,sep='\t',skiprows=list(range(9)))
return(fdat)
def bld_flist(self,ftype='.asc'):
''' builds list of all files in directory+subs with given file type '''
flist = [os.path.join(r,file) for r,d,f in os.walk(self.fpath) for file in f
if file.endswith(ftype)]
return(flist)
def grp_by_part(self,seperator='_',level=0):
''' builds nested list of all files base on naming convention using _ -
can also be used to group files based on subdirectory using / or \\'''
filelist = self.bld_flist()
pref = []
used = set()
for file in filelist:
pref.append(file.split(seperator)[level])
unique = [x for x in pref if x not in used and (used.add(x) or True)]
groups = []
for i in range(0,len(unique)):
grp = []
for j in range(0,len(pref)):
if pref[j] == unique[i]:
grp.append(filelist[j])
groups.append(grp)
return(groups)
def join_file(self,files,pad=100):
''' builds one dataframe from multiple file with the same data format '''
fname = os.path.splitext(os.path.basename(files[0]))[0]
dat =[]
ind = range(pad)
col = self.openPD(files[0]).columns
if len(files) > 1:
for file in files:
dat.append(self.openPD(file))
pad_ = pd.DataFrame(index=ind,columns=col)
pad_ = pad_.fillna(0)
dat.append(pad_)
odat = pd.concat(dat,axis=0)
else:
odat = self.openPD(files[0])
return odat,fname
################################################################################
### Run Script
################################################################################
if __name__ == "__main__":
mydir = 'C:/Temp/Test/' # either move to working directory or update to location
d = BiomechData(mydir)
data = d.join_file(files=d.bld_flist())
print(data)
# flist = d.bld_flist()
| 34.741935
| 91
| 0.516249
|
acfd0705bd9b70723abcaea2d5f9da7ab756c263
| 914
|
py
|
Python
|
kaffepause/relationships/exceptions.py
|
Eirsteir/kaffepause
|
77535f057e68d575831e3a44f36285ab2fe621d4
|
[
"MIT"
] | null | null | null |
kaffepause/relationships/exceptions.py
|
Eirsteir/kaffepause
|
77535f057e68d575831e3a44f36285ab2fe621d4
|
[
"MIT"
] | 2
|
2022-02-28T21:04:22.000Z
|
2022-03-01T21:05:37.000Z
|
kaffepause/relationships/exceptions.py
|
Eirsteir/kaffepause
|
77535f057e68d575831e3a44f36285ab2fe621d4
|
[
"MIT"
] | null | null | null |
from django.utils.translation import gettext_lazy as _
from kaffepause.common.exceptions import DefaultError
class RelationshipAlreadyExists(DefaultError):
default_message = _("This relationship already exists")
class CannotAcceptFriendRequest(DefaultError):
default_message = _("You cannot accept this friend request")
class FriendRequestDoesNotExist(DefaultError):
default_message = _("This friend request does not exist")
class CannotRejectFriendRequest(DefaultError):
default_message = _("You cannot reject this friend request")
class CannotUnfriendUser(DefaultError):
default_message = _("You cannot unfriend this user, you are not friends")
class CannotFollowUser(DefaultError):
default_message = _("You cannot follow this user, you are not friends")
class CannotUnfollowUser(DefaultError):
default_message = _("You cannot unfollow this user, you are not friends")
| 28.5625
| 77
| 0.78884
|
acfd09ef5fa7e3282c39965b4456b2e3b417c85a
| 621
|
py
|
Python
|
rplugin/python3/denite/filter/sorter/reverse.py
|
hiberabyss/denite.nvim
|
54242329c9e7c10d044137fad23082b982313cba
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/filter/sorter/reverse.py
|
hiberabyss/denite.nvim
|
54242329c9e7c10d044137fad23082b982313cba
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/filter/sorter/reverse.py
|
hiberabyss/denite.nvim
|
54242329c9e7c10d044137fad23082b982313cba
|
[
"MIT"
] | null | null | null |
# ============================================================================
# FILE: sorter/reverse.py
# AUTHOR: Jacob Niehus <jacob.niehus at gmail.com>
# DESCRIPTION: Simple filter to reverse the order of candidates
# License: MIT license
# ============================================================================
from denite.filter.base import Base
class Filter(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'sorter/reverse'
self.description = 'reverse order of candidates'
def filter(self, context):
return list(reversed(context['candidates']))
| 29.571429
| 78
| 0.515298
|
acfd0c38a83a77cb0b98b7af3686457464b32707
| 8,658
|
py
|
Python
|
P7/dsa_heap_test.py
|
MC-DeltaT/DSA-Practicals
|
5c77cac1cfee5d756b84722e563813c153486770
|
[
"MIT"
] | null | null | null |
P7/dsa_heap_test.py
|
MC-DeltaT/DSA-Practicals
|
5c77cac1cfee5d756b84722e563813c153486770
|
[
"MIT"
] | null | null | null |
P7/dsa_heap_test.py
|
MC-DeltaT/DSA-Practicals
|
5c77cac1cfee5d756b84722e563813c153486770
|
[
"MIT"
] | null | null | null |
from dsa_heap import DSAHeap, _trickle_down, _trickle_up, heapify, heapsort
from collections import defaultdict
from itertools import takewhile
import random
from typing import DefaultDict, List, Sequence
import unittest
class DSAHeapToolsTest(unittest.TestCase):
TEST_SIZE = 1000
def test_trickle_down_1(self) -> None:
# Root node belongs in bottom level.
# Bottom level of tree full.
array = [0, 10, 9, 7, 8, 6, 5]
expected = [10, 8, 9, 7, 0, 6, 5]
_trickle_down(array)
self.assertListEqual(expected, array)
# Bottom level of tree not full.
array = [0, 9, 10, 7, 8, 6, 5, 4]
expected = [10, 9, 6, 7, 8, 0, 5, 4]
_trickle_down(array)
self.assertListEqual(expected, array)
def test_trickle_down_2(self) -> None:
# Root node belongs above bottom level.
# Bottom level of tree full.
array = [8, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 4]
expected = [12, 10, 11, 8, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 4]
_trickle_down(array)
self.assertListEqual(expected, array)
# Bottom level of tree not full.
array = [8, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 4, 2]
expected = [12, 10, 11, 8, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 4, 2]
_trickle_down(array)
self.assertListEqual(expected, array)
# Root node belongs at root.
array = [8, 6, 7, 5, 4, 2, 1, 0]
expected = [8, 6, 7, 5, 4, 2, 1, 0]
_trickle_down(array)
self.assertListEqual(expected, array)
def test_trickle_down_3(self) -> None:
# Edge cases.
# 0 elements.
array = []
expected = []
_trickle_down(array)
self.assertListEqual(expected, array)
# 1 element.
array = [10]
expected = [10]
_trickle_down(array)
self.assertListEqual(expected, array)
# 2 elements.
array = [5, 10]
expected = [10, 5]
_trickle_down(array)
self.assertListEqual(expected, array)
def test_trickle_up_1(self) -> None:
# Last node belongs at root.
# Bottom level of tree full.
array = [9, 8, 7, 5, 6, 4, 3, 10]
expected = [10, 9, 7, 8, 6, 4, 3, 5]
_trickle_up(array)
self.assertListEqual(expected, array)
# Bottom level of tree not full.
array = [9, 8, 7, 5, 6, 4, 10]
expected = [10, 8, 9, 5, 6, 4, 7]
_trickle_up(array)
self.assertListEqual(expected, array)
def test_trickle_up_2(self) -> None:
# Last node doesn't belong at root.
# Bottom level of tree full.
array = [14, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13]
expected = [14, 12, 13, 10, 9, 8, 11, 6, 5, 4, 3, 2, 1, 0, 7]
_trickle_up(array)
self.assertListEqual(expected, array)
# Bottom level of tree not full.
array = [14, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 5, 11]
expected = [14, 12, 11, 11, 9, 8, 7, 10, 5, 4, 3, 2, 1, 0, 5, 6]
_trickle_up(array)
self.assertListEqual(expected, array)
def test_trickle_up_3(self) -> None:
# Edge cases.
# 0 elements.
array = []
expected = []
_trickle_up(array)
self.assertListEqual(expected, array)
# 1 element.
array = [10]
expected = [10]
_trickle_up(array)
self.assertListEqual(expected, array)
# 2 elements.
array = [3, 7]
expected = [7, 3]
_trickle_up(array)
self.assertListEqual(expected, array)
def test_heapify_1(self) -> None:
# Array already forms a heap.
# Bottom level of tree full.
array = [15, 14, 14, 13, 10, 11, 9, 12, 12, 9, 4, 10, 8, 8, 2]
expected = array.copy()
heapify(array)
self.assertListEqual(expected, array)
# Bottom level of tree not full.
array = [15, 14, 14, 13, 10, 11, 9, 12, 12, 9, 4, 10, 8, 8, 2, 3, 1, 1]
expected = array.copy()
heapify(array)
self.assertListEqual(expected, array)
def test_heapify_2(self) -> None:
# Array doesn't already form a heap.
# Bottom level of tree full.
array = [10, 7, 10, 14, 7, 8, 9]
expected = [14, 10, 10, 7, 7, 8, 9]
heapify(array)
self.assertListEqual(expected, array)
# Bottom level of tree not full.
array = [6, 4, 8, 2, 10, 7, 0, 11]
expected = [11, 10, 8, 4, 6, 7, 0, 2]
heapify(array)
self.assertListEqual(expected, array)
def test_heapify_3(self) -> None:
# Edge cases.
# 0 elements.
array = []
expected = []
heapify(array)
self.assertListEqual(expected, array)
# 1 element.
array = [10]
expected = [10]
heapify(array)
self.assertListEqual(expected, array)
# 2 elements.
array = [5, 10]
expected = [10, 5]
heapify(array)
self.assertListEqual(expected, array)
def test_heapify_4(self) -> None:
# Random data of varying size.
for n in range(self.TEST_SIZE):
array = random.choices(range(self.TEST_SIZE), k=n)
heapify(array)
self.assertTrue(self._is_heap(array))
def test_heapsort_1(self) -> None:
# Random data of varying size.
for n in range(self.TEST_SIZE):
array = random.choices(range(self.TEST_SIZE), k=n)
heapsort(array)
self.assertListEqual(sorted(array), array)
def test_heapsort_2(self) -> None:
# Edge cases.
# 0 elements.
array = []
expected = []
heapsort(array)
self.assertListEqual(expected, array)
# 1 element.
array = [5]
expected = [5]
heapsort(array)
self.assertListEqual(expected, array)
def test_heapsort_3(self) -> None:
# Random but already sorted data of varying size.
for n in range(self.TEST_SIZE):
array = random.choices(range(self.TEST_SIZE), k=n)
array = sorted(array)
expected = array.copy()
heapsort(array)
self.assertListEqual(expected, array)
@staticmethod
def _is_heap(seq: Sequence) -> bool:
res = True
for i in takewhile(lambda _: res, range(1, len(seq))):
res = seq[(i - 1) // 2] >= seq[i]
return res
class DSAHeapTest(unittest.TestCase):
    """Randomised exercises for the DSAHeap priority queue."""

    TEST_SIZE = 10000

    def setUp(self) -> None:
        self._heap = DSAHeap(self.TEST_SIZE)

    def test_add_remove(self) -> None:
        # Map each priority to the payload values stored under it.
        priorities = random.choices(range(self.TEST_SIZE), k=self.TEST_SIZE)
        values: DefaultDict[int, List[int]] = defaultdict(list)
        for priority in priorities:
            values[priority].append(random.randrange(self.TEST_SIZE))
        for priority, payloads in values.items():
            for payload in payloads:
                self._heap.add(payload, priority)
        # Removal must yield items in descending priority order.
        for priority in sorted(priorities, reverse=True):
            self.assertIn(self._heap.remove(), values[priority])

    def test_size(self) -> None:
        expected = 0
        self.assertEqual(expected, self._heap.size)
        # Size grows by exactly one per add...
        for _ in range(self.TEST_SIZE):
            priority = random.randrange(self.TEST_SIZE)
            value = random.randrange(self.TEST_SIZE)
            self._heap.add(value, priority)
            expected += 1
            self.assertEqual(expected, self._heap.size)
        # ...and shrinks by exactly one per remove.
        for _ in range(self.TEST_SIZE):
            self._heap.remove()
            expected -= 1
            self.assertEqual(expected, self._heap.size)

    def test_is_empty(self) -> None:
        self.assertTrue(self._heap.is_empty)
        for _ in range(self.TEST_SIZE):
            priority = random.randrange(self.TEST_SIZE)
            value = random.randrange(self.TEST_SIZE)
            self._heap.add(value, priority)
            self.assertFalse(self._heap.is_empty)
        for _ in range(self.TEST_SIZE):
            self.assertFalse(self._heap.is_empty)
            self._heap.remove()
        self.assertTrue(self._heap.is_empty)

    def test_is_full(self) -> None:
        # Capacity is TEST_SIZE, so the heap is full only after exactly
        # TEST_SIZE adds.
        for _ in range(self.TEST_SIZE):
            priority = random.randrange(self.TEST_SIZE)
            value = random.randrange(self.TEST_SIZE)
            self.assertFalse(self._heap.is_full)
            self._heap.add(value, priority)
        self.assertTrue(self._heap.is_full)
        for _ in range(self.TEST_SIZE):
            self._heap.remove()
            self.assertFalse(self._heap.is_full)
if __name__ == "__main__":
    # Run the heap test suite directly; verbosity=2 prints one line per test.
    unittest.main(verbosity=2)
| 30.272727
| 79
| 0.556826
|
acfd0c81f25ddc7b2567e9e5517e5f07b9e25908
| 12,399
|
py
|
Python
|
latextools/convert.py
|
cduck/latextools
|
8161acc88d669951b2b5e1e3e6888b9fc918b49a
|
[
"MIT"
] | 13
|
2020-06-02T22:57:13.000Z
|
2022-03-26T23:07:27.000Z
|
latextools/convert.py
|
cduck/latextools
|
8161acc88d669951b2b5e1e3e6888b9fc918b49a
|
[
"MIT"
] | 3
|
2021-06-03T14:38:17.000Z
|
2022-02-28T23:05:48.000Z
|
latextools/convert.py
|
cduck/latextools
|
8161acc88d669951b2b5e1e3e6888b9fc918b49a
|
[
"MIT"
] | 2
|
2020-08-19T05:44:23.000Z
|
2021-06-03T01:56:48.000Z
|
import base64
from pathlib import Path
import subprocess
import tempfile
import re
import urllib
import fs
from .project import LatexProject, LatexError
from .package import LatexPackage
from .content import BasicContent
from .document import DocumentConfig
from .command import LatexCommand
# LaTeX packages loaded into every SVG-embedding document.  `svg` supplies
# \includesvg/\svgwidth support for Inkscape's *_svg-tex.pdf_tex output;
# qcircuit's options enable its braket/qm macro sets.
svg_packages = (
    LatexPackage('xcolor'),
    LatexPackage('amsmath'),
    LatexPackage('amssymb'),
    LatexPackage('amsfonts'),
    LatexPackage('svg'),
    LatexPackage('qcircuit', options=['braket', 'qm']),
)
# Extra preamble commands shared by the converters below; empty by default
# (text_to_svg() extends a copy of this when a fill color is requested).
svg_commands = (
)
def svg_to_pdf(fname_or_drawing=None, text=None, data=None, file=None,
               fit_drawing=False, latex_width=None, out_name=None,
               only_final=True, config=DocumentConfig('standalone'),
               **pdf_args):
    '''Compile an SVG whose text is typeset by LaTeX into a PDF.

    Requires the inkscape command line tool.  Exactly one of
    fname_or_drawing (a path, or a drawSvg-style object exposing asSvg()),
    text, data or file must be given.  latex_width, when set, is assigned
    to \\svgwidth before the image is included.  When out_name is given the
    PDF (and, unless only_final, the intermediate *_svg-tex.* artifacts)
    are saved under that basename.  Returns the compile_pdf() result.
    '''
    if ((fname_or_drawing is not None)
            + (text is not None)
            + (data is not None)
            + (file is not None)) != 1:
        raise TypeError(
            'Specify exactly one of fname_or_drawing, text, data, file, or '
            'fname.')
    fname = None
    # NOTE(review): truthiness test here, unlike the `is not None` checks
    # above — an empty-string fname would fall through; confirm intended.
    if fname_or_drawing:
        if isinstance(fname_or_drawing, (str, bytes, Path)):
            fname = fname_or_drawing
        else:
            text = fname_or_drawing.asSvg()
    proj = LatexProject()
    proj.add_file('image.svg', text=text, data=data, file=file, fname=fname)
    if latex_width is None:
        width_str = ''
    else:
        width_str = r'\def\svgwidth'+f'{{{latex_width}}}\n'
    content = BasicContent(width_str+r'\input{image_svg-tex.pdf_tex}',
                           svg_packages, svg_commands)
    doc = content.as_document('main.tex', config=config)
    proj.add_file(doc)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_fs = fs.open_fs(tmp_dir, writeable=False)
        fs.copy.copy_file(proj.proj_fs, 'image.svg', tmp_fs, 'image.svg')
        # '-D' asks inkscape to fit the export to the drawing, not the page.
        if fit_drawing:
            options = ('-z', '-D', '--export-latex', '--export-type=pdf')
        else:
            options = ('-z', '--export-latex', '--export-type=pdf')
        _run_inkscape(proj, 'image.svg', tmp_dir, options=options)
        # The split pdf/pdf_tex pair replaces the original SVG from here on.
        tmp_fs.remove('image.svg')
        proj.proj_fs.remove('image.svg')
        r = proj.compile_pdf(options=['-shell-escape', '-halt-on-error',
                                      '-file-line-error', '-interaction',
                                      'nonstopmode'],
                             tmp_dir=tmp_dir,
                             #inkscape_list=['image.svg'],
                             **pdf_args)
        if out_name is not None:
            if out_name.endswith('.svg') or out_name.endswith('.pdf'):
                out_name = out_name[:-4]
            r.save(out_name + '.pdf')
            def save_intermediate(fname, ext):
                # Copy a named build artifact next to the final output.
                out_fname = out_name + ext
                data = None  # NOTE(review): unused local
                if tmp_fs.exists(fname):
                    fs.copy.copy_file(tmp_fs, fname, '.', out_fname)
            if not only_final:
                save_intermediate('image_svg-tex.pdf_tex', '_svg-tex.pdf_tex')
                save_intermediate('image_svg-tex.pdf', '_svg-tex.pdf')
        return r
def _run_inkscape(proj, fpath, cwd,
                  options=('-z', '--export-latex', '--export-type=pdf')):
    """Run inkscape on *fpath* (relative to *cwd*), exporting the split
    <name>_svg-tex.pdf / pdf_tex pair.  Raises LatexError when the binary
    is missing or the export fails."""
    target = fpath if fpath.endswith('.svg') else fpath + '.svg'
    target = target[:-4] + '_svg-tex.pdf'
    cmd = ['inkscape', *options, fpath, '-o', target]
    try:
        proc = subprocess.Popen(cmd,
                                cwd=cwd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except FileNotFoundError:
        raise LatexError('inkscape command not found.')
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Surface inkscape's own output as the error message.
        pieces = [stream.decode() for stream in (stdout, stderr) if stream]
        raise LatexError(''.join(pieces))
class Svg:
    """An SVG document held as text, with helpers to save it, rasterize it
    and embed it into drawSvg drawings or data URIs."""

    # Control characters that prevent rendering when the SVG is embedded in
    # a data URI; they are stripped out entirely.
    STRIP_CHARS = ('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f\x10'
                   '\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e'
                   '\x1f')

    def __init__(self, content):
        # content: full SVG markup as a string.
        self.content = content
        # Pull width/height out of the root element's attributes.
        # NOTE(review): the '.' in '(.[0-9]+)?' is an unescaped wildcard;
        # it still matches a decimal point, so normal output parses fine.
        w_str = next(re.finditer(r'width="([0-9]+(.[0-9]+)?)', self.content)
                     ).group(1)
        h_str = next(re.finditer(r'height="([0-9]+(.[0-9]+)?)', self.content)
                     ).group(1)
        # 4/3 converts points to pixels (see the same factor in toDrawables).
        self.width, self.height = float(w_str)*4/3, float(h_str)*4/3

    def _repr_svg_(self):
        # Jupyter rich-display hook.
        return self.content

    def save(self, fname):
        """Write the SVG text to *fname*."""
        with open(fname, 'w') as f:
            f.write(self.content)

    def rasterize(self, to_file=None, scale=1):
        '''Rasterize to an image; requires the drawSvg Python package and cairo.'''
        import drawSvg as draw
        if scale != 1:
            # Delegate scaling to a wrapping Drawing.
            return self.as_drawing(scale=scale).rasterize(to_file)
        else:
            if to_file:
                return draw.Raster.fromSvgToFile(self.content, to_file)
            else:
                return draw.Raster.fromSvg(self.content)

    def as_drawing(self, scale=1):
        '''Wrap in a drawSvg Drawing; requires the drawSvg package and cairo.'''
        import drawSvg as draw
        d = draw.Drawing(self.width*scale, self.height*scale)
        d.draw(self, x=0, y=0, scale=scale)
        return d

    def asDataUri(self, strip_chars=STRIP_CHARS):
        '''Returns a data URI with base64 encoding.'''
        data = self.content
        search = re.compile('|'.join(strip_chars))
        data_safe = search.sub(lambda m: '', data)
        b64 = base64.b64encode(data_safe.encode())
        return 'data:image/svg+xml;base64,' + b64.decode(encoding='ascii')

    def asUtf8DataUri(self, unsafe_chars='"', strip_chars=STRIP_CHARS):
        '''Returns a data URI without base64 encoding.

        The characters '#&%' are always escaped. '#' and '&' break parsing of
        the data URI. If '%' is not escaped, plain text like '%50' will be
        incorrectly decoded to 'P'. The characters in `strip_chars` cause the
        SVG not to render even if they are escaped.
        '''
        data = self.content
        unsafe_chars = (unsafe_chars or '') + '#&%'
        # Percent-escape the unsafe characters, drop the strip characters.
        replacements = {
            char: urllib.parse.quote(char, safe='')
            for char in unsafe_chars
        }
        replacements.update({
            char: ''
            for char in strip_chars
        })
        search = re.compile('|'.join(map(re.escape, replacements.keys())))
        data_safe = search.sub(lambda m: replacements[m.group(0)], data)
        return 'data:image/svg+xml;utf8,' + data_safe

    def toDrawables(self, elements, x=0, y=0, center=False, scale=1,
                    text_anchor=None, **kwargs):
        """Split this SVG into drawSvg Raw drawables (defs + content) placed
        at (x, y); *elements* is the drawSvg elements module."""
        scale = scale*4/3  # Points to pixels
        w_str = next(re.finditer(r'width="([0-9]+(.[0-9]+)?)', self.content)
                     ).group(1)
        h_str = next(re.finditer(r'height="([0-9]+(.[0-9]+)?)', self.content)
                     ).group(1)
        w, h = float(w_str), float(h_str)
        # Anchor offsets; negative y terms below account for the flipped axis.
        x_off, y_off = 0, 0
        if center:
            x_off, y_off = -w/2, h/2
        else:
            x_off, y_off = 0, h
        if text_anchor == 'start':
            x_off = 0
        elif text_anchor == 'middle':
            x_off = -w/2
        elif text_anchor == 'end':
            x_off = -w
        # Prefix every id/url(#)/xlink href so multiple embedded SVGs in one
        # drawing do not collide.
        id_prefix = f'embed-{hash(self.content)}-'
        content = (self.content
                   .replace('id="', f'id="{id_prefix}')
                   .replace('="url(#', f'="url(#{id_prefix}')
                   .replace('xlink:href="#', f'xlink:href="#{id_prefix}'))
        defs_str = next(re.finditer(r'<defs>(.*)</defs>', content,
                                    re.MULTILINE | re.DOTALL)
                        ).group(1)
        elems_str = next(re.finditer(r'</defs>(.*)</svg>', content,
                                     re.MULTILINE | re.DOTALL)
                         ).group(1)
        defs = elements.Raw(defs_str)
        # Compose placement, scaling and anchoring into one transform.
        transforms = []
        if 'transform' in kwargs:
            transforms.append(kwargs['transform'])
        if x or y:
            transforms.append(f'translate({x}, {-y})')
        transforms.append(f'scale({scale})')
        if x_off or y_off:
            transforms.append(f'translate({x_off}, {-y_off})')
        kwargs['transform'] = ' '.join(transforms)
        elems = elements.Raw(elems_str, (defs,), **kwargs)
        return (elems,)
def pdf_to_svg(fname_or_obj=None, text=None, data=None, file=None,
               out_name=None, ret_svg=True):
    '''Convert a PDF into an SVG using the pdf2svg command line tool.

    Exactly one of fname_or_obj (a path, or an object exposing .fname/.data
    such as a compiled PDF result), text, data or file must be given.  When
    out_name is set the SVG is also copied to that path.  Returns an Svg
    instance unless ret_svg is False.

    Raises TypeError on a bad argument combination, LatexError when pdf2svg
    is not installed, and RuntimeError when the conversion fails.
    '''
    if ((fname_or_obj is not None)
            + (text is not None)
            + (data is not None)
            + (file is not None)) != 1:
        raise TypeError(
            'Specify exactly one of fname_or_obj, text, data, file, or '
            'fname.')
    fname = None
    if fname_or_obj is not None:
        if isinstance(fname_or_obj, (str, bytes, Path)):
            fname = fname_or_obj
        elif fname_or_obj.fname is not None:
            fname = fname_or_obj.fname
        else:
            data = fname_or_obj.data
    if fname is not None:
        # BUG FIX: mode was 'b' (not a valid open() mode, raising
        # ValueError), ret_svg was not forwarded, and the recursive result
        # was discarded so execution fell through to the `assert False`
        # below with no input set.
        with open(fname, 'rb') as f:
            return pdf_to_svg(file=f, out_name=out_name, ret_svg=ret_svg)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_fs = fs.open_fs(tmp_dir, writeable=True)
        if file is not None:
            tmp_fs.writefile('image.pdf', file)
        elif data is not None:
            tmp_fs.writebytes('image.pdf', data)
        elif text is not None:
            tmp_fs.writetext('image.pdf', text)
        else:
            assert False, 'Logic error'
        args = ['pdf2svg', 'image.pdf', 'image.svg']
        try:
            p = subprocess.Popen(args,
                                 cwd=tmp_dir,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        except FileNotFoundError:
            raise LatexError('pdf2svg command not found.')
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            # Surface pdf2svg's own output as the error message.
            msg = ''
            if stdout:
                msg += stdout.decode()
            if stderr:
                msg += stderr.decode()
            raise RuntimeError(msg)
        if out_name is not None:
            fs.copy.copy_file(tmp_fs, 'image.svg', '.', out_name)
        if ret_svg:
            return Svg(tmp_fs.readtext('image.svg'))
def render_latex_in_svg(name_or_drawing=None, text=None, data=None, file=None,
                        fit_drawing=False, latex_width=None, out_name=None,
                        config=DocumentConfig('standalone'), ret_svg=True):
    """Typeset the LaTeX inside an SVG by compiling it to PDF, then convert
    the PDF back into a plain SVG (optionally saved to *out_name*)."""
    pdf = svg_to_pdf(name_or_drawing, text=text, data=data, file=file,
                     fit_drawing=fit_drawing, latex_width=latex_width,
                     out_name=None, config=config)
    return pdf_to_svg(pdf, out_name=out_name, ret_svg=ret_svg)
def svg_to_png(name_or_drawing=None, text=None, data=None, file=None,
               fit_drawing=False, latex_width=None, out_name=None,
               config=DocumentConfig('standalone')):
    """Typeset an SVG's LaTeX and rasterize the result (SVG -> PDF -> SVG ->
    raster via drawSvg)."""
    rendered = render_latex_in_svg(
        name_or_drawing, text=text, data=data, file=file,
        fit_drawing=fit_drawing, latex_width=latex_width, config=config)
    return rendered.rasterize(out_name)
def text_to_svg(latex_text, config=DocumentConfig('standalone'), fill=None):
    r"""Render a LaTeX snippet to an Svg, optionally colored via *fill*.

    *fill* may be a named LaTeX color or a '#rgb'/'#rrggbb' hex string; a
    hex value becomes a \definecolor{customcolor} preamble command.
    """
    color = fill
    extra_commands = list(svg_commands)
    definition = None
    is_hex = (color is not None
              and color.startswith('#')
              and len(color) in (4, 7))
    if is_hex:
        digits = (len(color) - 1) // 3  # hex digits per channel: 1 or 2
        channels = []
        for idx in range(3):
            start = 1 + idx * digits
            chunk = color[start:start + digits]
            # A single hex digit is doubled ('a' -> 'aa') via repetition.
            channels.append(int(chunk * (3 - digits), 16))
        red, green, blue = channels
        definition = (r'\definecolor{customcolor}{RGB}'
                      f'{{{red},{green},{blue}}}')
        color = 'customcolor'
    if definition is not None:
        extra_commands.append(LatexCommand('customcolor', definition))
    if color is not None:
        latex_text = fr'\color{{{color}}}{latex_text}'
    content = BasicContent(latex_text, svg_packages, extra_commands)
    return pdf_to_svg(content.render(config=config))
| 37.234234
| 80
| 0.551012
|
acfd0d71e37766c87adbd570ac13e81026326434
| 18,884
|
py
|
Python
|
impacket/krb5/ccache.py
|
milkdevil/impacket
|
5813a5f4527460b9c9b87175f528d5d366e0436b
|
[
"Apache-1.1"
] | 109
|
2017-12-31T07:43:03.000Z
|
2022-02-15T16:27:29.000Z
|
impacket/krb5/ccache.py
|
jppgibbs/Aegis
|
feac08cd3935569057e75531fe80bd0e1f982a93
|
[
"MIT"
] | 8
|
2017-12-31T01:45:54.000Z
|
2021-06-08T19:35:58.000Z
|
impacket/krb5/ccache.py
|
jppgibbs/Aegis
|
feac08cd3935569057e75531fe80bd0e1f982a93
|
[
"MIT"
] | 42
|
2018-01-02T14:31:13.000Z
|
2021-11-27T14:08:08.000Z
|
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# Kerberos Credential Cache format implementation
# based on file format described at:
# http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt
# Pretty lame and quick implementation, not a fun thing to do
# Contribution is welcome to make it the right way
#
from datetime import datetime
from struct import pack, unpack, calcsize
from pyasn1.codec.der import decoder, encoder
from pyasn1.type.univ import noValue
from binascii import hexlify
from impacket.structure import Structure
from impacket.krb5 import crypto, constants, types
from impacket.krb5.asn1 import AS_REP, seq_set, TGS_REP, EncTGSRepPart, EncASRepPart, Ticket
from impacket import LOG
DELTA_TIME = 1
class Header(Structure):
    """A tag-length-value ccache header field (e.g. the KDC DeltaTime)."""
    structure = (
        ('tag','!H=0'),
        ('taglen','!H=0'),
        ('_tagdata','_-tagdata','self["taglen"]'),
        ('tagdata',':'),
    )
class DeltaTime(Structure):
    """Client/KDC time offset header payload (seconds + microseconds)."""
    structure = (
        ('time_offset','!L=0'),
        ('usec_offset','!L=0'),
    )
class CountedOctetString(Structure):
    """Length-prefixed byte string — the basic ccache building block."""
    structure = (
        ('length','!L=0'),
        ('_data','_-data','self["length"]'),
        ('data',':'),
    )
    def prettyPrint(self, indent=''):
        # Hex dump of the raw octets.
        return "%s%s" % (indent, hexlify(self['data']))
class KeyBlock(Structure):
    """Session key record: key/encryption type plus the raw key bytes."""
    structure = (
        ('keytype','!H=0'),
        ('etype','!H=0'),
        ('keylen','!H=0'),
        ('_keyvalue','_-keyvalue','self["keylen"]'),
        ('keyvalue',':'),
    )
    def prettyPrint(self):
        return "Key: (0x%x)%s" % (self['keytype'], hexlify(self['keyvalue']))
class Times(Structure):
    """Ticket validity times as four 32-bit big-endian UNIX timestamps."""
    structure = (
        ('authtime','!L=0'),
        ('starttime','!L=0'),
        ('endtime','!L=0'),
        ('renew_till','!L=0'),
    )
    def prettyPrint(self, indent = ''):
        # Python 2 print statements: this module targets Python 2.
        print "%sAuth : %s" % (indent, datetime.fromtimestamp(self['authtime']).isoformat())
        print "%sStart: %s" % (indent, datetime.fromtimestamp(self['starttime']).isoformat())
        print "%sEnd  : %s" % (indent, datetime.fromtimestamp(self['endtime']).isoformat())
        print "%sRenew: %s" % (indent, datetime.fromtimestamp(self['renew_till']).isoformat())
class Address(Structure):
    """Host address entry: type code plus a counted octet string."""
    structure = (
        ('addrtype','!H=0'),
        ('addrdata',':', CountedOctetString),
    )
class AuthData(Structure):
    """Authorization-data entry: type code plus a counted octet string."""
    structure = (
        ('authtype','!H=0'),
        ('authdata',':', CountedOctetString),
    )
class Principal:
    """Kerberos principal as stored in a ccache: a name type, the realm and
    a list of name components (all CountedOctetStrings)."""
    class PrincipalHeader(Structure):
        structure = (
            ('name_type','!L=0'),
            ('num_components','!L=0'),
        )
    def __init__(self, data=None):
        """Parse from serialized bytes when *data* is given, else empty."""
        self.components = []
        self.realm = None
        if data is not None:
            self.header = self.PrincipalHeader(data)
            data = data[len(self.header):]
            self.realm = CountedOctetString(data)
            data = data[len(self.realm):]
            self.components = []
            for component in range(self.header['num_components']):
                comp = CountedOctetString(data)
                data = data[len(comp):]
                self.components.append(comp)
        else:
            self.header = self.PrincipalHeader()
    def __len__(self):
        # Serialized size: header + realm + every component.
        totalLen = len(self.header) + len(self.realm)
        for i in self.components:
            totalLen += len(i)
        return totalLen
    def getData(self):
        """Serialize back to the on-disk ccache representation."""
        data = self.header.getData() + self.realm.getData()
        for component in self.components:
            data += component.getData()
        return data
    def __str__(self):
        return self.getData()
    def prettyPrint(self):
        # Render as 'component1/component2/...@REALM'.
        principal = ''
        for component in self.components:
            principal += component['data'] + '/'
        principal = principal[:-1]
        principal += '@' + self.realm['data']
        return principal
    def fromPrincipal(self, principal):
        """Populate from an impacket types.Principal instance."""
        self.header['name_type'] = principal.type
        self.header['num_components'] = len(principal.components)
        octetString = CountedOctetString()
        octetString['length'] = len(principal.realm)
        octetString['data'] = principal.realm
        self.realm = octetString
        self.components = []
        for c in principal.components:
            octetString = CountedOctetString()
            octetString['length'] = len(c)
            octetString['data'] = c
            self.components.append(octetString)
    def toPrincipal(self):
        """Convert back into an impacket types.Principal."""
        return types.Principal(self.prettyPrint(), type=self.header['name_type'])
class Credential:
class CredentialHeader(Structure):
structure = (
('client',':', Principal),
('server',':', Principal),
('key',':', KeyBlock),
('time',':', Times),
('is_skey','B=0'),
('tktflags','!L=0'),
('num_address','!L=0'),
)
def __init__(self, data=None):
self.addresses = ()
self.authData = ()
self.header = None
self.ticket = None
self.secondTicket = None
if data is not None:
self.header = self.CredentialHeader(data)
data = data[len(self.header):]
self.addresses = []
for address in range(self.header['num_address']):
ad = Address(data)
data = data[len(ad):]
self.addresses.append(ad)
num_authdata = unpack('!L', data[:4])[0]
data = data[calcsize('!L'):]
for authdata in range(num_authdata):
ad = AuthData(data)
data = data[len(ad):]
self.authData.append(ad)
self.ticket = CountedOctetString(data)
data = data[len(self.ticket):]
self.secondTicket = CountedOctetString(data)
data = data[len( self.secondTicket):]
else:
self.header = self.CredentialHeader()
def __getitem__(self, key):
return self.header[key]
def __setitem__(self, item, value):
self.header[item] = value
def getServerPrincipal(self):
return self.header['server'].prettyPrint()
def __len__(self):
totalLen = len(self.header)
for i in self.addresses:
totalLen += len(i)
totalLen += calcsize('!L')
for i in self.authData:
totalLen += len(i)
totalLen += len(self.ticket)
totalLen += len(self.secondTicket)
return totalLen
def dump(self):
self.header.dump()
def getData(self):
data = self.header.getData()
for i in self.addresses:
data += i.getData()
data += pack('!L', len(self.authData))
for i in self.authData:
data += i.getData()
data += self.ticket.getData()
data += self.secondTicket.getData()
return data
def __str__(self):
return self.getData()
def prettyPrint(self, indent=''):
print "%sClient: %s" % (indent, self.header['client'].prettyPrint())
print "%sServer: %s" % (indent, self.header['server'].prettyPrint())
print "%s%s" % (indent, self.header['key'].prettyPrint())
print "%sTimes: " % indent
self.header['time'].prettyPrint('\t\t')
print "%sSubKey: %s" % (indent, self.header['is_skey'])
print "%sFlags: 0x%x" % (indent, self.header['tktflags'])
print "%sAddresses: %d" % (indent, self.header['num_address'])
for address in self.addresses:
address.prettyPrint('\t\t')
print "%sAuth Data: %d" % (indent, len(self.authData))
for ad in self.authData:
ad.prettyPrint('\t\t')
print "%sTicket: %s" % (indent, self.ticket.prettyPrint())
print "%sSecond Ticket: %s" % (indent, self.secondTicket.prettyPrint())
def toTGT(self):
tgt_rep = AS_REP()
tgt_rep['pvno'] = 5
tgt_rep['msg-type'] = int(constants.ApplicationTagNumbers.AS_REP.value)
tgt_rep['crealm'] = self['server'].realm['data']
# Fake EncryptedData
tgt_rep['enc-part'] = noValue
tgt_rep['enc-part']['etype'] = 1
tgt_rep['enc-part']['cipher'] = ''
seq_set(tgt_rep, 'cname', self['client'].toPrincipal().components_to_asn1)
ticket = types.Ticket()
ticket.from_asn1(self.ticket['data'])
seq_set(tgt_rep,'ticket', ticket.to_asn1)
cipher = crypto._enctype_table[self['key']['keytype']]()
tgt = dict()
tgt['KDC_REP'] = encoder.encode(tgt_rep)
tgt['cipher'] = cipher
tgt['sessionKey'] = crypto.Key(cipher.enctype, str(self['key']['keyvalue']))
return tgt
def toTGS(self, newSPN=None):
tgs_rep = TGS_REP()
tgs_rep['pvno'] = 5
tgs_rep['msg-type'] = int(constants.ApplicationTagNumbers.TGS_REP.value)
tgs_rep['crealm'] = self['server'].realm['data']
# Fake EncryptedData
tgs_rep['enc-part'] = noValue
tgs_rep['enc-part']['etype'] = 1
tgs_rep['enc-part']['cipher'] = ''
seq_set(tgs_rep, 'cname', self['client'].toPrincipal().components_to_asn1)
ticket = types.Ticket()
ticket.from_asn1(self.ticket['data'])
if newSPN is not None:
if newSPN.upper() != str(ticket.service_principal).upper():
LOG.debug('Changing sname from %s to %s and hoping for the best' % (ticket.service_principal, newSPN) )
ticket.service_principal = types.Principal(newSPN, type=int(ticket.service_principal.type))
seq_set(tgs_rep,'ticket', ticket.to_asn1)
cipher = crypto._enctype_table[self['key']['keytype']]()
tgs = dict()
tgs['KDC_REP'] = encoder.encode(tgs_rep)
tgs['cipher'] = cipher
tgs['sessionKey'] = crypto.Key(cipher.enctype, str(self['key']['keyvalue']))
return tgs
class CCache:
    """In-memory model of a Kerberos credential cache file, format version
    0x0504 (see the ccache.txt reference cited in the module header)."""
    class MiniHeader(Structure):
        # File magic: format version plus total length of the header region.
        structure = (
            ('file_format_version','!H=0x0504'),
            ('headerlen','!H=12'),
        )
    def __init__(self, data = None):
        """Parse a serialized ccache when *data* is given, else start empty."""
        self.headers = None
        self.principal = None
        self.credentials = []
        self.miniHeader = None
        if data is not None:
            miniHeader = self.MiniHeader(data)
            data = data[len(str(miniHeader)):]
            headerLen = miniHeader['headerlen']
            # Consume tag-length-value headers until headerlen is exhausted.
            self.headers = []
            while headerLen > 0:
                header = Header(data)
                self.headers.append(header)
                headerLen -= len(header)
                data = data[len(header):]
            # Now the primary_principal
            self.principal = Principal(data)
            data = data[len(self.principal):]
            # Now let's parse the credentials
            self.credentials = []
            while len(data) > 0:
                cred = Credential(data)
                self.credentials.append(cred)
                data = data[len(cred.getData()):]
    def getData(self):
        """Serialize: mini header, headers, principal, then credentials."""
        data = self.MiniHeader().getData()
        for header in self.headers:
            data += header.getData()
        data += self.principal.getData()
        for credential in self.credentials:
            data += credential.getData()
        return data
    def getCredential(self, server, anySPN=True):
        """Return the cached credential whose server principal matches
        *server* (case-insensitive).  With anySPN, fall back to the first
        entry sharing the part after the '/'; returns None when no match."""
        for c in self.credentials:
            if c['server'].prettyPrint().upper() == server.upper():
                LOG.debug('Returning cached credential for %s' % c['server'].prettyPrint().upper())
                return c
        LOG.debug('SPN %s not found in cache' % server.upper())
        if anySPN is True:
            LOG.debug('AnySPN is True, looking for another suitable SPN')
            for c in self.credentials:
                # Let's search for any TGT/TGS that matches the server w/o the SPN's service type, returns
                # the first one
                if c['server'].prettyPrint().find('/') >=0:
                    if c['server'].prettyPrint().upper().split('/')[1] == server.upper().split('/')[1]:
                        LOG.debug('Returning cached credential for %s' % c['server'].prettyPrint().upper())
                        return c
        return None
    def toTimeStamp(self, dt, epoch=datetime(1970,1,1)):
        """Seconds since the epoch as a float (manual total_seconds())."""
        td = dt - epoch
        # return td.total_seconds()
        return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 1e6
    def reverseFlags(self, flags):
        """Fold an iterable of bits (MSB-first) into the LSB-first ccache
        flags integer.
        NOTE(review): when *flags* is a str, iterating yields characters,
        so `j != 0` is always true and `j << i` raises TypeError — the str
        branch looks broken; confirm callers only pass sequences of ints.
        """
        result = 0
        if isinstance(flags, str):
            flags = flags[1:-2]
        for i,j in enumerate(reversed(flags)):
            if j != 0:
                result += j << i
        return result
    def fromTGT(self, tgt, oldSessionKey, sessionKey):
        """Populate this cache from a raw AS-REP (TGT) blob, decrypting its
        enc-part with *oldSessionKey*; *sessionKey* is not referenced."""
        self.headers = []
        header = Header()
        header['tag'] = 1
        header['taglen'] = 8
        header['tagdata'] = '\xff\xff\xff\xff\x00\x00\x00\x00'
        self.headers.append(header)
        decodedTGT = decoder.decode(tgt, asn1Spec = AS_REP())[0]
        tmpPrincipal = types.Principal()
        tmpPrincipal.from_asn1(decodedTGT, 'crealm', 'cname')
        self.principal = Principal()
        self.principal.fromPrincipal(tmpPrincipal)
        # Now let's add the credential
        cipherText = decodedTGT['enc-part']['cipher']
        cipher = crypto._enctype_table[decodedTGT['enc-part']['etype']]
        # Key Usage 3
        # AS-REP encrypted part (includes TGS session key or
        # application session key), encrypted with the client key
        # (Section 5.4.2)
        plainText = cipher.decrypt(oldSessionKey, 3, str(cipherText))
        encASRepPart = decoder.decode(plainText, asn1Spec = EncASRepPart())[0]
        credential = Credential()
        server = types.Principal()
        server.from_asn1(encASRepPart, 'srealm', 'sname')
        tmpServer = Principal()
        tmpServer.fromPrincipal(server)
        credential['client'] = self.principal
        credential['server'] = tmpServer
        credential['is_skey'] = 0
        credential['key'] = KeyBlock()
        credential['key']['keytype'] = int(encASRepPart['key']['keytype'])
        credential['key']['keyvalue'] = str(encASRepPart['key']['keyvalue'])
        credential['key']['keylen'] = len(credential['key']['keyvalue'])
        credential['time'] = Times()
        credential['time']['authtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['authtime']))
        credential['time']['starttime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['starttime']))
        credential['time']['endtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['endtime']))
        credential['time']['renew_till'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['renew-till']))
        flags = self.reverseFlags(encASRepPart['flags'])
        credential['tktflags'] = flags
        credential['num_address'] = 0
        credential.ticket = CountedOctetString()
        credential.ticket['data'] = encoder.encode(decodedTGT['ticket'].clone(tagSet=Ticket.tagSet, cloneValueFlag=True))
        credential.ticket['length'] = len(credential.ticket['data'])
        credential.secondTicket = CountedOctetString()
        credential.secondTicket['data'] = ''
        credential.secondTicket['length'] = 0
        self.credentials.append(credential)
    def fromTGS(self, tgs, oldSessionKey, sessionKey):
        """Populate this cache from a raw TGS-REP blob, decrypting its
        enc-part with *oldSessionKey*; *sessionKey* is not referenced."""
        self.headers = []
        header = Header()
        header['tag'] = 1
        header['taglen'] = 8
        header['tagdata'] = '\xff\xff\xff\xff\x00\x00\x00\x00'
        self.headers.append(header)
        decodedTGS = decoder.decode(tgs, asn1Spec = TGS_REP())[0]
        tmpPrincipal = types.Principal()
        tmpPrincipal.from_asn1(decodedTGS, 'crealm', 'cname')
        self.principal = Principal()
        self.principal.fromPrincipal(tmpPrincipal)
        # Now let's add the credential
        cipherText = decodedTGS['enc-part']['cipher']
        cipher = crypto._enctype_table[decodedTGS['enc-part']['etype']]
        # Key Usage 8
        # TGS-REP encrypted part (includes application session
        # key), encrypted with the TGS session key (Section 5.4.2)
        plainText = cipher.decrypt(oldSessionKey, 8, str(cipherText))
        encTGSRepPart = decoder.decode(plainText, asn1Spec = EncTGSRepPart())[0]
        credential = Credential()
        server = types.Principal()
        server.from_asn1(encTGSRepPart, 'srealm', 'sname')
        tmpServer = Principal()
        tmpServer.fromPrincipal(server)
        credential['client'] = self.principal
        credential['server'] = tmpServer
        credential['is_skey'] = 0
        credential['key'] = KeyBlock()
        credential['key']['keytype'] = int(encTGSRepPart['key']['keytype'])
        credential['key']['keyvalue'] = str(encTGSRepPart['key']['keyvalue'])
        credential['key']['keylen'] = len(credential['key']['keyvalue'])
        credential['time'] = Times()
        credential['time']['authtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['authtime']))
        credential['time']['starttime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['starttime']))
        credential['time']['endtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['endtime']))
        credential['time']['renew_till'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['renew-till']))
        flags = self.reverseFlags(encTGSRepPart['flags'])
        credential['tktflags'] = flags
        credential['num_address'] = 0
        credential.ticket = CountedOctetString()
        credential.ticket['data'] = encoder.encode(decodedTGS['ticket'].clone(tagSet=Ticket.tagSet, cloneValueFlag=True))
        credential.ticket['length'] = len(credential.ticket['data'])
        credential.secondTicket = CountedOctetString()
        credential.secondTicket['data'] = ''
        credential.secondTicket['length'] = 0
        self.credentials.append(credential)
    @classmethod
    def loadFile(cls, fileName):
        """Read and parse a ccache file from disk."""
        f = open(fileName,'rb')
        data = f.read()
        f.close()
        return cls(data)
    def saveFile(self, fileName):
        """Serialize and write this cache to *fileName*."""
        f = open(fileName,'wb+')
        f.write(self.getData())
        f.close()
    def prettyPrint(self):
        # Python 2 print statements: this module targets Python 2.
        print "Primary Principal: %s" % self.principal.prettyPrint()
        print "Credentials: "
        for i, credential in enumerate(self.credentials):
            print "[%d]" % i
            credential.prettyPrint('\t')
if __name__ == '__main__':
    # Quick manual check: dump the cache pointed to by $KRB5CCNAME.
    import os
    ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
    ccache.prettyPrint()
| 36.107075
| 121
| 0.585575
|
acfd0dd91e56d7cda21c5318425ca17321309644
| 5,179
|
py
|
Python
|
zaphod/plugins/nagios.py
|
snapiri/zaphod
|
c96f73a28f50ac3062ad32dfd73bc13f3e8648f8
|
[
"Apache-2.0"
] | null | null | null |
zaphod/plugins/nagios.py
|
snapiri/zaphod
|
c96f73a28f50ac3062ad32dfd73bc13f3e8648f8
|
[
"Apache-2.0"
] | 4
|
2019-02-10T15:18:34.000Z
|
2019-02-20T08:27:31.000Z
|
zaphod/plugins/nagios.py
|
snapiri/zaphod
|
c96f73a28f50ac3062ad32dfd73bc13f3e8648f8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright (C) 2020 Shachar Snapiri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# nagios.py v0.0.1
# 13/3/2020
# Test different net protocols integrity.
# Written by Shachar Snapiri <shachar@snapiri.net>
# This plugin will try to verify the DHCP server(s), according to the
# supplied configuration. It will also validate the ARP resolution according
# to the same file.
import argparse
import sys
import time
from zaphod.common import config
from zaphod.common import logger
from zaphod.common import packet_reader
from zaphod.protocols import ARP
from zaphod.protocols import DHCP
# Seconds to wait for responses after sending the test packets.
TEST_SLEEP_TIME = 2
LOG = logger.get_logger(__name__)
# Command-line interface.
# BUG FIX: the usage string used '%prog', which is optparse syntax; argparse
# %-formats the usage string with a {'prog': ...} mapping, so '%p' made
# help/usage rendering raise ValueError.  The argparse placeholder is
# '%(prog)s'.
options = argparse.ArgumentParser(usage='%(prog)s server [options]',
                                  description='Test network integrity')
options.add_argument('-f', '--file', type=str, default='/etc/zaphod.conf',
                     help='Configuration file location '
                          '(default: /etc/zaphod.conf)')
options.add_argument('-t', '--timeout', type=int, default=10,
                     help='Socket timeout (default: 10)')
options.add_argument('-v', '--verbose', action='count', default=0,
                     help='Print verbose output')
# Module-level mailboxes filled by the protocol status callbacks below:
# None until a callback fires, afterwards the errors it reported.
dhcp_errors = None
arp_errors = None
def _set_log_level(verbose):
    """Map the counted -v flag to a log level: 0 -> ERROR, 1 -> WARN,
    2 -> INFO, 3+ -> DEBUG."""
    if verbose >= 3:
        level = logger.DEBUG
    elif verbose == 2:
        level = logger.INFO
    elif verbose == 1:
        level = logger.WARN
    else:
        level = logger.ERROR
    logger.set_log_level(level)
class DhcpTester(object):
    """Context-manager wrapper that runs the configured DHCP checks."""
    def __init__(self, zaphod_config):
        self.config = zaphod_config.get_dhcp_config()
        # NOTE(review): _errors is never used — results arrive through the
        # module-level dhcp_errors global set by the callback below.
        self._errors = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release; context-manager form kept for symmetry.
        pass
    @staticmethod
    def _dhcp_status_callback(errors):
        # Invoked by DHCPProto with the problems it detected.
        global dhcp_errors
        dhcp_errors = errors
    def test_dhcp(self):
        """Send a DHCP probe and print any problems; True when all checks
        pass, False otherwise."""
        reader = packet_reader.PacketReader(self.config['interface'])
        if not reader.is_ready:
            print('DHCP Reader is not ready')
            return False
        dhcp_proto = DHCP.DHCPProto(reader,
                                    False,
                                    self.config['servers'],
                                    self.config['dhcp_ranges'],
                                    self.config['gateways'],
                                    self.config['dns_servers'])
        dhcp_proto.register_callback(self._dhcp_status_callback)
        # NOTE(review): timeout is hard-coded; the -t/--timeout CLI option
        # is not forwarded here — confirm intended.
        dhcp_proto.set_timeout(10)
        reader.start_reader()
        packet = dhcp_proto.create_packet()
        dhcp_proto.send_packet(packet)
        time.sleep(TEST_SLEEP_TIME)
        reader.stop_reader()
        dhcp_proto.close()
        global dhcp_errors
        if dhcp_errors:
            for dhcp_error in dhcp_errors:
                print(f'{dhcp_error}')
            return False
        return True
class ArpTester(object):
    """Context-manager wrapper that validates the configured ARP mappings."""
    def __init__(self, zaphod_config):
        self.config = zaphod_config.get_arp_config()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release; context-manager form kept for symmetry.
        pass
    @staticmethod
    def _arp_status_callback(errors):
        # Invoked by ARPProto with the problems it detected.
        global arp_errors
        arp_errors = errors
    def test_arp(self):
        """Probe every configured resolver address and print any problems;
        True when all checks pass, False otherwise."""
        reader = packet_reader.PacketReader(self.config['interface'])
        if not reader.is_ready:
            print('ARP Reader is not ready')
            return False
        arp_proto = ARP.ARPProto(reader, False, self.config['resolvers'])
        arp_proto.register_callback(self._arp_status_callback)
        # NOTE(review): timeout is hard-coded; the -t/--timeout CLI option
        # is not forwarded here — confirm intended.
        arp_proto.set_timeout(10)
        reader.start_reader()
        for item in self.config['resolvers'].keys():
            packet = arp_proto.create_packet(ip_address=item)
            arp_proto.send_packet(packet)
        time.sleep(TEST_SLEEP_TIME)
        reader.stop_reader()
        arp_proto.close()
        global arp_errors
        if arp_errors:
            for arp_error in arp_errors:
                print(f'{arp_error}')
            return False
        return True
def main():
    """Entry point: run the DHCP and ARP checks and exit non-zero on failure."""
    args = options.parse_args()
    _set_log_level(args.verbose)
    status = 0
    try:
        zaphod_config = config.Config(args.file)
    except IOError:
        LOG.error('Config file does not exist or not accessible')
        # Exit explicitly: a plain `return 3` is discarded by the bare
        # `main()` call under `if __name__ == '__main__'`, so the process
        # would wrongly exit with status 0 on a config error.
        sys.exit(3)
    with DhcpTester(zaphod_config) as tester:
        if not tester.test_dhcp():
            status = 1
    with ArpTester(zaphod_config) as tester:
        if not tester.test_arp():
            status = 1
    if not status:
        print('All tests succeeded')
    sys.exit(status)
if __name__ == '__main__':
main()
| 29.936416
| 76
| 0.633906
|
acfd0e1687d31e0dff2562d860770fd5bff1f727
| 2,412
|
py
|
Python
|
app/app.py
|
robmarkcole/python-fastapi-aws-lambda-container
|
56a676f4c0bccce10fd2533daba3ace0201a1bb3
|
[
"Apache-2.0"
] | null | null | null |
app/app.py
|
robmarkcole/python-fastapi-aws-lambda-container
|
56a676f4c0bccce10fd2533daba3ace0201a1bb3
|
[
"Apache-2.0"
] | null | null | null |
app/app.py
|
robmarkcole/python-fastapi-aws-lambda-container
|
56a676f4c0bccce10fd2533daba3ace0201a1bb3
|
[
"Apache-2.0"
] | null | null | null |
"""FastAPI "hello world" application packaged for AWS Lambda via Mangum."""
import uuid
import uvicorn
from fastapi import FastAPI, HTTPException
from mangum import Mangum
from app.routes import helloworld_router
from app.monitoring import logging_config
from app.middlewares.correlation_id_middleware import CorrelationIdMiddleware
from app.middlewares.logging_middleware import LoggingMiddleware
from app.handlers.exception_handler import exception_handler
from app.handlers.http_exception_handler import http_exception_handler
###############################################################################
#   Application object                                                        #
###############################################################################
app = FastAPI()
###############################################################################
#   Logging configuration                                                     #
###############################################################################
# Each cold start gets a fresh instance id so log lines can be traced back to
# a specific container instance.
logging_config.configure_logging(level='DEBUG', service='Helloworld', instance=str(uuid.uuid4()))
###############################################################################
#   Error handlers configuration                                              #
###############################################################################
# HTTPException gets a dedicated handler; the Exception handler is the
# last-resort catch-all for anything unhandled.
app.add_exception_handler(Exception, exception_handler)
app.add_exception_handler(HTTPException, http_exception_handler)
###############################################################################
#   Middlewares configuration                                                 #
###############################################################################
# Tip : middleware order : CorrelationIdMiddleware > LoggingMiddleware -> reverse order
# (middleware added last runs first, so the correlation id is already set by
# the time the logging middleware executes).
app.add_middleware(LoggingMiddleware)
app.add_middleware(CorrelationIdMiddleware)
###############################################################################
#   Routers configuration                                                     #
###############################################################################
app.include_router(helloworld_router.router, prefix='/hello', tags=['hello'])
###############################################################################
#   Handler for AWS Lambda                                                    #
###############################################################################
# Mangum adapts Lambda events to ASGI so the same app object runs on Lambda.
handler = Mangum(app)
| 47.294118
| 97
| 0.393449
|
acfd0e63d0efc6315e7bd005df99d7d3eeb89f53
| 2,983
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/eventgrid/v20190101/get_event_subscription_full_url.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/eventgrid/v20190101/get_event_subscription_full_url.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/eventgrid/v20190101/get_event_subscription_full_url.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetEventSubscriptionFullUrlResult',
'AwaitableGetEventSubscriptionFullUrlResult',
'get_event_subscription_full_url',
]
@pulumi.output_type
class GetEventSubscriptionFullUrlResult:
    """
    Full endpoint url of an event subscription
    """
    def __init__(__self__, endpoint_url=None):
        # Generated output type: validate each field, then register it with
        # the Pulumi runtime via pulumi.set.
        if endpoint_url and not isinstance(endpoint_url, str):
            raise TypeError("Expected argument 'endpoint_url' to be a str")
        pulumi.set(__self__, "endpoint_url", endpoint_url)
    @property
    @pulumi.getter(name="endpointUrl")
    def endpoint_url(self) -> Optional[str]:
        """
        The URL that represents the endpoint of the destination of an event subscription.
        """
        return pulumi.get(self, "endpoint_url")
class AwaitableGetEventSubscriptionFullUrlResult(GetEventSubscriptionFullUrlResult):
    """Awaitable wrapper so the result can also be consumed with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function (required
        # for __await__) without ever suspending; the result is returned
        # immediately.
        if False:
            yield self
        return GetEventSubscriptionFullUrlResult(
            endpoint_url=self.endpoint_url)
def get_event_subscription_full_url(event_subscription_name: Optional[str] = None,
                                    scope: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventSubscriptionFullUrlResult:
    """
    Full endpoint url of an event subscription

    :param str event_subscription_name: Name of the event subscription
    :param str scope: The scope of the event subscription. The scope can be a subscription, or a resource group, or a top level resource belonging to a resource provider namespace, or an EventGrid topic. For example, use '/subscriptions/{subscriptionId}/' for a subscription, '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for a resource group, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}' for a resource, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}' for an EventGrid topic.
    :return: An awaitable result carrying the destination endpoint URL.
    """
    __args__ = dict()
    __args__['eventSubscriptionName'] = event_subscription_name
    __args__['scope'] = scope
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version so the engine loads a matching plugin.
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure NextGen provider; .value is the
    # already-resolved result object.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20190101:getEventSubscriptionFullUrl', __args__, opts=opts, typ=GetEventSubscriptionFullUrlResult).value
    return AwaitableGetEventSubscriptionFullUrlResult(
        endpoint_url=__ret__.endpoint_url)
| 45.19697
| 669
| 0.728797
|
acfd0e9307c3e2902509906f2356d3a66e177cad
| 7,156
|
py
|
Python
|
homeassistant/components/media_player/mpd.py
|
TastyPi/home-assistant
|
aa1e4c564cb8660bf6b7637bc25317ee58869214
|
[
"MIT"
] | 13
|
2017-02-01T13:25:34.000Z
|
2022-01-26T01:30:39.000Z
|
homeassistant/components/media_player/mpd.py
|
1Forward1Back/home-assistant
|
ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6
|
[
"MIT"
] | 9
|
2017-07-26T18:05:32.000Z
|
2021-12-05T14:16:34.000Z
|
homeassistant/components/media_player/mpd.py
|
1Forward1Back/home-assistant
|
ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6
|
[
"MIT"
] | 21
|
2017-07-26T17:09:40.000Z
|
2022-03-27T22:37:22.000Z
|
"""
Support to interact with a Music Player Daemon.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.mpd/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, PLATFORM_SCHEMA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, MEDIA_TYPE_PLAYLIST,
MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_PORT, CONF_PASSWORD,
CONF_HOST)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-mpd2==0.5.5']
_LOGGER = logging.getLogger(__name__)
CONF_LOCATION = 'location'
DEFAULT_LOCATION = 'MPD'
DEFAULT_PORT = 6600
SUPPORT_MPD = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_LOCATION, default=DEFAULT_LOCATION): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the MPD platform."""
    daemon = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    location = config.get(CONF_LOCATION)
    password = config.get(CONF_PASSWORD)
    import mpd
    # pylint: disable=no-member
    # Probe the daemon once up front so misconfiguration (unreachable host,
    # wrong password) is reported at setup time rather than later inside the
    # entity's update loop.
    try:
        mpd_client = mpd.MPDClient()
        mpd_client.connect(daemon, port)
        if password is not None:
            mpd_client.password(password)
        mpd_client.close()
        mpd_client.disconnect()
    except socket.error:
        _LOGGER.error("Unable to connect to MPD")
        return False
    except mpd.CommandError as error:
        if "incorrect password" in str(error):
            _LOGGER.error("MPD reported incorrect password")
            return False
        else:
            # Any other MPD command error is unexpected -- let it propagate.
            raise
    add_devices([MpdDevice(daemon, port, location, password)])
class MpdDevice(MediaPlayerDevice):
    """Representation of a MPD server."""

    # pylint: disable=no-member
    def __init__(self, server, port, location, password):
        """Initialize the MPD device."""
        import mpd
        self.server = server
        self.port = port
        self._name = location
        self.password = password
        # Populated by update(); None only before the first successful poll.
        self.status = None
        self.currentsong = None
        self.client = mpd.MPDClient()
        self.client.timeout = 10
        self.client.idletimeout = None
        self.update()

    def update(self):
        """Get the latest status and current song, reconnecting if needed."""
        import mpd
        try:
            self.status = self.client.status()
            self.currentsong = self.client.currentsong()
        except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError):
            # Cleanly disconnect in case connection is not in valid state,
            # then reconnect and retry once.
            try:
                self.client.disconnect()
            except mpd.ConnectionError:
                pass
            self.client.connect(self.server, self.port)
            if self.password is not None:
                self.client.password(self.password)
            self.status = self.client.status()
            self.currentsong = self.client.currentsong()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the media state."""
        if self.status['state'] == 'play':
            return STATE_PLAYING
        elif self.status['state'] == 'pause':
            return STATE_PAUSED
        else:
            return STATE_OFF

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self.currentsong.get('file')

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        # 'time' does not exist for streams, hence .get() may return None.
        return self.currentsong.get('time')

    @property
    def media_title(self):
        """Title of current playing media."""
        name = self.currentsong.get('name', None)
        title = self.currentsong.get('title', None)
        if name is None and title is None:
            return "None"
        elif name is None:
            return title
        elif title is None:
            return name
        else:
            return '{}: {}'.format(name, title)

    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self.currentsong.get('artist')

    @property
    def media_album_name(self):
        """Album of current playing media (Music track only)."""
        return self.currentsong.get('album')

    @property
    def volume_level(self):
        """Return the volume level as a float in [0, 1]."""
        return int(self.status['volume'])/100

    @property
    def supported_media_commands(self):
        """Flag of media commands that are supported."""
        return SUPPORT_MPD

    def turn_off(self):
        """Service to send the MPD the command to stop playing."""
        self.client.stop()

    def turn_on(self):
        """Service to send the MPD the command to start playing."""
        self.client.play()

    def set_volume_level(self, volume):
        """Set volume of media player."""
        self.client.setvol(int(volume * 100))

    def volume_up(self):
        """Service to send the MPD the command for volume up."""
        current_volume = int(self.status['volume'])
        # Bug fix: the previous `<= 100` guard could request volume 105 when
        # already near the top; clamp the result into MPD's valid 0-100 range.
        if current_volume < 100:
            self.client.setvol(min(current_volume + 5, 100))

    def volume_down(self):
        """Service to send the MPD the command for volume down."""
        current_volume = int(self.status['volume'])
        # Bug fix: the previous `>= 0` guard could request volume -5; clamp
        # the result to 0 instead.
        if current_volume > 0:
            self.client.setvol(max(current_volume - 5, 0))

    def media_play(self):
        """Service to send the MPD the command to resume playing."""
        self.client.pause(0)

    def media_pause(self):
        """Service to send the MPD the command to pause."""
        self.client.pause(1)

    def media_next_track(self):
        """Service to send the MPD the command for next track."""
        self.client.next()

    def media_previous_track(self):
        """Service to send the MPD the command for previous track."""
        self.client.previous()

    def play_media(self, media_type, media_id, **kwargs):
        """Send the media player the command for playing a playlist."""
        _LOGGER.info(str.format("Playing playlist: {0}", media_id))
        if media_type == MEDIA_TYPE_PLAYLIST:
            self.client.clear()
            self.client.load(media_id)
            self.client.play()
        else:
            _LOGGER.error(str.format("Invalid media type. Expected: {0}",
                                     MEDIA_TYPE_PLAYLIST))
| 30.322034
| 75
| 0.633315
|
acfd0ef99e1e9dc2e45af65c9c0e7d6374251c51
| 2,672
|
py
|
Python
|
fp/fp.py
|
LarsKue/free-proxy
|
b7534b342c6a47c95b4bf81645ed8a1db7b455a8
|
[
"MIT"
] | null | null | null |
fp/fp.py
|
LarsKue/free-proxy
|
b7534b342c6a47c95b4bf81645ed8a1db7b455a8
|
[
"MIT"
] | null | null | null |
fp/fp.py
|
LarsKue/free-proxy
|
b7534b342c6a47c95b4bf81645ed8a1db7b455a8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import random
import sys
import lxml.html as lh
import requests
class FreeProxy:
    """Scrapes sslproxies.org and returns a working HTTP proxy URL.

    :param country_id: optional list of ISO country codes to filter by.
    :param timeout: per-proxy connection timeout in seconds.
    :param rand: shuffle the candidate list before probing.
    :param anonym: only consider proxies whose anonymity column is 'anonymous'.
    """

    def __init__(self, country_id=None, timeout=0.5, rand=False, anonym=False):
        self.country_id = [] if country_id is None else country_id
        self.timeout = timeout
        self.random = rand
        self.anonym = anonym

    def get_proxy_list(self):
        """Return candidate proxies as 'ip:port' strings from sslproxies.org.

        Exits the process on any network/HTTP error (CLI-tool behavior kept
        from the original implementation).
        """
        try:
            page = requests.get('https://www.sslproxies.org')
            doc = lh.fromstring(page.content)
            tr_elements = doc.xpath('//*[@id="list"]//tr')
            proxies = []
            # Rows 1-100 are data rows (row 0 is the header).
            for i in range(1, 101):
                row = tr_elements[i]
                # Column layout: 0=ip, 1=port, 2=country code, 4=anonymity.
                if self.anonym and row[4].text_content() != 'anonymous':
                    continue
                if self.country_id and row[2].text_content() not in self.country_id:
                    continue
                proxies.append(f'{row[0].text_content()}:{row[1].text_content()}')
            return proxies
        except requests.exceptions.RequestException as e:
            print(e)
            sys.exit(1)

    def get(self, any_country_on_failure=True):
        """Return the first working proxy URL, e.g. 'http://1.2.3.4:8080'.

        If no proxy for the requested countries works and
        `any_country_on_failure` is set, the country filter is dropped and the
        search is retried once; otherwise RuntimeError is raised.
        """
        proxy_list = self.get_proxy_list()
        if self.random:
            random.shuffle(proxy_list)
        for candidate in proxy_list:
            proxies = {
                'http': "http://" + candidate,
            }
            try:
                # Bug fix: the old code called check_if_proxy_is_working twice
                # per candidate, doubling the network round-trips.
                working_proxy = self.check_if_proxy_is_working(proxies)
                if working_proxy:
                    return working_proxy
            except requests.exceptions.RequestException:
                continue
        if any_country_on_failure and self.country_id is not None:
            # Drop the country filter and retry with all countries.
            self.country_id = None
            return self.get()
        raise RuntimeError('There are no working proxies at this time.')

    def check_if_proxy_is_working(self, proxies):
        """Return the proxy URL if the connection really goes through it, else None."""
        with requests.get('http://www.google.com', proxies=proxies, timeout=self.timeout, stream=True) as r:
            if r.raw.connection.sock:
                # Verify the socket peer matches the proxy host, i.e. the
                # request actually traversed the proxy.
                if r.raw.connection.sock.getpeername()[0] == proxies['http'].split(':')[1][2:]:
                    return proxies['http']
| 41.107692
| 158
| 0.567365
|
acfd10b0e206b2a0cc8a04289ead44056c029606
| 8,305
|
py
|
Python
|
jishaku/features/python.py
|
mrvillage/jishaku
|
7cad064e1312e4a2fa34f907db3162af8b292154
|
[
"MIT"
] | 2
|
2022-02-01T16:03:21.000Z
|
2022-02-03T06:21:03.000Z
|
jishaku/features/python.py
|
mrvillage/jishaku
|
7cad064e1312e4a2fa34f907db3162af8b292154
|
[
"MIT"
] | null | null | null |
jishaku/features/python.py
|
mrvillage/jishaku
|
7cad064e1312e4a2fa34f907db3162af8b292154
|
[
"MIT"
] | 1
|
2021-12-16T03:32:52.000Z
|
2021-12-16T03:32:52.000Z
|
# -*- coding: utf-8 -*-
"""
jishaku.features.python
~~~~~~~~~~~~~~~~~~~~~~~~
The jishaku Python evaluation/execution commands.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import io
import discord
from discord.ext import commands
from jishaku.codeblocks import codeblock_converter
from jishaku.exception_handling import ReplResponseReactor
from jishaku.features.baseclass import Feature
from jishaku.flags import Flags
from jishaku.functools import AsyncSender
from jishaku.paginators import PaginatorInterface, WrappedPaginator, use_file_check
from jishaku.repl import AsyncCodeExecutor, Scope, all_inspections, disassemble, get_var_dict_from_ctx
class PythonFeature(Feature):
    """
    Feature containing the Python-related commands
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Scope kept across invocations when retention is enabled.
        self._scope = Scope()
        self.retain = Flags.RETAIN
        # Result of the last REPL evaluation, injected into user code as `_`.
        self.last_result = None
    @property
    def scope(self):
        """
        Gets a scope for use in REPL.
        If retention is on, this is the internal stored scope,
        otherwise it is always a new Scope.
        """
        if self.retain:
            return self._scope
        return Scope()
    @Feature.Command(parent="jsk", name="retain")
    async def jsk_retain(self, ctx: commands.Context, *, toggle: bool = None):
        """
        Turn variable retention for REPL on or off.
        Provide no argument for current status.
        """
        if toggle is None:
            if self.retain:
                return await ctx.send("Variable retention is set to ON.")
            return await ctx.send("Variable retention is set to OFF.")
        if toggle:
            if self.retain:
                return await ctx.send("Variable retention is already set to ON.")
            self.retain = True
            # Start from a clean scope when retention is first enabled.
            self._scope = Scope()
            return await ctx.send("Variable retention is ON. Future REPL sessions will retain their scope.")
        if not self.retain:
            return await ctx.send("Variable retention is already set to OFF.")
        self.retain = False
        return await ctx.send("Variable retention is OFF. Future REPL sessions will dispose their scope when done.")
    async def jsk_python_result_handling(self, ctx: commands.Context, result):  # pylint: disable=too-many-return-statements
        """
        Determines what is done with a result when it comes out of jsk py.
        This allows you to override how this is done without having to rewrite the command itself.
        What you return is what gets stored in the temporary _ variable.
        """
        if isinstance(result, discord.Message):
            return await ctx.send(f"<Message <{result.jump_url}>>")
        if isinstance(result, discord.File):
            return await ctx.send(file=result)
        if isinstance(result, discord.Embed):
            return await ctx.send(embed=result)
        if isinstance(result, PaginatorInterface):
            return await result.send_to(ctx)
        if not isinstance(result, str):
            # repr all non-strings
            result = repr(result)
        # Eventually the below handling should probably be put somewhere else
        if len(result) <= 2000:
            if result.strip() == '':
                # Zero-width space so Discord accepts an "empty" message.
                result = "\u200b"
            # Never leak the bot token, even if user code produced it.
            return await ctx.send(result.replace(self.bot.http.token, "[token omitted]"))
        if use_file_check(ctx, len(result)):  # File "full content" preview limit
            # Discord's desktop and web client now supports an interactive file content
            # display for files encoded in UTF-8.
            # Since this avoids escape issues and is more intuitive than pagination for
            # long results, it will now be prioritized over PaginatorInterface if the
            # resultant content is below the filesize threshold
            return await ctx.send(file=discord.File(
                filename="output.py",
                fp=io.BytesIO(result.encode('utf-8'))
            ))
        # inconsistency here, results get wrapped in codeblocks when they are too large
        # but don't if they're not. probably not that bad, but noting for later review
        paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
        paginator.add_line(result)
        interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
        return await interface.send_to(ctx)
    @Feature.Command(parent="jsk", name="py", aliases=["python"])
    async def jsk_python(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Direct evaluation of Python code.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        arg_dict["_"] = self.last_result
        scope = self.scope
        try:
            async with ReplResponseReactor(ctx.message):
                with self.submit(ctx):
                    executor = AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict)
                    async for send, result in AsyncSender(executor):
                        if result is None:
                            continue
                        self.last_result = result
                        send(await self.jsk_python_result_handling(ctx, result))
        finally:
            # Drop the injected convenience variables so a retained scope does
            # not keep references to this invocation's context objects.
            scope.clear_intersection(arg_dict)
    @Feature.Command(parent="jsk", name="py_inspect", aliases=["pyi", "python_inspect", "pythoninspect"])
    async def jsk_python_inspect(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Evaluation of Python code with inspect information.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        arg_dict["_"] = self.last_result
        scope = self.scope
        try:
            async with ReplResponseReactor(ctx.message):
                with self.submit(ctx):
                    executor = AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict)
                    async for send, result in AsyncSender(executor):
                        self.last_result = result
                        # Sanitize the header: escape double backticks and the bot token.
                        header = repr(result).replace("``", "`\u200b`").replace(self.bot.http.token, "[token omitted]")
                        if len(header) > 485:
                            header = header[0:482] + "..."
                        lines = [f"=== {header} ===", ""]
                        for name, res in all_inspections(result):
                            lines.append(f"{name:16.16} :: {res}")
                        text = "\n".join(lines)
                        if use_file_check(ctx, len(text)):  # File "full content" preview limit
                            send(await ctx.send(file=discord.File(
                                filename="inspection.prolog",
                                fp=io.BytesIO(text.encode('utf-8'))
                            )))
                        else:
                            paginator = WrappedPaginator(prefix="```prolog", max_size=1985)
                            paginator.add_line(text)
                            interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                            send(await interface.send_to(ctx))
        finally:
            scope.clear_intersection(arg_dict)
    @Feature.Command(parent="jsk", name="dis", aliases=["disassemble"])
    async def jsk_disassemble(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Disassemble Python code into bytecode.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        async with ReplResponseReactor(ctx.message):
            text = "\n".join(disassemble(argument.content, arg_dict=arg_dict))
            if use_file_check(ctx, len(text)):  # File "full content" preview limit
                await ctx.send(file=discord.File(
                    filename="dis.py",
                    fp=io.BytesIO(text.encode('utf-8'))
                ))
            else:
                paginator = WrappedPaginator(prefix='```py', max_size=1985)
                paginator.add_line(text)
                interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                await interface.send_to(ctx)
| 37.075893
| 124
| 0.59422
|
acfd10d9aa8f4f9b366392555d9dd10b0afdc08c
| 971
|
py
|
Python
|
tectosaur/fmm/surrounding_surf.py
|
jlmaurer/tectosaur
|
7cc5606d814f061395b19754e7a4b6c5e4c236e5
|
[
"MIT"
] | 17
|
2017-06-29T16:48:56.000Z
|
2021-10-03T18:31:41.000Z
|
tectosaur/fmm/surrounding_surf.py
|
jlmaurer/tectosaur
|
7cc5606d814f061395b19754e7a4b6c5e4c236e5
|
[
"MIT"
] | 4
|
2018-05-29T08:21:13.000Z
|
2021-04-01T01:28:50.000Z
|
tectosaur/fmm/surrounding_surf.py
|
jlmaurer/tectosaur
|
7cc5606d814f061395b19754e7a4b6c5e4c236e5
|
[
"MIT"
] | 8
|
2019-06-10T22:19:40.000Z
|
2022-01-12T20:55:37.000Z
|
import numpy as np
def surrounding_surf_circle(order):
    """Return `order` evenly spaced points on the unit circle as an (order, 2) array."""
    # Same evaluation order as the scalar loop (i * 2 * pi / order), applied
    # elementwise, so the resulting floats are identical.
    angles = np.arange(order) * 2 * np.pi / order
    return np.column_stack((np.cos(angles), np.sin(angles)))
def surrounding_surf_sphere(order):
    """Return roughly `order` approximately evenly distributed points on the unit sphere.

    Points are placed on latitude bands whose count is chosen so each point
    covers about the same surface area; the exact number of points returned
    may differ slightly from `order` due to rounding.
    """
    area_per_pt = 4 * np.pi / order
    spacing = np.sqrt(area_per_pt)
    n_theta = int(np.round(np.pi / spacing))
    d_theta = np.pi / n_theta
    d_phi = area_per_pt / d_theta
    points = []
    for band in range(n_theta):
        theta = np.pi * (band + 0.5) / n_theta
        n_phi = int(np.round(2 * np.pi * np.sin(theta) / d_phi))
        sin_t = np.sin(theta)
        cos_t = np.cos(theta)
        for k in range(n_phi):
            phi = 2 * np.pi * k / n_phi
            points.append((sin_t * np.cos(phi), sin_t * np.sin(phi), cos_t))
    return np.array(points)
def surrounding_surf(order, dim):
    """Dispatch to the circle (dim == 2) or sphere (any other dim) generator."""
    builder = surrounding_surf_circle if dim == 2 else surrounding_surf_sphere
    return builder(order)
| 27.742857
| 64
| 0.53862
|
acfd115e1a022ccd902dfe8620246184c00d4779
| 922
|
py
|
Python
|
HACKERRANK_Challenges/recursive_digitSum.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
HACKERRANK_Challenges/recursive_digitSum.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
HACKERRANK_Challenges/recursive_digitSum.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the superDigit function below.
def superDigit(n, k):
    """Return the super digit of the number formed by concatenating string `n` k times.

    The super digit of a one-digit number is the number itself; otherwise it
    is the super digit of the sum of its digits.  Because digit sums are
    additive, the k-fold concatenation reduces to digit_sum(n) * k without
    ever materializing the (potentially huge) concatenated string.
    """
    total = sum(int(digit) for digit in n) * k
    # Repeatedly collapse to the digit sum until a single digit remains.
    while total > 9:
        total = sum(int(digit) for digit in str(total))
    return total
if __name__ == '__main__':
    # HackerRank harness: the result is written to the file named by the
    # OUTPUT_PATH environment variable; input is "n k" on one stdin line.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nk = input().split()
    n = nk[0]
    k = int(nk[1])
    result = superDigit(n, k)
    fptr.write(str(result) + '\n')
    fptr.close()
| 30.733333
| 109
| 0.638829
|
acfd11a17d4dc4f3872d26130bd48e9e3a54f027
| 3,702
|
py
|
Python
|
demo/try_ngram_shortest_bak.py
|
Li-Ming-Fan/Zhong
|
0447da1a7c58c5bfcc6295ec9615b3874068c3a2
|
[
"MIT"
] | 1
|
2019-10-11T13:03:50.000Z
|
2019-10-11T13:03:50.000Z
|
demo/try_ngram_shortest_bak.py
|
Li-Ming-Fan/Zhong
|
0447da1a7c58c5bfcc6295ec9615b3874068c3a2
|
[
"MIT"
] | null | null | null |
demo/try_ngram_shortest_bak.py
|
Li-Ming-Fan/Zhong
|
0447da1a7c58c5bfcc6295ec9615b3874068c3a2
|
[
"MIT"
] | null | null | null |
from nltk.util import bigrams, trigrams
from nltk.text import Text
from nltk import FreqDist
from functools import reduce
from bidict import bidict
import numpy as np
corpus = [
'<begin> 小鸟 声音 不大 , 却 句 句 在理 , 全场 都 静静 恭听。 <end>',
'<begin> 他 说 : “ 神 是否 创造 世界 ,即 神 对 世界 的 关系 如何 ,这个 问题 其实 就是 关于 精神 对 感性 一般 或 抽象 对 实在、类 对 个体 的 关系 如何 的 问题 ;这个 问题 是 属于 人类 认识 和 哲学 上 最 重要 又 最 困难 的 问题 之一 , 整个 哲学史 其实 只在 这个 问题 周围 绕 圈子 , 古代 哲学 中 斯多葛派 和 伊壁鸠鲁派 间 、 柏拉图派 和 亚里士多德派 间 、 怀疑派 和 独断派 间 的 争论 , 中古哲学 中 唯名论者 和 实在论者 间 的 争论 , 以及 近代 哲学 中 唯心主义者 和 实在论者 或 经验主义者 间 的 争论 , 归根结底 都是 关于 这个 问题 。 <end>”',
'<begin> 讨论 法 的 本位 问题 , 应该 局限 于 实在 法效 用 的 实现 借助 于 何种 规范 手段 的 范围 内 , 它 主要 应 讨论 " 法 是 什么 " 的 问题 , 而 不是 " 法 应当 是 什么 " 的 问题 。 <end>',
'<begin> 现在 , 你 已是 全班 第一名 了 , 我们 都要 向 你 学习 , 我们 还会 继续 帮助 你 。 <end>',
'<begin> 他们 的 罪恶 行径 也 从 反面 教育 我们 , 革命 的 政治工作 对于 我们 党 的 各项 工作 , 对于 我们 军队 和 人民 来说 , 确实 是 不可以 须臾 离开 的 生命线 。 <end>',
'<begin> 从 研究系 办 的 刊物 来看 , 确实 登载 过 大量 的 讨论 社会主义 的 文章 , 似乎 亦 拥护 社会主义 , 但 实际上 这 只是 假象 。 <end>',
'<begin> 他 那些 舞台 下 、 剧场 外 的 事 的确 是 鲜为人知 的 。 <end>',
# '<begin> 他 说 的 确实 在理 <end>'
]
# 单字切分,暂时没用到
def atomic_split(param1, param2):
    """Reducer accumulating the individual characters of whitespace-stripped strings.

    When `param1` is already a list it is extended with the characters of
    `param2`; on the very first reduce step both arguments are raw strings,
    so the first one is converted to a character list before extending.
    """
    if not isinstance(param1, list):
        # First reduce step: normalize the leading string into a char list.
        param1 = atomic_split([], param1)
    return param1 + list(param2.replace(' ', ''))
atomics = reduce(atomic_split, corpus)
#对语料的切分
def word_split(param1, param2):
    """Reducer accumulating whitespace-separated tokens into one flat list.

    On the very first reduce step both arguments are raw strings, so the
    first is tokenized before the second's tokens are appended.
    """
    if not isinstance(param1, list):
        param1 = word_split([], param1)
    return param1 + param2.split()
words = reduce(word_split, corpus)
# Word frequencies plus a bidirectional word <-> position index.
fd = FreqDist(words)
index = bidict()
pos = 0
for k, c in fd.items():
    index[k] = pos
    pos = pos + 1
# ===== Build the bigram matrix using nltk's bigrams() =====
grams = list(bigrams(words))
gc = np.zeros((fd.B(), fd.B()), dtype=np.int32)
# Count bigram occurrences.
for p1, p2 in grams:
    gc[index[p1], index[p2]] += 1
# Bigram probabilities.
gp = np.zeros((fd.B(), fd.B()))
# Smoothing coefficient: interpolate the conditional bigram probability with
# the unigram probability of the following word.
ratio = 0.9
for row in range(0, fd.B()):
    for col in range(0, fd.B()):
        gp[row, col] = ratio * (gc[row, col] / fd[index.inv[row]]) + (
            1 - ratio) * (fd[index.inv[col]] / len(words))
# ====================== model training complete =================================
#=============求最短路径(非N-最短路径,算法和原方法不同,一个是因为对原算法有疑问,另一个为了快速完成DEMO)==================
def split(s, pos=0):
    # Enumerate every possible segmentation of s[pos:] as a tree of
    # {'key': word, 'childs': [...]} nodes, matching words against the
    # module-level vocabulary `fd`.
    if len(s) <= pos: return [{'key': '<end>'}]
    result = []
    for k in fd.keys():
        end = pos + len(k)
        # Only multi-character vocabulary words are matched here; the
        # single-character fallback below always applies.
        if len(k) > 1 and end <= len(s) and k == s[pos:end]:
            result.append({'key': k, 'childs': split(s, end)})
    # Always allow splitting off the next single character.
    result.append({'key': s[pos:pos + 1], 'childs': split(s, pos + 1)})
    return result
def split_to_tree(s):
    """Wrap the full segmentation of `s` in a '<begin>' root node.

    Bug fix: the original body passed the module-level variable `input` to
    split() instead of the `s` parameter, so the argument was silently
    ignored.
    """
    return {'key': '<begin>', 'childs': split(s)}
def segment(node):
    """Annotate every child edge of the segmentation tree with its bigram score.

    Uses the module-level `index`, `gp`, `ratio` and `words` built during
    model training; unknown words fall back to a small smoothing probability.
    """
    k = node['key']
    childs = node.get('childs')
    if not childs:
        return
    i1 = index.get(k)
    for child in childs:
        i2 = index.get(child['key'])
        # Bug fix: use explicit None checks.  Vocabulary position 0 is valid
        # but falsy, so the original `if (i1 and i2)` wrongly treated the
        # word at index 0 as out-of-vocabulary.
        if i1 is not None and i2 is not None:
            child['score'] = gp[i1, i2]
        else:
            child['score'] = (1 - ratio) * 1 / len(words)
        segment(child)
def shortest(node):
    """Return (best_score, best_path) for the highest scoring root-to-leaf path.

    Scores accumulate additively down the tree; a node without children is a
    leaf contributing only its own score and key.
    """
    children = node.get('childs')
    own_score = node.get('score', 0)
    own_key = node.get('key')
    if not children:
        return own_score, [own_key]
    best_score, best_path = -1, []
    for child in children:
        child_score, child_path = shortest(child)
        # Strict '>' keeps the first child on ties, matching a left-to-right
        # greedy tie-break.
        if child_score > best_score:
            best_score, best_path = child_score, child_path
    return best_score + own_score, [own_key] + best_path
# ================ segmentation helpers complete =================
# NOTE(review): `input` shadows the builtin and is also read as a global by
# split_to_tree's original body -- rename with care.
input = '他说的确实在理'
# Expand the input into the tree of all candidate segmentations.
root = split_to_tree(input)
# Score every edge using the bigram probability model trained above.
segment(root)
# Pick the highest scoring path (greedy best path, not a true N-shortest search).
score, seq = shortest(root)
print(seq, score)
#
# https://www.jianshu.com/p/808cc55a3cd7
#
| 27.021898
| 341
| 0.560508
|
acfd11fa6b17f184f5fd60697252c5ab820ad72e
| 3,177
|
py
|
Python
|
punctuator/play_with_model.py
|
audapolis/punctuator2
|
b91b4fd5d19debb4af3d52b564ad8ffa2a8c75b7
|
[
"MIT"
] | null | null | null |
punctuator/play_with_model.py
|
audapolis/punctuator2
|
b91b4fd5d19debb4af3d52b564ad8ffa2a8c75b7
|
[
"MIT"
] | null | null | null |
punctuator/play_with_model.py
|
audapolis/punctuator2
|
b91b4fd5d19debb4af3d52b564ad8ffa2a8c75b7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import division, print_function
import models
import data
import aesara
import sys
from io import open
import aesara.tensor as T
import numpy as np
# pylint: disable=redefined-outer-name
def to_array(arr, dtype=np.int32):
    """Shape a sequence into a (len, 1) column vector: a minibatch of one sequence."""
    column = np.array([arr], dtype=dtype)
    return column.T
def convert_punctuation_to_readable(punct_token):
    """Map the model's SPACE token to ' '; any other token is shown as its first character."""
    return " " if punct_token == data.SPACE else punct_token[0]
def punctuate(predict, word_vocabulary, punctuation_vocabulary, reverse_punctuation_vocabulary, reverse_word_vocabulary, text, f_out, show_unk):
    """Stream *text* through the model in MAX_SEQUENCE_LEN windows, writing
    the punctuated result to *f_out*.

    Windows are advanced to the last predicted end-of-sentence token so each
    sentence is decoded with full left context.
    """
    if not text:
        sys.exit("Input text from stdin missing.")
    # Drop punctuation tokens already present in the input; append end marker.
    text = [w for w in text.split() if w not in punctuation_vocabulary] + [data.END]
    i = 0
    while True:
        subsequence = text[i:i + data.MAX_SEQUENCE_LEN]
        if not subsequence:
            break
        # Map words to ids; unknown words collapse to the UNK id.
        converted_subsequence = [word_vocabulary.get(w, word_vocabulary[data.UNK]) for w in subsequence]
        if show_unk:
            # Show the model's view of the text: unknowns rendered as UNK.
            subsequence = [reverse_word_vocabulary[w] for w in converted_subsequence]
        y = predict(to_array(converted_subsequence))
        f_out.write(subsequence[0])
        last_eos_idx = 0
        punctuations = []
        for y_t in y:
            # Greedy decoding: highest-probability punctuation per slot.
            p_i = np.argmax(y_t.flatten())
            punctuation = reverse_punctuation_vocabulary[p_i]
            punctuations.append(punctuation)
            if punctuation in data.EOS_TOKENS:
                last_eos_idx = len(punctuations)  # we intentionally want the index of next element
        if subsequence[-1] == data.END:
            step = len(subsequence) - 1
        elif last_eos_idx != 0:
            # Resume the next window at the last sentence boundary.
            step = last_eos_idx
        else:
            step = len(subsequence) - 1
        for j in range(step):
            f_out.write(" " + punctuations[j] + " " if punctuations[j] != data.SPACE else " ")
            if j < step - 1:
                f_out.write(subsequence[1 + j])
        if subsequence[-1] == data.END:
            break
        i += step
if __name__ == "__main__":
if len(sys.argv) > 1:
model_file = sys.argv[1]
else:
sys.exit("Model file path argument missing")
show_unk = False
if len(sys.argv) > 2:
show_unk = bool(int(sys.argv[2]))
x = T.imatrix('x')
print("Loading model parameters...")
net, _ = models.load(model_file, 1, x)
print("Building model...")
predict = aesara.function(inputs=[x], outputs=net.y)
word_vocabulary = net.x_vocabulary
punctuation_vocabulary = net.y_vocabulary
reverse_word_vocabulary = {v: k for k, v in net.x_vocabulary.items()}
reverse_punctuation_vocabulary = {v: k for k, v in net.y_vocabulary.items()}
with open(sys.stdout.fileno(), 'w', encoding='utf-8', closefd=False) as f_out:
while True:
try:
text = raw_input("\nTEXT: ").decode('utf-8')
except NameError:
text = input("\nTEXT: ")
punctuate(predict, word_vocabulary, punctuation_vocabulary, reverse_punctuation_vocabulary, reverse_word_vocabulary, text, f_out, show_unk)
f_out.flush()
| 27.626087
| 151
| 0.627321
|
acfd132705216e37d76d0a4ac54318603dc0e054
| 574
|
py
|
Python
|
FlaskRESTFULAPITest_JE/venv/Lib/site-packages/werkzeug/middleware/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 2
|
2022-01-06T11:52:57.000Z
|
2022-01-09T01:53:56.000Z
|
FlaskRESTFULAPITest_JE/venv/Lib/site-packages/werkzeug/middleware/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
FlaskRESTFULAPITest_JE/venv/Lib/site-packages/werkzeug/middleware/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
"""
Middleware
==========
A WSGI middleware is a WSGI application that wraps another application
in order to observe or change its behavior. Werkzeug provides some
middleware for common use cases.
.. toctree::
:maxdepth: 1
proxy_fix
shared_data
dispatcher
http_proxy
lint
profiler
The :doc:`interactive debugger </debug>` is also a middleware that can
be applied manually, although it is typically used automatically with
the :doc:`development server </serving>`.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
| 22.076923
| 71
| 0.696864
|
acfd1391f3524db4a4cf7962476592cfcf69b419
| 5,994
|
py
|
Python
|
nic/datapreparation/data.py
|
StiliyanDr/neural-image-caption
|
26135abc917242388d1e8d4622af0585a8dd966d
|
[
"MIT"
] | null | null | null |
nic/datapreparation/data.py
|
StiliyanDr/neural-image-caption
|
26135abc917242388d1e8d4622af0585a8dd966d
|
[
"MIT"
] | null | null | null |
nic/datapreparation/data.py
|
StiliyanDr/neural-image-caption
|
26135abc917242388d1e8d4622af0585a8dd966d
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import tensorflow as tf
from nic.datapreparation import utils
def load_data(path, type, load_as_features=True):
    """
    :param path: a str - the path of the directory storing the
    preprocessed data.
    :param type: a str - the type of data to load. Possible values:
    'train', 'test' and 'val'.
    :param load_as_features: a boolean value indicating whether to
    load the image features. If `False`, the actual images (preprocessed
    for the chosen CNN) are loaded; this should be used only for fine
    tuning and testing. Defaults to `True`.
    :returns: a tf.data.Dataset which yields 3-tuples whose components
    are :
     - image tensors (feature vectors if `load_as_features` is set to
       `True`)
     - integer sequences (vectors) which represent the captions,
       without the end meta token at the end
     - integer sequences (vectors) which represent the captions,
       without the start meta token in front
    """
    data_subdir = os.path.join(path, type)
    captions = utils.deserialise_from(
        os.path.join(data_subdir, "captions.pcl")
    )
    images_dir = os.path.join(data_subdir,
                              ("features"
                               if (load_as_features)
                               else "images"))
    image_paths, all_captions = _vectorise(captions, images_dir, path)
    image_dataset = tf.data.Dataset.from_tensor_slices(
        (image_paths, all_captions)
    )
    # tf.numpy_function lets the Python pickle loader run inside the tf.data
    # pipeline; each element becomes (image, source caption, target caption).
    return image_dataset.map(
        lambda path, caption:
        tf.numpy_function(
            _load_image,
            [path, caption],
            [tf.float32, tf.int32, tf.int32]
        ),
        num_parallel_calls=tf.data.AUTOTUNE
    )
def _vectorise(captions, images_dir, path):
    """Turn {image_id: [caption, ...]} into parallel arrays of image paths
    and padded (source, target) integer caption pairs.

    Source captions drop the end token, targets drop the start token
    (teacher-forcing pairs); the image path is repeated once per caption.
    """
    tokenizer = load_tokenizer(path)
    image_paths = []
    source_captions, target_captions = [], []
    for image_id, caps in captions.items():
        image_path = os.path.join(images_dir, f"{image_id}.pcl")
        # One path entry per caption of this image.
        image_paths.extend(image_path for _ in caps)
        caps = tokenizer.texts_to_sequences(caps)
        source_captions.extend(c[:-1] for c in caps)
        target_captions.extend(c[1:] for c in caps)
    source_captions = tf.keras.preprocessing.sequence.pad_sequences(
        source_captions,
        padding="post"
    )
    target_captions = tf.keras.preprocessing.sequence.pad_sequences(
        target_captions,
        padding="post"
    )
    # Stack as (N, 2, max_len) so a single tensor carries both halves.
    all_captions = np.concatenate(
        [source_captions[:, np.newaxis, :],
         target_captions[:, np.newaxis, :]],
        axis=1
    )
    return (np.array(image_paths), all_captions)
def _load_image(path, caption):
    """Deserialise one image tensor and split the stacked caption pair.

    *path* arrives as bytes from tf.numpy_function, hence the decode().
    Returns (image, source caption, target caption).
    """
    return (utils.deserialise_from(path.decode()).numpy(),
            caption[0, :],
            caption[1, :])
def load_tokenizer(path):
    """
    :param path: a str - the path where preprocessed data is stored.
    :returns: the tf.Tokenizer extracted from the train data.
    """
    tokenizer_path = os.path.join(path, "train", "tokenizer.json")
    # The tokenizer JSON is UTF-8; pin the encoding instead of relying on the
    # platform default (robustness fix — e.g. cp1252 on Windows would break
    # non-ASCII vocabulary entries).
    with open(tokenizer_path, encoding="utf-8") as file:
        contents = file.read()
    return tf.keras.preprocessing.text.tokenizer_from_json(
        contents
    )
def load_captions(path, type):
    """
    :param path: a str - the path where preprocessed data is stored.
    :param type: a str - the type of captions to load. Possible values:
    'train', 'test' and 'val'.
    :returns: a dictionary mapping image ids (int) to lists of captions
    (strs) which are surrounded by the start and end meta tokens.
    """
    captions_path = os.path.join(path, type, "captions.pcl")
    return utils.deserialise_from(captions_path)
def load_images(path, type, load_as_features=False):
    """
    :param path: a str - the path where preprocessed data is stored.
    :param type: a str - the type of images to load. Possible values:
    'train', 'test' and 'val'.
    :param load_as_features: a boolean value indicating whether to
    load image features or just the preprocessed images. Defaults to
    `False`.
    :returns: a pair of a tf.data.Dataset which yields pairs of:
     - image tensors (feature vectors if `load_as_features` is set to
       `True`)
     - integers - the corresponding image ids
    and an int - the number of images in the Dataset.
    """
    data_subdir = os.path.join(path, type)
    images_dir = os.path.join(data_subdir,
                              ("features"
                               if (load_as_features)
                               else "images"))
    image_paths = [os.path.join(images_dir, name)
                   for name in os.listdir(images_dir)]
    image_dataset = tf.data.Dataset.from_tensor_slices(
        np.array(image_paths)
    )
    # Deserialise each pickled tensor lazily, inside the tf.data pipeline.
    image_dataset = image_dataset.map(
        lambda path:
        tf.numpy_function(
            _do_load_image,
            [path],
            [tf.float32, tf.int32]
        ),
        num_parallel_calls=tf.data.AUTOTUNE
    )
    return (image_dataset, len(image_paths))
def _do_load_image(path):
    """Deserialise a single image tensor and derive its integer id from the
    file name. *path* arrives as bytes from tf.numpy_function."""
    path = path.decode()
    image = utils.deserialise_from(path).numpy()
    image_id = utils.image_name_to_id(utils.short_name_for(path))
    return (image, np.array(image_id, dtype=np.int32))
def vocabulary_size(path):
    """
    :param path: a str - the path where preprocessed data is stored.
    :returns: an int - the size of the vocabulary obtained from train
    data.
    """
    return load_tokenizer(path).num_words
def features_size(path):
    """
    :param path: a str - the path where preprocessed data is stored.
    :returns: an int - the size of extracted image features.
    """
    features_dir = os.path.join(path, "train", "features")
    utils.verify_dir_exists(features_dir)
    all_features = os.listdir(features_dir)
    assert all_features
    # Inspecting any one file suffices: all feature vectors share one length.
    features_path = os.path.join(features_dir, all_features[0])
    features = utils.deserialise_from(features_path)
    assert features.ndim == 1
    return int(tf.size(features))
| 32.053476
| 72
| 0.642976
|
acfd13d3aa219635794a293b557c76ee55084b3a
| 2,372
|
py
|
Python
|
models/custom_resnet.py
|
a-pujahari/Torch_CV_Utils
|
9b8e7e0186af2244c5589a5ca81ab644f8663303
|
[
"MIT"
] | null | null | null |
models/custom_resnet.py
|
a-pujahari/Torch_CV_Utils
|
9b8e7e0186af2244c5589a5ca81ab644f8663303
|
[
"MIT"
] | null | null | null |
models/custom_resnet.py
|
a-pujahari/Torch_CV_Utils
|
9b8e7e0186af2244c5589a5ca81ab644f8663303
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Custom_ResNet(nn.Module):
    """Small ResNet-style network for 32x32 RGB inputs with 10 classes.

    Layout: prep conv -> layer1 (+residual) -> layer2 -> layer3 (+residual)
    -> 4x4 max-pool -> linear -> softmax.
    """

    def __init__(self):
        super(Custom_ResNet, self).__init__()
        self.prep_layer = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer1 = self.X_seq(64, 128, 3)
        self.resblock1 = self.resblock(128, 128, 3)
        self.layer2 = self.X_seq(128, 256, 3, 1)
        self.layer3 = self.X_seq(256, 512, 3)
        self.resblock2 = self.resblock(512, 512, 3)
        self.pool = nn.MaxPool2d(4, 4)
        self.FC = nn.Linear(512, 10, bias=False)

    def resblock(self, in_channels, out_channels, kernel_size):
        """Two conv-BN-ReLU stages at constant spatial resolution."""
        stages = []
        for src_channels in (in_channels, out_channels):
            stages.extend([
                nn.Conv2d(in_channels=src_channels, out_channels=out_channels,
                          kernel_size=kernel_size, padding=1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
            ])
        return nn.Sequential(*stages)

    def X_seq(self, in_channels, out_channels, kernel_size, padding_val=1):
        """Conv -> 2x2 max-pool -> BN -> ReLU; halves the spatial size."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=kernel_size, padding=padding_val, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, x):
        out = self.prep_layer(x)           # 32x32 -> 32x32
        out = self.layer1(out)             # 32x32 -> 16x16
        out = out + self.resblock1(out)    # residual add at 16x16
        out = self.layer2(out)             # 16x16 -> 8x8
        out = self.layer3(out)             # 8x8 -> 4x4
        out = out + self.resblock2(out)    # residual add at 4x4
        out = self.pool(out)               # 4x4 -> 1x1
        out = out.view(out.size(0), -1)
        out = self.FC(out)
        out = out.view(-1, 10)
        return F.softmax(out, dim=-1)
| 35.402985
| 132
| 0.561551
|
acfd1543d2e93e22b358207172f4f74c73ff2568
| 8,503
|
py
|
Python
|
src/osducli/config.py
|
equinor/osdu-cli
|
579922556925ea7ad759a6230498378cf724b445
|
[
"MIT"
] | 3
|
2021-08-19T05:59:39.000Z
|
2021-11-10T08:02:58.000Z
|
src/osducli/config.py
|
equinor/osdu-cli
|
579922556925ea7ad759a6230498378cf724b445
|
[
"MIT"
] | 2
|
2021-09-13T11:10:15.000Z
|
2021-11-25T13:21:54.000Z
|
src/osducli/config.py
|
equinor/osdu-cli
|
579922556925ea7ad759a6230498378cf724b445
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) Equinor ASA. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Read and modify configuration settings related to the CLI"""
import configparser
import os
import stat
from osducli.util.file import ensure_directory_exists
_UNSET = object()
# Default names
CLI_NAME = "osducli"
CLI_CONFIG_DIR = os.path.expanduser(os.path.join("~", ".{0}".format(CLI_NAME)))
CLI_ENV_VAR_PREFIX = CLI_NAME
CONFIG_SERVER = "server"
CONFIG_ENTITLEMENTS_URL = "entitlements_url"
CONFIG_FILE_URL = "file_url"
CONFIG_LEGAL_URL = "legal_url"
CONFIG_SCHEMA_URL = "schema_url"
CONFIG_SEARCH_URL = "search_url"
CONFIG_STORAGE_URL = "storage_url"
CONFIG_UNIT_URL = "unit_url"
CONFIG_WORKFLOW_URL = "workflow_url"
CONFIG_DATA_PARTITION_ID = "data_partition_id"
CONFIG_LEGAL_TAG = "legal_tag"
CONFIG_ACL_VIEWER = "acl_viewer"
CONFIG_ACL_OWNER = "acl_owner"
CONFIG_AUTHENTICATION_MODE = "authentication_mode"
CONFIG_AUTHENTICATION_AUTHORITY = "authority"
CONFIG_AUTHENTICATION_SCOPES = "scopes"
CONFIG_TOKEN_ENDPOINT = "token_endpoint"
CONFIG_REFRESH_TOKEN = "refresh_token"
CONFIG_CLIENT_ID = "client_id"
CONFIG_CLIENT_SECRET = "client_secret"
# TO DO: Add the below back in
# pylint: disable=C0115, C0116
class CLIConfig:
    """Reads and writes CLI configuration, with environment-variable overrides.

    An option ``opt`` in section ``sec`` can be overridden by the environment
    variable ``<PREFIX>_SEC_OPT``; otherwise it is read from the INI file at
    ``<config_dir>/<config_file_name>``.
    """

    # Accepted spellings for boolean option values (configparser-style).
    _BOOLEAN_STATES = {
        "1": True,
        "yes": True,
        "true": True,
        "on": True,
        "0": False,
        "no": False,
        "false": False,
        "off": False,
    }
    _DEFAULT_CONFIG_FILE_NAME = "config"
    _CONFIG_DEFAULTS_SECTION = "defaults"

    def __init__(
        self,
        config_dir,
        config_env_var_prefix,
        config_file_name=None,
    ):
        """Manages configuration options available in the CLI
        :param config_dir: The directory to store config files
        :type config_dir: str
        :param config_env_var_prefix: The prefix for config environment variables
        :type config_env_var_prefix: str
        :param config_file_name: The name given to the config file to be created
        :type config_file_name: str
        """
        # ensure_dir(config_dir)
        env_var_prefix = "{}_".format(config_env_var_prefix.upper())
        default_config_dir = os.path.expanduser(config_dir)
        # The config directory itself can be redirected via <PREFIX>_CONFIG_DIR.
        self.config_dir = os.environ.get("{}CONFIG_DIR".format(env_var_prefix), default_config_dir)
        self.config_file_name = config_file_name or CLIConfig._DEFAULT_CONFIG_FILE_NAME
        self.config_path = os.path.join(self.config_dir, self.config_file_name)
        self._env_var_format = "{}{}".format(env_var_prefix, "{section}_{option}")
        self.defaults_section_name = CLIConfig._CONFIG_DEFAULTS_SECTION
        self.config_parser = configparser.ConfigParser()
        if os.path.exists(self.config_path):
            self.config_parser.read(self.config_path)

    def env_var_name(self, section, option):
        # e.g. ("core", "server") -> "<PREFIX>_CORE_SERVER"
        return self._env_var_format.format(section=section.upper(), option=option.upper())

    def has_option(self, section, option):
        # Environment variables take precedence over the config file.
        if self.env_var_name(section, option) in os.environ:
            return True
        return self.config_parser.has_option(section, option) if self.config_parser else False

    def get(self, section, option, fallback=_UNSET):
        """Return the option value: env var first, then file, then *fallback*.
        Raises the underlying configparser error when no fallback is given."""
        env = self.env_var_name(section, option)
        if env in os.environ:
            return os.environ[env]
        last_ex = None
        try:
            if self.config_parser:
                return self.config_parser.get(section, option)
            raise configparser.NoOptionError(option, section)
        except (configparser.NoSectionError, configparser.NoOptionError) as ex:
            last_ex = ex
        if fallback is _UNSET:
            raise last_ex  # pylint:disable=raising-bad-type
        return fallback

    def sections(self):
        return self.config_parser.sections() if self.config_parser else []

    def items(self, section):
        """List {name, value, source} entries of *section*; env-var entries
        shadow same-named file entries."""
        import re

        # Only allow valid env vars, in all caps: CLI_SECTION_TEST_OPTION, CLI_SECTION__TEST_OPTION
        pattern = self.env_var_name(section, "([0-9A-Z_]+)")
        env_entries = []
        for k in os.environ:
            # Must be a full match, otherwise CLI_SECTION_T part in CLI_MYSECTION_Test_Option will match
            matched = re.fullmatch(pattern, k)
            if matched:
                # (name, value, ENV_VAR_NAME)
                item = (matched.group(1).lower(), os.environ[k], k)
                env_entries.append(item)
        # Prepare result with env entries first
        result = {c[0]: c for c in env_entries}
        # Add entries from config file if they do not exist yet
        try:
            entries = self.config_parser.items(section) if self.config_parser else []
            for name, value in entries:
                if name not in result:
                    result[name] = (name, value, self.config_path)
        except (configparser.NoSectionError, configparser.NoOptionError):
            pass
        return [
            {"name": name, "value": value, "source": source}
            for name, value, source in result.values()
        ]

    def getint(self, section, option, fallback=_UNSET):
        return int(self.get(section, option, fallback))

    def getfloat(self, section, option, fallback=_UNSET):
        return float(self.get(section, option, fallback))

    def getboolean(self, section, option, fallback=_UNSET):
        val = str(self.get(section, option, fallback))
        if val.lower() not in CLIConfig._BOOLEAN_STATES:
            raise ValueError("Not a boolean: {}".format(val))
        return CLIConfig._BOOLEAN_STATES[val.lower()]

    def set(self, config):
        """Write *config* (a ConfigParser) to disk with user-only permissions
        and re-read it into this instance."""
        ensure_directory_exists(self.config_dir)
        with open(self.config_path, "w") as configfile:
            # if self.config_comment:
            #     configfile.write(self.config_comment + '\n')
            config.write(configfile)
        # Config may hold secrets (tokens): restrict to owner read/write.
        os.chmod(self.config_path, stat.S_IRUSR | stat.S_IWUSR)
        self.config_parser.read(self.config_path)

    def set_value(self, section, option, value):
        """Set a single option, creating the section if needed, and persist."""
        config = configparser.ConfigParser()
        config.read(self.config_path)
        try:
            config.add_section(section)
        except configparser.DuplicateSectionError:
            pass
        config.set(section, option, value)
        self.set(config)
# def get_config_value(name, section=CLI_NAME, fallback=_UNSET):
# """Gets a config by name.
# In the case where the config name is not found, will use fallback value."""
# cli_config = CLIConfig(CLI_CONFIG_DIR, CLI_ENV_VAR_PREFIX)
# return cli_config.get(section, name, fallback)
# def get_config_bool(name, section=CLI_NAME, fallback=_UNSET):
# """Checks if a config value is set to a valid bool value."""
# cli_config = CLIConfig(CLI_CONFIG_DIR, CLI_ENV_VAR_PREFIX)
# return cli_config.getboolean(section, name, fallback)
# def get_config_int(name, section=CLI_NAME, fallback=_UNSET):
# """Checks if a config value is set to a valid int value."""
# cli_config = CLIConfig(CLI_CONFIG_DIR, CLI_ENV_VAR_PREFIX)
# return cli_config.getint(section, name, fallback)
# def set_config_value(name, value, section=CLI_NAME):
# """Set a config by name to a value."""
# cli_config = CLIConfig(CLI_CONFIG_DIR, CLI_ENV_VAR_PREFIX)
# cli_config.set_value(section, name, value)
def get_default_from_config(config, section, option, fallback=1):
    """Return ``config.get(section, option)``, or *fallback* when the section
    or option is missing."""
    lookup_errors = (IndexError, configparser.NoSectionError, configparser.NoOptionError)
    try:
        value = config.get(section, option)
    except lookup_errors:
        return fallback
    return value
def get_default_choice_index_from_config(config, section, option, choice_list, fallback=1):
    """Return 1-based index of the configured value within *choice_list*
    (matched on each entry's "name" key), or *fallback* when the option is
    missing or no entry matches."""
    try:
        config_val = config.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return fallback
    for position, choice in enumerate(choice_list, start=1):
        if "name" in choice and choice["name"] == config_val:
            return position
    return fallback
# def client_endpoint():
# """Cluster HTTP gateway endpoint address and port, represented as a URL."""
# return get_config_value("endpoint", None)
| 35.877637
| 104
| 0.663766
|
acfd15c4d3365e3115fd72db74f203e92c3b4ade
| 463
|
py
|
Python
|
data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_enhance_health_b.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_enhance_health_b.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_enhance_health_b.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible template for this draft-schematic object.

    Autogenerated (see file header); hand edits belong between the
    BEGIN/END MODIFICATIONS markers.
    """
    result = Intangible()
    result.template = "object/draft_schematic/chemistry/shared_medpack_enhance_health_b.iff"
    # -1: no attribute template is associated with this schematic.
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
| 27.235294
| 89
| 0.736501
|
acfd160f371f10466a3d2d1245a9ce44fdb2a65c
| 11,963
|
py
|
Python
|
api/barriers/serializers/public_barriers.py
|
uktrade/market-access-api
|
850a59880f8f62263784bcd9c6b3362e447dbc7a
|
[
"MIT"
] | null | null | null |
api/barriers/serializers/public_barriers.py
|
uktrade/market-access-api
|
850a59880f8f62263784bcd9c6b3362e447dbc7a
|
[
"MIT"
] | 51
|
2018-05-31T12:16:31.000Z
|
2022-03-08T09:36:48.000Z
|
api/barriers/serializers/public_barriers.py
|
uktrade/market-access-api
|
850a59880f8f62263784bcd9c6b3362e447dbc7a
|
[
"MIT"
] | 2
|
2019-12-24T09:47:42.000Z
|
2021-02-09T09:36:51.000Z
|
from hashid_field.rest import HashidSerializerCharField
from rest_framework import serializers
from api.barriers.fields import (
DisplayChoiceField,
NoneToBlankCharField,
ReadOnlyAllSectorsField,
ReadOnlyCategoriesField,
ReadOnlyCountryField,
ReadOnlySectorsField,
ReadOnlyStatusField,
ReadOnlyTradingBlocField,
)
from api.barriers.helpers import get_published_public_barriers
from api.barriers.models import PublicBarrier, PublicBarrierLightTouchReviews
from api.barriers.serializers.mixins import LocationFieldMixin
from api.core.serializers.mixins import AllowNoneAtToRepresentationMixin
from api.interactions.models import PublicBarrierNote
from api.interactions.serializers import PublicBarrierNoteSerializer
from api.metadata.constants import PublicBarrierStatus
from api.metadata.fields import TradingBlocField
from api.metadata.serializers import OrganisationSerializer
PUBLIC_ID = "barriers.PublicBarrier.id"
class PublicBarrierLightTouchReviewsSerializer(serializers.ModelSerializer):
    """Serializes the approval flags of a public barrier's light-touch review."""

    class Meta:
        model = PublicBarrierLightTouchReviews
        fields = (
            "content_team_approval",
            "has_content_changed_since_approval",
            "hm_trade_commissioner_approval",
            "hm_trade_commissioner_approval_enabled",
            "government_organisation_approvals",
            "missing_government_organisation_approvals",
            "enabled",
        )
        # Derived on the model; clients cannot set them directly.
        read_only_fields = ("missing_government_organisation_approvals", "enabled")
class NestedPublicBarrierSerializer(serializers.ModelSerializer):
    """
    Simple serializer for use within BarrierDetailSerializer.
    """

    # Expose the obfuscated hashid rather than the numeric primary key.
    id = HashidSerializerCharField(source_field=PUBLIC_ID, read_only=True)
    title = NoneToBlankCharField()
    summary = NoneToBlankCharField()
    unpublished_changes = serializers.SerializerMethodField()
    # Human-readable label alongside the raw public_view_status value.
    public_view_status_display = DisplayChoiceField(
        source="public_view_status", choices=PublicBarrierStatus.choices
    )

    class Meta:
        model = PublicBarrier
        fields = (
            "id",
            "public_view_status",
            "public_view_status_display",
            "title",
            "summary",
            "unpublished_changes",
            "last_published_on",
        )

    def get_unpublished_changes(self, obj):
        # Delegates to the model property of the same name.
        return obj.unpublished_changes
class PublicBarrierSerializer(
    AllowNoneAtToRepresentationMixin, serializers.ModelSerializer
):
    """
    Generic serializer for barrier public data.
    """

    # Public hashid instead of the numeric primary key.
    id = HashidSerializerCharField(source_field=PUBLIC_ID, read_only=True)
    title = NoneToBlankCharField()
    summary = NoneToBlankCharField()
    # "internal_*" fields mirror the underlying (non-public) barrier so the
    # editor UI can show what has diverged from the published copy.
    internal_title_changed = serializers.SerializerMethodField()
    internal_summary_changed = serializers.SerializerMethodField()
    internal_government_organisations = serializers.SerializerMethodField()
    status = ReadOnlyStatusField()
    internal_status = ReadOnlyStatusField()
    country = ReadOnlyCountryField()
    internal_country = ReadOnlyCountryField()
    trading_bloc = TradingBlocField()
    internal_trading_bloc = TradingBlocField()
    sectors = ReadOnlySectorsField()
    internal_sectors = ReadOnlySectorsField()
    all_sectors = ReadOnlyAllSectorsField()
    internal_all_sectors = ReadOnlyAllSectorsField()
    categories = ReadOnlyCategoriesField()
    internal_categories = ReadOnlyCategoriesField()
    latest_published_version = serializers.SerializerMethodField()
    unpublished_changes = serializers.SerializerMethodField()
    ready_to_be_published = serializers.SerializerMethodField()
    internal_code = serializers.SerializerMethodField()
    internal_id = serializers.SerializerMethodField()
    latest_note = serializers.SerializerMethodField()
    reported_on = serializers.DateTimeField(source="internal_created_on")
    light_touch_reviews = PublicBarrierLightTouchReviewsSerializer()

    class Meta:
        model = PublicBarrier
        fields = (
            "id",
            "internal_code",
            "internal_id",
            "title",
            "title_updated_on",
            "internal_title_changed",
            "internal_title_at_update",
            "summary",
            "summary_updated_on",
            "internal_summary_changed",
            "internal_summary_at_update",
            "status",
            "internal_status",
            "internal_status_changed",
            "status_date",
            "internal_status_date",
            "internal_status_date_changed",
            "is_resolved",
            "internal_is_resolved",
            "internal_is_resolved_changed",
            "country",
            "internal_country",
            "internal_country_changed",
            "trading_bloc",
            "internal_trading_bloc",
            "internal_trading_bloc_changed",
            "location",
            "internal_location",
            "internal_location_changed",
            "sectors",
            "internal_sectors",
            "internal_sectors_changed",
            "all_sectors",
            "internal_all_sectors",
            "internal_all_sectors_changed",
            "categories",
            "internal_categories",
            "internal_categories_changed",
            "public_view_status",
            "first_published_on",
            "last_published_on",
            "unpublished_on",
            "latest_published_version",
            "unpublished_changes",
            "ready_to_be_published",
            "internal_government_organisations",
            "latest_note",
            "reported_on",
            "light_touch_reviews",
        )
        # Everything except title/summary/light_touch_reviews is read-only:
        # the public copy is edited only through those fields.
        read_only_fields = (
            "id",
            "internal_code",
            "internal_id",
            "title_updated_on",
            "internal_title_changed",
            "internal_title_at_update",
            "summary_updated_on",
            "internal_summary_changed",
            "internal_summary_at_update",
            "status",
            "internal_status",
            "internal_status_changed",
            "status_date",
            "internal_status_date",
            "internal_status_date_changed",
            "is_resolved",
            "internal_is_resolved",
            "internal_is_resolved_changed",
            "country",
            "internal_country",
            "internal_country_changed",
            "trading_bloc",
            "internal_trading_bloc",
            "internal_trading_bloc_changed",
            "location",
            "internal_location",
            "internal_location_changed",
            "sectors",
            "internal_sectors",
            "internal_sectors_changed",
            "all_sectors",
            "internal_all_sectors",
            "internal_all_sectors_changed",
            "categories",
            "internal_categories",
            "internal_categories_changed",
            "public_view_status",
            "first_published_on",
            "last_published_on",
            "unpublished_on",
            "latest_published_version",
            "unpublished_changes",
            "ready_to_be_published",
            "internal_government_organisations",
            "latest_note",
            "reported_on",
        )

    def get_internal_title_changed(self, obj):
        return obj.internal_title_changed

    def get_internal_summary_changed(self, obj):
        return obj.internal_summary_changed

    def get_latest_published_version(self, obj):
        return PublishedVersionSerializer(obj.latest_published_version).data

    def get_unpublished_changes(self, obj):
        return obj.unpublished_changes

    def get_ready_to_be_published(self, obj):
        return obj.ready_to_be_published

    def get_internal_code(self, obj):
        # Human-readable code of the underlying internal barrier.
        return obj.barrier.code

    def get_internal_id(self, obj):
        return obj.barrier_id

    def get_internal_government_organisations(self, obj):
        return OrganisationSerializer(obj.barrier.organisations, many=True).data

    def get_latest_note(self, obj):
        """Most recent note on the public barrier, or None when there are none."""
        try:
            # We need to perform Python sorting instead of SQL
            # as otherwise the prefetch would not get used
            note = sorted(
                list(obj.notes.all()), key=lambda note: note.created_on, reverse=True
            )[0]
            return PublicBarrierNoteSerializer(note).data
        except IndexError:
            return None
        except PublicBarrierNote.DoesNotExist:
            return None
class PublishedVersionSerializer(
    LocationFieldMixin, AllowNoneAtToRepresentationMixin, serializers.ModelSerializer
):
    """
    Serializer to be used with DMAS FE app
    """

    id = serializers.CharField()
    title = serializers.CharField()
    summary = serializers.CharField()
    is_resolved = serializers.BooleanField()
    country = ReadOnlyCountryField()
    # Derived by LocationFieldMixin from country/trading bloc data.
    location = serializers.CharField()
    sectors = ReadOnlySectorsField()
    all_sectors = ReadOnlyAllSectorsField()
    categories = ReadOnlyCategoriesField()

    class Meta:
        model = PublicBarrier
        fields = (
            "id",
            "title",
            "summary",
            "is_resolved",
            "status_date",
            "country",
            "location",
            "sectors",
            "all_sectors",
            "categories",
        )
class PublicPublishedVersionSerializer(
    LocationFieldMixin, AllowNoneAtToRepresentationMixin, serializers.ModelSerializer
):
    """
    Serializer to be used with gov.uk
    """

    # Public hashid instead of the numeric primary key.
    id = HashidSerializerCharField(source_field=PUBLIC_ID, read_only=True)
    title = serializers.CharField()
    summary = serializers.CharField()
    # Restrict nested representations to the keys the public site needs.
    country = ReadOnlyCountryField(to_repr_keys=("name", "trading_bloc"))
    trading_bloc = ReadOnlyTradingBlocField()
    sectors = serializers.SerializerMethodField()
    categories = ReadOnlyCategoriesField(to_repr_keys=("name",))
    reported_on = serializers.DateTimeField(source="internal_created_on")

    class Meta:
        model = PublicBarrier
        fields = (
            "id",
            "title",
            "summary",
            "is_resolved",
            "status_date",
            "country",
            # "caused_by_country_trading_bloc",
            "caused_by_trading_bloc",
            "trading_bloc",
            "location",
            "sectors",
            "categories",
            "last_published_on",
            "reported_on",
        )

    def get_sectors(self, obj):
        # "All sectors" collapses the list to a single marker entry.
        if obj.all_sectors:
            return [{"name": "All sectors"}]
        else:
            return ReadOnlySectorsField(to_repr_keys=("name",)).to_representation(
                obj.sectors
            )
def public_barriers_to_json(public_barriers=None):
    """
    Helper to serialize latest published version of published barriers.
    Public Barriers in the flat file should look similar.
    {
        "barriers": [
            {
                "id": "kjdfhkzx",
                "title": "Belgian chocolate...",
                "summary": "Lorem ipsum",
                "status": {"name": "Open: in progress",}
                "country": {"name": "Belgium",}
                "caused_by_trading_bloc": false,
                "trading_bloc": null,
                "location": "Belgium"
                "sectors: [
                    {"name": "Automotive"}
                ],
                "categories": [
                    {"name": "Goods and Services"}
                ],
                "last_published_on: "date",
                "reported_on": "date"
            }
        ]
    }
    If all sectors is true, use the sectors key to represent that as follows:
        "sectors: [{"name": "All sectors"}],
    """
    if public_barriers is None:
        # Default: the latest published version of every published barrier.
        public_barriers = (
            pb.latest_published_version for pb in get_published_public_barriers()
        )
    serializer = PublicPublishedVersionSerializer(public_barriers, many=True)
    return serializer.data
| 33.509804
| 85
| 0.630862
|
acfd16d63737a1b31d63f9c0feb4f2d0ac479798
| 490
|
py
|
Python
|
doctor_app/migrations/0006_auto_20200811_0039.py
|
JuliasBright/SendMoney
|
d13e2df81bf75a9154abfc57d897a416b4950e80
|
[
"CC0-1.0"
] | 1
|
2021-01-29T16:57:42.000Z
|
2021-01-29T16:57:42.000Z
|
doctor_app/migrations/0006_auto_20200811_0039.py
|
JuliasBright/SendMoney
|
d13e2df81bf75a9154abfc57d897a416b4950e80
|
[
"CC0-1.0"
] | null | null | null |
doctor_app/migrations/0006_auto_20200811_0039.py
|
JuliasBright/SendMoney
|
d13e2df81bf75a9154abfc57d897a416b4950e80
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-10 19:09
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: updates the default on doctor_register.date.

    NOTE(review): the default is the timestamp frozen at makemigrations time,
    not "now" at insert time — presumably auto_now_add was intended; confirm
    with the model definition.
    """

    dependencies = [
        ('doctor_app', '0005_auto_20200811_0037'),
    ]

    operations = [
        migrations.AlterField(
            model_name='doctor_register',
            name='date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 8, 11, 0, 39, 48, 26996)),
        ),
    ]
| 24.5
| 110
| 0.604082
|
acfd16fb74922d0ae51e30c58a31661466577f50
| 67,276
|
py
|
Python
|
core/domain/skill_services_test.py
|
lheureuxe13/oppia
|
7110e3e5d5a53527c31d7b33e14d25e8d5b981f9
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:53.000Z
|
2022-02-06T13:00:14.000Z
|
core/domain/skill_services_test.py
|
lheureuxe13/oppia
|
7110e3e5d5a53527c31d7b33e14d25e8d5b981f9
|
[
"Apache-2.0"
] | 80
|
2020-10-31T09:14:46.000Z
|
2021-01-12T23:38:15.000Z
|
core/domain/skill_services_test.py
|
lheureuxe13/oppia
|
7110e3e5d5a53527c31d7b33e14d25e8d5b981f9
|
[
"Apache-2.0"
] | 1
|
2020-10-02T13:28:26.000Z
|
2020-10-02T13:28:26.000Z
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the methods defined in skill services."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from core import feconf
from core.constants import constants
from core.domain import config_services
from core.domain import question_domain
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import skill_services
from core.domain import state_domain
from core.domain import suggestion_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
# Resolve the storage-layer model modules for skills and suggestions via
# the platform registry (indirection lets tests swap storage backends).
(skill_models, suggestion_models) = models.Registry.import_models(
    [models.NAMES.skill, models.NAMES.suggestion])
class SkillServicesUnitTests(test_utils.GenericTestBase):
"""Test the skill services module."""
SKILL_ID = None
USER_ID = 'user'
MISCONCEPTION_ID_1 = 1
MISCONCEPTION_ID_2 = 2
def setUp(self):
    """Creates users (one curriculum-admin pair plus a normal user) and a
    single skill with one misconception, one worked example and two
    prerequisite skill ids; stores ids on ``self`` for the tests below.
    """
    super(SkillServicesUnitTests, self).setUp()
    example_1 = skill_domain.WorkedExample(
        state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
        state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
    )
    # Content ids '1'-'3' cover the explanation and the worked example's
    # question/explanation; the voiceover/translation maps must mirror them.
    skill_contents = skill_domain.SkillContents(
        state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1],
        state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {
                '1': {}, '2': {}, '3': {}
            }
        }),
        state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                '1': {}, '2': {}, '3': {}
            }
        })
    )
    misconceptions = [skill_domain.Misconception(
        self.MISCONCEPTION_ID_1, 'name', '<p>description</p>',
        '<p>default_feedback</p>', True)]
    # Page size used by the filtered-summary queries in several tests.
    self.num_queries_to_fetch = 10
    self.SKILL_ID = skill_services.get_new_skill_id()
    self.SKILL_ID2 = skill_services.get_new_skill_id()
    self.SKILL_ID3 = skill_services.get_new_skill_id()
    self.signup('a@example.com', 'A')
    self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
    self.signup('admin2@example.com', 'adm2')
    self.user_id_a = self.get_user_id_from_email('a@example.com')
    self.user_id_admin = (
        self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL))
    self.user_id_admin_2 = self.get_user_id_from_email('admin2@example.com')
    self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME, 'adm2'])
    self.user_a = user_services.get_user_actions_info(self.user_id_a)
    self.user_admin = user_services.get_user_actions_info(
        self.user_id_admin)
    self.user_admin_2 = user_services.get_user_actions_info(
        self.user_id_admin_2)
    # Only SKILL_ID is saved here; SKILL_ID2/SKILL_ID3 are created lazily
    # by the tests that need extra skills.
    self.skill = self.save_new_skill(
        self.SKILL_ID, self.USER_ID, description='Description',
        misconceptions=misconceptions,
        skill_contents=skill_contents,
        prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
def test_apply_change_list_with_invalid_property_name(self):
    """apply_change_list rejects a misconception-property update whose
    property name is not one of the recognised constants.
    """

    # Minimal stand-in: only the attributes apply_change_list reads.
    class MockSkillChange:
        def __init__(self, cmd, property_name):
            self.cmd = cmd
            self.property_name = property_name

    invalid_skill_change_list = [MockSkillChange(
        skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
        'invalid_property_name')]
    with self.assertRaisesRegexp(Exception, 'Invalid change dict.'):
        skill_services.apply_change_list(
            self.SKILL_ID, invalid_skill_change_list, self.user_id_a)
def test_compute_summary(self):
    """Verifies the computed summary mirrors the skill saved in setUp."""
    summary = skill_services.compute_summary_of_skill(self.skill)
    expected_attrs = (
        ('id', self.SKILL_ID),
        ('description', 'Description'),
        ('misconception_count', 1),
        ('worked_examples_count', 1),
    )
    for attr, want in expected_attrs:
        self.assertEqual(getattr(summary, attr), want)
def test_get_image_filenames_from_skill(self):
    """Filenames referenced by <oppia-noninteractive-image> tags in both
    the explanation and the worked examples are collected.
    """
    # Escaped-attribute RTE markup; one image in the explanation ...
    explanation_html = (
        'Explanation with image: <oppia-noninteractive-image '
        'filepath-with-value=""img.svg"" caption-with-value='
        '"""" alt-with-value=""Image"">'
        '</oppia-noninteractive-image>'
    )
    # ... and a second image inside the worked example's explanation.
    example_explanation_html = (
        'Explanation with image: <oppia-noninteractive-image '
        'filepath-with-value=""img2.svg"" caption-with-value='
        '"""" alt-with-value=""Image"">'
        '</oppia-noninteractive-image>'
    )
    example_1 = skill_domain.WorkedExample(
        state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
        state_domain.SubtitledHtml('3', example_explanation_html)
    )
    self.skill.skill_contents = skill_domain.SkillContents(
        state_domain.SubtitledHtml('1', explanation_html), [example_1],
        state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {
                '1': {}, '2': {}, '3': {}
            }
        }),
        state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                '1': {}, '2': {}, '3': {}
            }
        })
    )
    filenames = skill_services.get_image_filenames_from_skill(self.skill)
    self.assertItemsEqual(filenames, ['img.svg', 'img2.svg'])
def test_get_new_skill_id(self):
    """A freshly minted skill id is 12 characters and unused in storage."""
    fresh_id = skill_services.get_new_skill_id()
    self.assertEqual(len(fresh_id), 12)
    self.assertIsNone(skill_models.SkillModel.get_by_id(fresh_id))
def test_get_descriptions_of_skills(self):
    """Descriptions are returned keyed by skill id; deleted skills are
    reported separately and map to None.
    """
    example_1 = skill_domain.WorkedExample(
        state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
        state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
    )
    self.save_new_skill(
        'skill_id_1', self.user_id_admin, description='Description 1',
        misconceptions=[],
        skill_contents=skill_domain.SkillContents(
            state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
            [example_1],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            })
        )
    )
    self.save_new_skill(
        'skill_id_2', self.user_id_admin, description='Description 2',
        misconceptions=[],
        skill_contents=skill_domain.SkillContents(
            state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
            [example_1],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            })
        )
    )
    # Delete the second skill so the lookup exercises the deleted path.
    skill_services.delete_skill(self.user_id_admin, 'skill_id_2')
    skill_descriptions, deleted_skill_ids = (
        skill_services.get_descriptions_of_skills(
            ['skill_id_1', 'skill_id_2']))
    self.assertEqual(deleted_skill_ids, ['skill_id_2'])
    self.assertEqual(
        skill_descriptions, {
            'skill_id_1': 'Description 1',
            'skill_id_2': None
        }
    )
def test_get_rubrics_of_linked_skills(self):
    """Rubrics are returned per skill id; deleted skills are reported
    separately and map to None.
    """
    example_1 = skill_domain.WorkedExample(
        state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
        state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
    )
    self.save_new_skill(
        'skill_id_1', self.user_id_admin, description='Description 1',
        misconceptions=[],
        skill_contents=skill_domain.SkillContents(
            state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
            [example_1],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            })
        )
    )
    self.save_new_skill(
        'skill_id_2', self.user_id_admin, description='Description 2',
        misconceptions=[],
        skill_contents=skill_domain.SkillContents(
            state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
            [example_1],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            })
        )
    )
    skill_services.delete_skill(self.user_id_admin, 'skill_id_2')
    skill_rubrics, deleted_skill_ids = (
        skill_services.get_rubrics_of_skills(
            ['skill_id_1', 'skill_id_2']))
    self.assertEqual(deleted_skill_ids, ['skill_id_2'])
    # save_new_skill's default rubrics: one explanation per difficulty.
    self.assertEqual(
        skill_rubrics, {
            'skill_id_1': [
                skill_domain.Rubric(
                    constants.SKILL_DIFFICULTIES[0], ['Explanation 1']
                ).to_dict(),
                skill_domain.Rubric(
                    constants.SKILL_DIFFICULTIES[1], ['Explanation 2']
                ).to_dict(),
                skill_domain.Rubric(
                    constants.SKILL_DIFFICULTIES[2], ['Explanation 3']
                ).to_dict()],
            'skill_id_2': None
        }
    )
def test_get_skill_from_model(self):
    """Round-trips the skill through its storage-model representation."""
    stored = skill_models.SkillModel.get(self.SKILL_ID)
    rehydrated = skill_fetchers.get_skill_from_model(stored)
    self.assertEqual(rehydrated.to_dict(), self.skill.to_dict())
def test_get_skill_summary_from_model(self):
    """Builds a domain summary from the summary model and spot-checks it."""
    summary_model = skill_models.SkillSummaryModel.get(self.SKILL_ID)
    summary = skill_services.get_skill_summary_from_model(summary_model)
    self.assertEqual(
        (summary.id, summary.description,
         summary.misconception_count, summary.worked_examples_count),
        (self.SKILL_ID, 'Description', 1, 1))
def test_get_all_skill_summaries(self):
    """Only the single skill created in setUp should be summarised."""
    summaries = skill_services.get_all_skill_summaries()
    self.assertEqual(len(summaries), 1)
    only_summary = summaries[0]
    self.assertEqual(only_summary.id, self.SKILL_ID)
    self.assertEqual(only_summary.description, 'Description')
    self.assertEqual(only_summary.misconception_count, 1)
    self.assertEqual(only_summary.worked_examples_count, 1)
def test_commit_log_entry(self):
    """The initial save produces a version-1 'create' commit log entry."""
    entry = skill_models.SkillCommitLogEntryModel.get_commit(
        self.SKILL_ID, 1)
    self.assertEqual(
        (entry.commit_type, entry.skill_id, entry.user_id),
        ('create', self.SKILL_ID, self.USER_ID))
def test_get_skill_summary_by_id(self):
    """Fetching the summary by id returns the fields saved in setUp."""
    summary = skill_services.get_skill_summary_by_id(self.SKILL_ID)
    checks = (
        ('id', self.SKILL_ID),
        ('description', 'Description'),
        ('misconception_count', 1),
    )
    for attr, want in checks:
        self.assertEqual(getattr(summary, attr), want)
def test_get_filtered_skill_summaries(self):
    """Exercises sort-order options of get_filtered_skill_summaries and a
    classroom filter that matches nothing.
    """
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Description2',
        prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
    # No filters: both skills, default (most recently created first) order.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None, None, None))
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
    self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID)
    # Classroom 'english' does not exist, so nothing matches.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            1, None, 'english', None, None, None))
    self.assertEqual(len(augmented_skill_summaries), 0)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None,
            'Oldest Created', None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID)
    self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None,
            'Most Recently Updated', None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
    self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None,
            'Least Recently Updated', None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID)
    self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2)
def test_cursor_behaves_correctly_when_fetching_skills_in_batches(self):
    """A page-size-1 query over three skills returns a usable cursor, and
    the follow-up query drains the remainder.
    """
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Description2',
        prerequisite_skill_ids=[])
    self.save_new_skill(
        self.SKILL_ID3, self.USER_ID, description='Description3',
        prerequisite_skill_ids=[])
    # First page: asking for 1 yields 2 summaries here (page size is a
    # lower bound in this API) plus a cursor for the rest.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            1, None, None, None, None, None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertIsInstance(next_cursor, str)
    self.assertTrue(more)
    # Second page: the remaining skill, no further cursor.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None, None, next_cursor))
    self.assertEqual(len(augmented_skill_summaries), 1)
    self.assertIsNone(next_cursor)
    self.assertFalse(more)
def test_filter_skills_by_status_all(self):
    """Status filter 'All' behaves the same as no status filter."""
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Description2',
        prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None,
            None, None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, 'All', None, None,
            None, None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
def test_filter_skills_by_status_assigned(self):
    """'Assigned' matches only skills linked to a topic, and the returned
    summary carries the topic name.
    """
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Description2',
        prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
    # Nothing is assigned to any topic yet.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, 'Assigned', None, None, None, None))
    self.assertEqual(len(augmented_skill_summaries), 0)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    # Assign SKILL_ID2 to a topic via its uncategorized skill list.
    topic_id = topic_fetchers.get_new_topic_id()
    self.save_new_topic(
        topic_id, self.USER_ID, name='topic1',
        abbreviated_name='topic-one', url_fragment='topic-one',
        description='Description',
        canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[self.SKILL_ID2],
        subtopics=[], next_subtopic_id=1)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, 'Assigned', None,
            None, None, None))
    self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1'])
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
def test_filter_skills_by_status_unassigned(self):
    """'Unassigned' matches skills not linked to any topic — both skills
    here, since no topics exist.
    """
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Description2',
        prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, 'Unassigned', None, None,
            None, None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
def test_filter_skills_by_classroom_name(self):
    """Classroom filtering matches skills whose topic belongs to the named
    classroom (configured via the classroom_pages_data property).
    """
    # No classroom named 'english' exists yet, so nothing matches.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, 'english', None, None, None))
    self.assertEqual(len(augmented_skill_summaries), 0)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Description2',
        prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
    topic_id = topic_fetchers.get_new_topic_id()
    self.save_new_topic(
        topic_id, self.USER_ID, name='topic1',
        abbreviated_name='topic-two', url_fragment='topic-two',
        description='Description',
        canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[self.SKILL_ID2],
        subtopics=[], next_subtopic_id=1)
    # Register a 'math' classroom containing the topic.
    config_services.set_property(
        self.user_id_admin, 'classroom_pages_data', [{
            'url_fragment': 'math',
            'name': 'math',
            'topic_ids': [topic_id],
            'topic_list_intro': 'Topics Covered',
            'course_details': 'Course Details'
        }]
    )
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, 'math', None,
            None, None))
    self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1'])
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
    self.assertEqual(
        augmented_skill_summaries[0].classroom_names, ['math'])
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
def test_filter_skills_by_keywords(self):
    """Keyword filtering does prefix matching on skill descriptions; an
    empty keyword list matches everything and multiple keywords union.
    """
    self.save_new_skill(
        self.SKILL_ID2, self.USER_ID, description='Alpha',
        misconceptions=None,
        skill_contents=None,
        prerequisite_skill_ids=[])
    self.save_new_skill(
        self.SKILL_ID3, self.USER_ID, description='Beta',
        misconceptions=None,
        skill_contents=None,
        prerequisite_skill_ids=[])
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, None, None, None))
    self.assertEqual(len(augmented_skill_summaries), 3)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    # Unmatched keyword -> no results.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            1, None, None, ['Non_existent'],
            'Least Recently Updated', None))
    self.assertEqual(len(augmented_skill_summaries), 0)
    # Empty keyword list -> no keyword filtering at all.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, [], None, None))
    self.assertEqual(len(augmented_skill_summaries), 3)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    # Prefix 'descr' matches only 'Description'.
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, ['descr'], None, None))
    self.assertEqual(len(augmented_skill_summaries), 1)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, ['alph'], None, None))
    self.assertEqual(len(augmented_skill_summaries), 1)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, ['bet'], None, None))
    self.assertEqual(len(augmented_skill_summaries), 1)
    self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID3)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
    # Two keywords match two different skills (union semantics).
    augmented_skill_summaries, next_cursor, more = (
        skill_services.get_filtered_skill_summaries(
            self.num_queries_to_fetch, None, None, ['alp', 'bet'],
            None, None))
    self.assertEqual(len(augmented_skill_summaries), 2)
    self.assertEqual(next_cursor, None)
    self.assertFalse(more)
def test_get_all_topic_assignments_for_skill(self):
    """Assignments are reported both for an uncategorized linkage
    (subtopic_id None) and a subtopic linkage (subtopic_id set).
    """
    topic_id = topic_fetchers.get_new_topic_id()
    topic_id_1 = topic_fetchers.get_new_topic_id()
    # Topic1: skill sits in the uncategorized list.
    self.save_new_topic(
        topic_id, self.USER_ID, name='Topic1',
        abbreviated_name='topic-three', url_fragment='topic-three',
        description='Description',
        canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[self.SKILL_ID],
        subtopics=[], next_subtopic_id=1)
    # Topic2: skill sits inside subtopic 1.
    subtopic = topic_domain.Subtopic.from_dict({
        'id': 1,
        'title': 'subtopic1',
        'skill_ids': [self.SKILL_ID],
        'thumbnail_filename': None,
        'thumbnail_bg_color': None,
        'thumbnail_size_in_bytes': None,
        'url_fragment': 'subtopic-one'
    })
    self.save_new_topic(
        topic_id_1, self.USER_ID, name='Topic2',
        abbreviated_name='topic-four', url_fragment='topic-four',
        description='Description2', canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[],
        subtopics=[subtopic], next_subtopic_id=2)
    topic_assignments = (
        skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID))
    # Sort for a deterministic order before asserting positionally.
    topic_assignments = sorted(
        topic_assignments, key=lambda i: i.topic_name)
    self.assertEqual(len(topic_assignments), 2)
    self.assertEqual(topic_assignments[0].topic_name, 'Topic1')
    self.assertEqual(topic_assignments[0].topic_id, topic_id)
    self.assertEqual(topic_assignments[0].topic_version, 1)
    self.assertIsNone(topic_assignments[0].subtopic_id)
    self.assertEqual(topic_assignments[1].topic_name, 'Topic2')
    self.assertEqual(topic_assignments[1].topic_id, topic_id_1)
    self.assertEqual(topic_assignments[1].topic_version, 1)
    self.assertEqual(topic_assignments[1].subtopic_id, 1)
def test_remove_skill_from_all_topics(self):
    """Removing a skill clears both uncategorized and subtopic linkages
    across every topic.
    """
    topic_id = topic_fetchers.get_new_topic_id()
    topic_id_1 = topic_fetchers.get_new_topic_id()
    self.save_new_topic(
        topic_id, self.USER_ID, name='Topic1',
        abbreviated_name='topic-five', url_fragment='topic-five',
        description='Description',
        canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[self.SKILL_ID],
        subtopics=[], next_subtopic_id=1)
    subtopic = topic_domain.Subtopic.from_dict({
        'id': 1,
        'title': 'subtopic1',
        'skill_ids': [self.SKILL_ID],
        'thumbnail_filename': None,
        'thumbnail_bg_color': None,
        'thumbnail_size_in_bytes': None,
        'url_fragment': 'subtopic-one'
    })
    self.save_new_topic(
        topic_id_1, self.USER_ID, name='Topic2',
        abbreviated_name='topic-six', url_fragment='topic-six',
        description='Description2', canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[],
        subtopics=[subtopic], next_subtopic_id=2)
    skill_services.remove_skill_from_all_topics(self.USER_ID, self.SKILL_ID)
    topic_assignments_dict = (
        skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID))
    self.assertEqual(len(topic_assignments_dict), 0)
def test_successfully_replace_skill_id_in_all_topics(self):
    """Replacing a skill id re-points every topic linkage (uncategorized
    and subtopic) to the new id.
    """
    topic_id = topic_fetchers.get_new_topic_id()
    topic_id_1 = topic_fetchers.get_new_topic_id()
    self.save_new_topic(
        topic_id, self.USER_ID, name='Topic1',
        abbreviated_name='topic-five', url_fragment='topic-five',
        description='Description',
        canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[self.SKILL_ID],
        subtopics=[], next_subtopic_id=1)
    subtopic = topic_domain.Subtopic.from_dict({
        'id': 1,
        'title': 'subtopic1',
        'skill_ids': [self.SKILL_ID],
        'thumbnail_filename': None,
        'thumbnail_bg_color': None,
        'thumbnail_size_in_bytes': None,
        'url_fragment': 'subtopic-one'
    })
    self.save_new_topic(
        topic_id_1, self.USER_ID, name='Topic2',
        abbreviated_name='topic-six', url_fragment='topic-six',
        description='Description2', canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[],
        subtopics=[subtopic], next_subtopic_id=2)
    # Sanity check: the new id has no assignments before the replace.
    topic_assignments_dict = (
        skill_services.get_all_topic_assignments_for_skill('new_skill_id'))
    self.assertEqual(len(topic_assignments_dict), 0)
    skill_services.replace_skill_id_in_all_topics(
        self.USER_ID, self.SKILL_ID, 'new_skill_id')
    topic_assignments_dict = (
        skill_services.get_all_topic_assignments_for_skill('new_skill_id'))
    self.assertEqual(len(topic_assignments_dict), 2)
def test_failure_replace_skill_id_in_all_topics(self):
    """Replacement fails when a topic already contains both the old and
    the new skill id.
    """
    topic_id = topic_fetchers.get_new_topic_id()
    self.save_new_topic(
        topic_id, self.USER_ID, name='Topic1',
        abbreviated_name='topic-five', url_fragment='topic-five',
        description='Description',
        canonical_story_ids=[],
        additional_story_ids=[],
        uncategorized_skill_ids=[self.SKILL_ID, 'new_skill_id'],
        subtopics=[], next_subtopic_id=1)
    error_message = (
        'Found topic \'Topic1\' contains the two skills to be merged. '
        'Please unassign one of these skills from topic '
        'and retry this operation.')
    with self.assertRaisesRegexp(Exception, error_message):
        skill_services.replace_skill_id_in_all_topics(
            self.USER_ID, self.SKILL_ID, 'new_skill_id')
def test_update_skill(self):
    """Applies a mixed changelist — add/rename a misconception, toggle
    must_be_addressed, add/delete prerequisites, rewrite two rubrics —
    and verifies the persisted skill and its summary.
    """
    changelist = [
        # New misconception gets id next_misconception_id (2).
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_ADD_SKILL_MISCONCEPTION,
            'new_misconception_dict': {
                'id': self.skill.next_misconception_id,
                'name': 'test name',
                'notes': '<p>test notes</p>',
                'feedback': '<p>test feedback</p>',
                'must_be_addressed': True
            }
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME),
            'misconception_id': self.skill.next_misconception_id,
            'old_value': 'test name',
            'new_value': 'Name'
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED
            ),
            'misconception_id': self.skill.next_misconception_id,
            'old_value': True,
            'new_value': False
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL,
            'skill_id': 'skill_id_3'
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL,
            'skill_id': 'skill_id_1'
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_RUBRICS,
            'difficulty': constants.SKILL_DIFFICULTIES[0],
            'explanations': [
                '<p>New Explanation 1</p>', '<p>New Explanation 2</p>']
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_RUBRICS,
            'difficulty': constants.SKILL_DIFFICULTIES[1],
            'explanations': ['<p>Explanation</p>']
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist,
        'Updated misconception name.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID)
    # The whole changelist commits as a single new version.
    self.assertEqual(skill_summary.misconception_count, 2)
    self.assertEqual(skill_summary.version, 2)
    self.assertEqual(skill.version, 2)
    self.assertEqual(
        skill.prerequisite_skill_ids, ['skill_id_2', 'skill_id_3'])
    self.assertEqual(skill.misconceptions[1].name, 'Name')
    self.assertEqual(skill.misconceptions[1].must_be_addressed, False)
    self.assertEqual(
        skill.rubrics[0].explanations, [
            '<p>New Explanation 1</p>', '<p>New Explanation 2</p>'])
    self.assertEqual(skill.rubrics[1].explanations, ['<p>Explanation</p>'])
def test_merge_skill(self):
    """Marking a skill as merged sets superseding_skill_id and leaves
    all_questions_merged False until the questions are moved.
    """
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': (
                skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
            'old_value': '',
            'new_value': 'TestSkillId'
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': (
                skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED),
            'old_value': None,
            'new_value': False
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist,
        'Merging skill.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(skill.version, 2)
    self.assertEqual(skill.superseding_skill_id, 'TestSkillId')
    self.assertEqual(skill.all_questions_merged, False)
def test_set_merge_complete_for_skill(self):
    """Flipping all_questions_merged to True records merge completion."""
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': (
                skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
            'old_value': None,
            'new_value': self.SKILL_ID
        }),
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': (
                skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED),
            'old_value': False,
            'new_value': True
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist,
        'Setting merge complete for skill.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(skill.version, 2)
    self.assertEqual(skill.all_questions_merged, True)
def test_get_merged_skill_ids(self):
    """get_merged_skill_ids returns exactly the skills that have a
    superseding_skill_id set.
    """
    skill_ids = skill_services.get_merged_skill_ids()
    self.assertEqual(len(skill_ids), 0)
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': (
                skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
            'old_value': '',
            'new_value': 'TestSkillId'
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist,
        'Merging skill.')
    skill_ids = skill_services.get_merged_skill_ids()
    self.assertEqual(len(skill_ids), 1)
    self.assertEqual(skill_ids[0], self.SKILL_ID)
def test_delete_skill(self):
    """Deleting a skill removes both the skill and its summary."""
    skill_services.delete_skill(self.USER_ID, self.SKILL_ID)
    self.assertIsNone(
        skill_fetchers.get_skill_by_id(self.SKILL_ID, strict=False))
    self.assertIsNone(
        skill_services.get_skill_summary_by_id(
            self.SKILL_ID, strict=False))
def test_delete_skill_marked_deleted(self):
    """force_deletion removes even a model already soft-deleted."""
    # Soft-delete first: the model survives but is flagged deleted.
    skill_models.SkillModel.delete_multi(
        [self.SKILL_ID], self.USER_ID, '', force_deletion=False)
    skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
    self.assertTrue(skill_model.deleted)
    # Hard delete: the model and its summary are gone entirely.
    skill_services.delete_skill(
        self.USER_ID, self.SKILL_ID, force_deletion=True)
    skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
    self.assertEqual(skill_model, None)
    self.assertEqual(
        skill_services.get_skill_summary_by_id(
            self.SKILL_ID, strict=False), None)
def test_delete_skill_model_with_deleted_summary_model(self):
    """delete_skill still succeeds when the summary model was already
    removed out-of-band.
    """
    skill_summary_model = (
        skill_models.SkillSummaryModel.get(self.SKILL_ID))
    skill_summary_model.delete()
    skill_summary_model = (
        skill_models.SkillSummaryModel.get(self.SKILL_ID, False))
    self.assertIsNone(skill_summary_model)
    skill_services.delete_skill(
        self.USER_ID, self.SKILL_ID, force_deletion=True)
    skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
    self.assertEqual(skill_model, None)
    self.assertEqual(
        skill_services.get_skill_summary_by_id(
            self.SKILL_ID, strict=False), None)
def test_delete_skill_model_with_linked_suggestion(self):
    """Deleting a skill auto-rejects its pending question suggestions, so
    a later auto-reject attempt raises 'already accepted/rejected'.
    """
    suggestion_change = {
        'cmd': (
            question_domain
            .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
        'question_dict': {
            'question_state_data': self._create_valid_question_data(
                'default_state').to_dict(),
            'language_code': 'en',
            'question_state_data_schema_version': (
                feconf.CURRENT_STATE_SCHEMA_VERSION),
            'linked_skill_ids': ['skill_1'],
            'inapplicable_skill_misconception_ids': ['skillid12345-1']
        },
        'skill_id': self.SKILL_ID,
        'skill_difficulty': 0.3
    }
    suggestion = suggestion_services.create_suggestion(
        feconf.SUGGESTION_TYPE_ADD_QUESTION,
        feconf.ENTITY_TYPE_SKILL, self.SKILL_ID, 1,
        self.user_id_a, suggestion_change, 'test description'
    )
    skill_services.delete_skill(
        self.user_id_a, self.SKILL_ID, force_deletion=True)
    skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
    self.assertEqual(skill_model, None)
    with self.assertRaisesRegexp(
        Exception, 'The suggestion with id %s has already been accepted/'
        'rejected.' % suggestion.suggestion_id):
        suggestion_services.auto_reject_question_suggestions_for_skill_id(
            self.SKILL_ID)
def test_cannot_update_skill_with_no_commit_message(self):
    """update_skill refuses an empty commit message."""
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
            'old_value': 'en',
            'new_value': 'bn'
        })
    ]
    with self.assertRaisesRegexp(
        Exception, 'Expected a commit message, received none.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist, '')
def test_cannot_update_skill_with_empty_changelist(self):
    """update_skill refuses an empty change list."""
    with self.assertRaisesRegexp(
        Exception,
        'Unexpected error: received an invalid change list when trying to '
        'save skill'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, [], 'No changes made.')
def test_mismatch_of_skill_versions(self):
    """Version checks: updating from a stale (lower) stored version or a
    newer (higher) stored version both raise with distinct messages.
    """
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
            'old_value': 'en',
            'new_value': 'bn'
        })
    ]
    skill_model = skill_models.SkillModel.get(self.SKILL_ID)
    # Simulate stored version behind the domain object's version.
    skill_model.version = 0
    with self.assertRaisesRegexp(
        Exception,
        'Unexpected error: trying to update version 0 of skill '
        'from version 1. Please reload the page and try again.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Change language code.')
    # Simulate stored version ahead of the domain object's version.
    skill_model.version = 2
    with self.assertRaisesRegexp(
        Exception,
        'Trying to update version 2 of skill from version 1, which is too '
        'old. Please reload the page and try again.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Change language code.')
def test_normal_user_cannot_update_skill_property(self):
    """Only privileged users may change the skill description; a normal
    user's attempt is rejected.
    """
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
            'property_name': skill_domain.SKILL_PROPERTY_DESCRIPTION,
            'old_value': 'Description',
            'new_value': 'New description'
        })
    ]
    with self.assertRaisesRegexp(
        Exception,
        'The user does not have enough rights to edit the '
        'skill description.'):
        skill_services.update_skill(
            self.user_id_a, self.SKILL_ID, changelist,
            'Change description.')
def test_update_skill_explanation(self):
    """Updating the skill-contents explanation persists the new HTML."""
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    old_explanation = {'content_id': '1', 'html': '<p>Explanation</p>'}
    new_explanation = {'content_id': '1', 'html': '<p>New explanation</p>'}
    self.assertEqual(
        skill.skill_contents.explanation.to_dict(), old_explanation)
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_CONTENTS_PROPERTY_EXPLANATION),
            'old_value': old_explanation,
            'new_value': new_explanation
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist, 'Change explanation.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(
        skill.skill_contents.explanation.to_dict(), new_explanation)
def test_update_skill_worked_examples(self):
    """Replacing the worked-examples list persists the new example."""
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    old_worked_example = skill_domain.WorkedExample(
        state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
        state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
    ).to_dict()
    new_worked_example = skill_domain.WorkedExample(
        state_domain.SubtitledHtml('2', '<p>Example Question 1 new</p>'),
        state_domain.SubtitledHtml('3', '<p>Example Explanation 1 new</p>')
    ).to_dict()
    # Precondition: the fixture skill holds exactly the old example.
    self.assertEqual(len(skill.skill_contents.worked_examples), 1)
    self.assertEqual(
        skill.skill_contents.worked_examples[0].to_dict(),
        old_worked_example)
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES),
            'old_value': [old_worked_example],
            'new_value': [new_worked_example]
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist, 'Change worked examples.')
    # Re-fetch and verify the single example was swapped in place.
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(len(skill.skill_contents.worked_examples), 1)
    self.assertEqual(
        skill.skill_contents.worked_examples[0].to_dict(),
        new_worked_example)
def test_delete_skill_misconception(self):
    """Deleting the only misconception leaves an empty list."""
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    # Precondition: exactly one misconception with the known id.
    self.assertEqual(len(skill.misconceptions), 1)
    self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
            'misconception_id': self.MISCONCEPTION_ID_1,
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist, 'Delete misconception.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(skill.misconceptions, [])
def test_update_skill_misconception_notes(self):
    """Updating a misconception's notes persists the new HTML."""
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    # Precondition: one misconception with the original notes text.
    self.assertEqual(len(skill.misconceptions), 1)
    self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
    self.assertEqual(skill.misconceptions[0].notes, '<p>description</p>')
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES),
            'misconception_id': self.MISCONCEPTION_ID_1,
            'old_value': '<p>description</p>',
            'new_value': '<p>new description</p>'
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist,
        'Update misconception notes.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(len(skill.misconceptions), 1)
    self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
    self.assertEqual(
        skill.misconceptions[0].notes, '<p>new description</p>')
def test_update_skill_misconception_feedback(self):
    """Updating a misconception's feedback persists the new HTML."""
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    # Precondition: one misconception with the default feedback text.
    self.assertEqual(len(skill.misconceptions), 1)
    self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
    self.assertEqual(
        skill.misconceptions[0].feedback, '<p>default_feedback</p>')
    changelist = [
        skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK),
            'misconception_id': self.MISCONCEPTION_ID_1,
            'old_value': '<p>default_feedback</p>',
            'new_value': '<p>new feedback</p>'
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist,
        'Update misconception feedback.')
    skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
    self.assertEqual(len(skill.misconceptions), 1)
    self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
    self.assertEqual(
        skill.misconceptions[0].feedback, '<p>new feedback</p>')
def test_update_skill_schema(self):
    """A rubrics schema migration bumps the version but not the content."""
    orig_skill_dict = (
        skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict())
    changelist = [
        skill_domain.SkillChange({
            'cmd': (
                skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION),
            'from_version': 1,
            'to_version': 2,
        })
    ]
    skill_services.update_skill(
        self.USER_ID, self.SKILL_ID, changelist, 'Update schema.')
    new_skill_dict = skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict()
    # Check version is updated.
    self.assertEqual(new_skill_dict['version'], 2)
    # Delete version and check that the two dicts are the same.
    del orig_skill_dict['version']
    del new_skill_dict['version']
    self.assertEqual(orig_skill_dict, new_skill_dict)
def test_cannot_update_skill_with_invalid_change_list(self):
    """A non-SkillChange change list raises and is logged with context."""
    observed_log_messages = []

    def _mock_logging_function(msg, *args):
        """Mocks logging.error()."""
        observed_log_messages.append(msg % args)

    logging_swap = self.swap(logging, 'error', _mock_logging_function)
    assert_raises_context_manager = self.assertRaisesRegexp(
        Exception, '\'str\' object has no attribute \'cmd\'')
    # A plain string instead of a list of SkillChange objects triggers
    # the AttributeError inside update_skill.
    with logging_swap, assert_raises_context_manager:
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, 'invalid_change_list',
            'commit message')
    # The error log should include both the failure and the skill id.
    self.assertEqual(len(observed_log_messages), 1)
    self.assertRegexpMatches(
        observed_log_messages[0], 'object has no'
        ' attribute \'cmd\' %s invalid_change_list' % self.SKILL_ID)
def test_cannot_update_misconception_name_with_invalid_id(self):
    """Renaming a misconception with an unknown id must fail."""
    changelist = [skill_domain.SkillChange({
        'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
        'property_name': (
            skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME),
        'misconception_id': 'invalid_id',
        'old_value': 'test name',
        'new_value': 'Name'
    })]
    with self.assertRaisesRegexp(
        Exception, 'There is no misconception with the given id.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Updated misconception name.')
def test_cannot_update_misconception_must_be_addressed_with_invalid_id(
        self):
    """Toggling must_be_addressed with an unknown id must fail."""
    changelist = [skill_domain.SkillChange({
        'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
        'property_name': (
            skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED),
        'misconception_id': 'invalid_id',
        'old_value': False,
        'new_value': True
    })]
    with self.assertRaisesRegexp(
        Exception, 'There is no misconception with the given id.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Updated misconception must_be_addressed.')
def test_cannot_add_already_existing_prerequisite_skill(self):
    """Adding a prerequisite skill that is already present must fail."""
    add_prereq_change = skill_domain.SkillChange({
        'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL,
        'skill_id': 'skill_id_1'
    })
    expected_error = self.assertRaisesRegexp(
        Exception, 'The skill is already a prerequisite skill.')
    with expected_error:
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, [add_prereq_change],
            'Added prereq skill.')
def test_cannot_delete_non_existent_prerequisite_skill(self):
    """Removing a skill that is not a prerequisite must fail."""
    delete_prereq_change = skill_domain.SkillChange({
        'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL,
        'skill_id': 'skill_id_5'
    })
    expected_error = self.assertRaisesRegexp(
        Exception, 'The skill to remove is not a prerequisite skill.')
    with expected_error:
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, [delete_prereq_change],
            'Removed prereq skill.')
def test_cannot_add_rubric_with_invalid_difficulty(self):
    """Updating a rubric with an unknown difficulty must fail."""
    rubric_change = skill_domain.SkillChange({
        'cmd': skill_domain.CMD_UPDATE_RUBRICS,
        'difficulty': 'invalid_difficulty',
        'explanations': ['<p>Explanation</p>']
    })
    expected_error = self.assertRaisesRegexp(
        Exception, 'There is no rubric for the given difficulty.')
    with expected_error:
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, [rubric_change],
            'Added rubric.')
def test_cannot_delete_misconception_with_invalid_id(self):
    """Deleting a misconception with an unknown id must fail."""
    delete_change = skill_domain.SkillChange({
        'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
        'misconception_id': 'invalid_id'
    })
    expected_error = self.assertRaisesRegexp(
        Exception, 'There is no misconception with the given id.')
    with expected_error:
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, [delete_change],
            'Delete misconception')
def test_cannot_update_misconception_notes_with_invalid_id(self):
    """Updating misconception notes with an unknown id must fail."""
    changelist = [skill_domain.SkillChange({
        'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
        'property_name': (
            skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES),
        'misconception_id': 'invalid_id',
        'old_value': 'description',
        'new_value': 'new description'
    })]
    with self.assertRaisesRegexp(
        Exception, 'There is no misconception with the given id.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Updated misconception notes.')
def test_cannot_update_misconception_feedback_with_invalid_id(self):
    """Updating misconception feedback with an unknown id must fail."""
    changelist = [skill_domain.SkillChange({
        'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
        'property_name': (
            skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK),
        'misconception_id': 'invalid_id',
        'old_value': 'default_feedback',
        'new_value': 'new feedback'
    })]
    with self.assertRaisesRegexp(
        Exception, 'There is no misconception with the given id.'):
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Updated misconception feedback.')
class SkillMasteryServicesUnitTests(test_utils.GenericTestBase):
    """Test the skill mastery services module."""

    # Populated in setUp with three freshly generated skill ids.
    SKILL_IDS = []
    USER_ID = 'user'
    # Fixture masteries: skill 1 has 0.0, skill 2 has 0.5, skill 3 is
    # deliberately left without any mastery record.
    DEGREE_OF_MASTERY_1 = 0.0
    DEGREE_OF_MASTERY_2 = 0.5

    def setUp(self):
        """Create three skill ids; record mastery for the first two only."""
        super(SkillMasteryServicesUnitTests, self).setUp()
        self.SKILL_ID_1 = skill_services.get_new_skill_id()
        self.SKILL_ID_2 = skill_services.get_new_skill_id()
        self.SKILL_ID_3 = skill_services.get_new_skill_id()
        self.SKILL_IDS = [self.SKILL_ID_1, self.SKILL_ID_2, self.SKILL_ID_3]
        skill_services.create_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_1, self.DEGREE_OF_MASTERY_1)
        skill_services.create_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_2, self.DEGREE_OF_MASTERY_2)

    def test_get_user_skill_mastery(self):
        """Mastery is returned when recorded and None otherwise."""
        degree_of_mastery = skill_services.get_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_1)
        self.assertEqual(degree_of_mastery, self.DEGREE_OF_MASTERY_1)
        # SKILL_ID_3 has no mastery record, so None is expected.
        degree_of_mastery = skill_services.get_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_3)
        self.assertEqual(degree_of_mastery, None)

    def test_get_multi_user_skill_mastery(self):
        """Batch lookup maps every skill id to its mastery (or None)."""
        degree_of_mastery = skill_services.get_multi_user_skill_mastery(
            self.USER_ID, self.SKILL_IDS)
        self.assertEqual(
            degree_of_mastery, {
                self.SKILL_ID_1: self.DEGREE_OF_MASTERY_1,
                self.SKILL_ID_2: self.DEGREE_OF_MASTERY_2,
                self.SKILL_ID_3: None
            })

    def test_create_multi_user_skill_mastery(self):
        """Batch creation stores each given (skill id, mastery) pair."""
        skill_id_4 = skill_services.get_new_skill_id()
        skill_id_5 = skill_services.get_new_skill_id()
        skill_services.create_multi_user_skill_mastery(
            self.USER_ID, {skill_id_4: 0.3, skill_id_5: 0.5})
        degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
            self.USER_ID, [skill_id_4, skill_id_5])
        self.assertEqual(
            degrees_of_mastery, {skill_id_4: 0.3, skill_id_5: 0.5})

    def test_get_sorted_skill_ids(self):
        """Ids are ordered (unattempted first, then lowest mastery) and
        truncated to feconf.MAX_NUMBER_OF_SKILL_IDS.
        """
        degrees_of_masteries = skill_services.get_multi_user_skill_mastery(
            self.USER_ID, self.SKILL_IDS)
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2):
            sorted_skill_ids = skill_services.get_sorted_skill_ids(
                degrees_of_masteries)
            expected_sorted_skill_ids = [self.SKILL_ID_3, self.SKILL_ID_1]
            self.assertEqual(len(sorted_skill_ids), 2)
            self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids)
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 3):
            sorted_skill_ids = skill_services.get_sorted_skill_ids(
                degrees_of_masteries)
            expected_sorted_skill_ids = [
                self.SKILL_ID_3, self.SKILL_ID_1, self.SKILL_ID_2]
            self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids)

    def test_filter_skills_by_mastery(self):
        """Filtering keeps at most MAX_NUMBER_OF_SKILL_IDS weakest skills,
        preserving the original order of the input list.
        """
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2):
            arranged_filtered_skill_ids = (
                skill_services.filter_skills_by_mastery(
                    self.USER_ID, self.SKILL_IDS))
            self.assertEqual(len(arranged_filtered_skill_ids), 2)
            expected_skill_ids = [self.SKILL_ID_1, self.SKILL_ID_3]
            self.assertEqual(arranged_filtered_skill_ids, expected_skill_ids)
        # With a large enough cap, nothing is filtered out.
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', len(self.SKILL_IDS)):
            arranged_filtered_skill_ids = (
                skill_services.filter_skills_by_mastery(
                    self.USER_ID, self.SKILL_IDS))
            self.assertEqual(arranged_filtered_skill_ids, self.SKILL_IDS)
class SkillMigrationTests(test_utils.GenericTestBase):
    """Tests that skill models stored at old schema versions are migrated
    to the latest schema when loaded via skill_fetchers.
    """

    def test_migrate_skill_contents_to_latest_schema(self):
        """Loading a v1 skill_contents dict migrates it to the version set
        in feconf, converting legacy math tags to the new format in the
        explanation, worked examples and written translations.
        """
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
        # Legacy math tag (raw_latex-with-value) and its expected
        # post-migration form (math_content-with-value).
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        written_translations_dict = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': '',
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    }
                }
            }
        }
        # Same structure, but with the migrated math tag substituted in.
        written_translations_dict_math = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': expected_html_content,
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    }
                }
            }
        }
        worked_example_dict = {
            'question': {
                'content_id': 'question1',
                'html': ''
            },
            'explanation': {
                'content_id': 'explanation1',
                'html': ''
            }
        }
        worked_example_dict_math = {
            'question': {
                'content_id': 'question1',
                'html': expected_html_content
            },
            'explanation': {
                'content_id': 'explanation1',
                'html': expected_html_content
            }
        }
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml(
                explanation_content_id, ''),
            [skill_domain.WorkedExample.from_dict(worked_example_dict)],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    explanation_content_id: {}
                }
            }),
            state_domain.WrittenTranslations.from_dict(
                written_translations_dict))
        # Inject the legacy math HTML into every field that should be
        # rewritten by the migration.
        skill_contents_dict = skill_contents.to_dict()
        skill_contents_dict['explanation']['html'] = html_content
        skill_contents_dict['written_translations']['translations_mapping'][
            'content1']['en']['translation'] = html_content
        skill_contents_dict['worked_examples'][0]['question']['html'] = (
            html_content)
        skill_contents_dict['worked_examples'][0]['explanation']['html'] = (
            html_content)
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[],
            skill_contents=skill_contents_dict,
            next_misconception_id=1,
            misconceptions_schema_version=1,
            rubric_schema_version=1,
            skill_contents_schema_version=1,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            'user_id_admin', 'skill model created', commit_cmd_dicts)
        current_schema_version_swap = self.swap(
            feconf, 'CURRENT_SKILL_CONTENTS_SCHEMA_VERSION', 4)
        with current_schema_version_swap:
            skill = skill_fetchers.get_skill_from_model(model)
        self.assertEqual(skill.skill_contents_schema_version, 4)
        self.assertEqual(
            skill.skill_contents.explanation.html,
            expected_html_content)
        self.assertEqual(
            skill.skill_contents.written_translations.to_dict(),
            written_translations_dict_math)
        self.assertEqual(
            skill.skill_contents.worked_examples[0].to_dict(),
            worked_example_dict_math)

    def test_migrate_misconceptions_to_latest_schema(self):
        """Loading v1 misconceptions migrates them, adding the
        must_be_addressed field and converting legacy math tags.
        """
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml(
                explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    explanation_content_id: {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    explanation_content_id: {}
                }
            }))
        # v1 misconception dict: no must_be_addressed key, legacy math HTML.
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[{
                'id': 1,
                'name': 'name',
                'notes': html_content,
                'feedback': html_content
            }],
            rubrics=[],
            skill_contents=skill_contents.to_dict(),
            next_misconception_id=2,
            misconceptions_schema_version=1,
            rubric_schema_version=1,
            skill_contents_schema_version=1,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            'user_id_admin', 'skill model created', commit_cmd_dicts)
        current_schema_version_swap = self.swap(
            feconf, 'CURRENT_MISCONCEPTIONS_SCHEMA_VERSION', 5)
        with current_schema_version_swap:
            skill = skill_fetchers.get_skill_from_model(model)
        self.assertEqual(skill.misconceptions_schema_version, 5)
        # must_be_addressed is introduced by the migration and defaults on.
        self.assertEqual(skill.misconceptions[0].must_be_addressed, True)
        self.assertEqual(skill.misconceptions[0].notes, expected_html_content)
        self.assertEqual(
            skill.misconceptions[0].feedback, expected_html_content)

    def test_migrate_rubrics_to_latest_schema(self):
        """Loading v2 rubrics migrates them, converting legacy math tags in
        the explanations while keeping the difficulty labels.
        """
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml(
                explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    explanation_content_id: {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    explanation_content_id: {}
                }
            }))
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[{
                'difficulty': 'Easy',
                'explanations': ['Easy explanation']
            }, {
                'difficulty': 'Medium',
                'explanations': ['Medium explanation']
            }, {
                'difficulty': 'Hard',
                'explanations': ['Hard explanation', html_content]
            }],
            skill_contents=skill_contents.to_dict(),
            next_misconception_id=1,
            misconceptions_schema_version=1,
            rubric_schema_version=2,
            skill_contents_schema_version=2,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            'user_id_admin', 'skill model created', commit_cmd_dicts)
        current_schema_version_swap = self.swap(
            feconf, 'CURRENT_RUBRIC_SCHEMA_VERSION', 5)
        with current_schema_version_swap:
            skill = skill_fetchers.get_skill_from_model(model)
        self.assertEqual(skill.rubric_schema_version, 5)
        self.assertEqual(skill.rubrics[0].difficulty, 'Easy')
        self.assertEqual(skill.rubrics[0].explanations, ['Easy explanation'])
        self.assertEqual(skill.rubrics[1].difficulty, 'Medium')
        self.assertEqual(skill.rubrics[1].explanations, ['Medium explanation'])
        self.assertEqual(skill.rubrics[2].difficulty, 'Hard')
        self.assertEqual(
            skill.rubrics[2].explanations,
            ['Hard explanation', expected_html_content])
| 41.864343
| 80
| 0.616342
|
acfd1708ebe44a51913b8818c4064ed549a110b5
| 27
|
py
|
Python
|
airbyte-integrations/bases/base-python/base_python/cdk/utils/__init__.py
|
rajatariya21/airbyte
|
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
|
[
"MIT"
] | 6,215
|
2020-09-21T13:45:56.000Z
|
2022-03-31T21:21:45.000Z
|
airbyte-integrations/bases/base-python/base_python/cdk/utils/__init__.py
|
rajatariya21/airbyte
|
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
|
[
"MIT"
] | 8,448
|
2020-09-21T00:43:50.000Z
|
2022-03-31T23:56:06.000Z
|
airbyte-integrations/bases/base-python/base_python/cdk/utils/__init__.py
|
rajatariya21/airbyte
|
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
|
[
"MIT"
] | 1,251
|
2020-09-20T05:48:47.000Z
|
2022-03-31T10:41:29.000Z
|
# Initialize Utils Package
| 13.5
| 26
| 0.814815
|
acfd1787fb24f862ce3131d404138f3252dd85de
| 5,009
|
py
|
Python
|
marlin-firmware/buildroot/share/vscode/create_custom_upload_command_CDC.py
|
voicevon/gogame_bot
|
a1d91f4a1b2537d00b5cd5ed78d429a9c1aad3d1
|
[
"MIT"
] | 6
|
2020-12-04T21:55:04.000Z
|
2022-02-02T20:49:45.000Z
|
marlin-firmware/buildroot/share/vscode/create_custom_upload_command_CDC.py
|
voicevon/gogame_bot
|
a1d91f4a1b2537d00b5cd5ed78d429a9c1aad3d1
|
[
"MIT"
] | 24
|
2020-12-25T05:00:51.000Z
|
2021-04-20T00:56:50.000Z
|
marlin-firmware/buildroot/share/vscode/create_custom_upload_command_CDC.py
|
voicevon/gogame_bot
|
a1d91f4a1b2537d00b5cd5ed78d429a9c1aad3d1
|
[
"MIT"
] | 3
|
2021-05-01T15:13:41.000Z
|
2022-02-11T01:15:30.000Z
|
#!/usr/bin/env python
#
# Builds custom upload command
# 1) Run platformio as a subprocess to find a COM port
# 2) Build the upload command
# 3) Exit and let upload tool do the work
#
# This script runs between completion of the library/dependencies installation and compilation.
#
# Will continue on if a COM port isn't found so that the compilation can be done.
#
from __future__ import print_function
from __future__ import division
import subprocess
import os
import sys
from SCons.Script import DefaultEnvironment
import platform
current_OS = platform.system()
env = DefaultEnvironment()
build_type = os.environ.get("BUILD_TYPE", 'Not Set')
if not(build_type == 'upload' or build_type == 'traceback' or build_type == 'Not Set') :
env.Replace(UPLOAD_PROTOCOL = 'teensy-gui') # run normal Teensy2 scripts
else:
com_first = ''
com_last = ''
com_CDC = ''
description_first = ''
description_last = ''
description_CDC = ''
#
# grab the first com port that pops up unless we find one we know for sure
# is a CDC device
#
def get_com_port(com_search_text, descr_search_text, start):
global com_first
global com_last
global com_CDC
global description_first
global description_last
global description_CDC
print('\nLooking for Serial Port\n')
# stream output from subprocess and split it into lines
pio_subprocess = subprocess.Popen(['platformio', 'device', 'list'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
looking_for_description = False
for line in iter(pio_subprocess.stdout.readline, ''):
if 0 <= line.find(com_search_text):
looking_for_description = True
com_last = line.replace('\n', '')
if com_first == '':
com_first = com_last
if 0 <= line.find(descr_search_text) and looking_for_description:
looking_for_description = False
description_last = line[ start : ]
if description_first == '':
description_first = description_last
if 0 <= description_last.find('CDC'):
com_CDC = com_last
description_CDC = description_last
if com_CDC == '' and com_first != '':
com_CDC = com_first
description_CDC = description_first
elif com_CDC == '':
com_CDC = 'COM_PORT_NOT_FOUND'
while 0 <= com_CDC.find('\n'):
com_CDC = com_CDC.replace('\n', '')
while 0 <= com_CDC.find('\r'):
com_CDC = com_CDC.replace('\r', '')
if com_CDC == 'COM_PORT_NOT_FOUND':
print(com_CDC, '\n')
else:
print('FOUND: ', com_CDC)
print('DESCRIPTION: ', description_CDC, '\n')
if current_OS == 'Windows':
get_com_port('COM', 'Hardware ID:', 13)
# avrdude_conf_path = env.get("PIOHOME_DIR") + '\\packages\\toolchain-atmelavr\\etc\\avrdude.conf'
avrdude_conf_path = 'buildroot\\share\\atom\\avrdude.conf'
avrdude_exe_path = 'buildroot\\share\\atom\\avrdude_5.10.exe'
# source_path = env.get("PROJECTBUILD_DIR") + '\\' + env.get("PIOENV") + '\\firmware.hex'
source_path = '.pio\\build\\' + env.get("PIOENV") + '\\firmware.hex'
upload_string = avrdude_exe_path + ' -p usb1286 -c avr109 -P ' + com_CDC + ' -U flash:w:' + source_path + ':i'
if current_OS == 'Darwin': # MAC
get_com_port('usbmodem', 'Description:', 13)
# avrdude_conf_path = env.get("PIOHOME_DIR") + '/packages/toolchain-atmelavr/etc/avrdude.conf'
avrdude_conf_path = 'buildroot/share/vscode/avrdude_macOS.conf'
avrdude_exe_path = 'buildroot/share/vscode/avrdude_5.10_macOS'
# source_path = env.get("PROJECTBUILD_DIR") + '/' + env.get("PIOENV") + '/firmware.hex'
source_path = '.pio/build/' + env.get("PIOENV") + '/firmware.hex'
# upload_string = 'avrdude -p usb1286 -c avr109 -P ' + com_CDC + ' -U flash:w:' + source_path + ':i'
upload_string = avrdude_exe_path + ' -p usb1286 -c avr109 -P ' + com_CDC + ' -C ' + avrdude_conf_path + ' -U flash:w:' + source_path + ':i'
print('upload_string: ', upload_string)
if current_OS == 'Linux':
get_com_port('/dev/tty', 'Description:', 13)
# avrdude_conf_path = env.get("PIOHOME_DIR") + '/packages/toolchain-atmelavr/etc/avrdude.conf'
avrdude_conf_path = 'buildroot/share/vscode/avrdude_linux.conf'
avrdude_exe_path = 'buildroot/share/vscode/avrdude_5.10_linux'
# source_path = env.get("PROJECTBUILD_DIR") + '/' + env.get("PIOENV") + '/firmware.hex'
source_path = '.pio/build/' + env.get("PIOENV") + '/firmware.hex'
# upload_string = 'avrdude -p usb1286 -c avr109 -P ' + com_CDC + ' -U flash:w:' + source_path + ':i'
upload_string = avrdude_exe_path + ' -p usb1286 -c avr109 -P ' + com_CDC + ' -C ' + avrdude_conf_path + ' -U flash:w:' + source_path + ':i'
env.Replace(
UPLOADCMD = upload_string,
MAXIMUM_RAM_SIZE = 8192,
MAXIMUM_SIZE = 130048
)
| 34.308219
| 146
| 0.641645
|
acfd17fc660eee87c93c9a82e765c0b0fe187ca9
| 29,632
|
py
|
Python
|
chunked_media/tests/test_views.py
|
CerealBoxMedia/chunked_media
|
324d9b7d76323fa1ca5296d733b82dc5ab5f98c0
|
[
"BSD-2-Clause"
] | null | null | null |
chunked_media/tests/test_views.py
|
CerealBoxMedia/chunked_media
|
324d9b7d76323fa1ca5296d733b82dc5ab5f98c0
|
[
"BSD-2-Clause"
] | null | null | null |
chunked_media/tests/test_views.py
|
CerealBoxMedia/chunked_media
|
324d9b7d76323fa1ca5296d733b82dc5ab5f98c0
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import unicode_literals
import json
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.files.base import ContentFile
from django.test import TestCase, modify_settings
from django.test.utils import override_settings
from django.urls import reverse
from six import b
from wagtail.core.models import Collection, GroupCollectionPermission, Page
from wagtail.tests.utils import WagtailTestUtils
from knowledge_hub.chunked_media import models
from knowledge_hub.chunked_media.tests.testapp.models import EventPage, EventPageRelatedMedia
class TestMediaIndexView(TestCase, WagtailTestUtils):
    """Tests for the chunked_media index (listing) view."""

    def setUp(self):
        """Log in before each test (WagtailTestUtils helper)."""
        self.login()

    def test_simple(self):
        """The index view renders and offers both 'add' buttons."""
        response = self.client.get(reverse('chunked_media:index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/index.html')
        self.assertContains(response, "Add audio")
        self.assertContains(response, "Add video")

    @modify_settings(INSTALLED_APPS={
        'prepend': 'chunked_media.tests.testextends',
    })
    def test_extends(self):
        """An overriding test app can replace the index template content."""
        response = self.client.get(reverse('chunked_media:index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/index.html')
        # The overriding template removes the add buttons and adds its own
        # marker text.
        self.assertNotContains(response, "Add audio")
        self.assertNotContains(response, "Add video")
        self.assertContains(response, "You shan't act")

    def test_search(self):
        """The 'q' parameter is passed through as the query string."""
        response = self.client.get(reverse('chunked_media:index'), {'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    @staticmethod
    def make_media():
        """Create 50 audio Media objects to exercise pagination."""
        fake_file = ContentFile(b("A boring example song"))
        fake_file.name = 'song.mp3'
        for i in range(50):
            media = models.Media(
                title="Test " + str(i),
                duration=100 + i,
                file=fake_file,
                type='audio',
            )
            media.save()

    def test_pagination(self):
        """A valid page number returns that page."""
        self.make_media()
        response = self.client.get(reverse('chunked_media:index'), {'p': 2})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/index.html')
        # Check that we got the correct page
        self.assertEqual(response.context['media_files'].number, 2)

    def test_pagination_invalid(self):
        """A non-numeric page number falls back to page one."""
        self.make_media()
        response = self.client.get(reverse('chunked_media:index'), {'p': 'Hello World!'})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/index.html')
        # Check that we got page one
        self.assertEqual(response.context['media_files'].number, 1)

    def test_pagination_out_of_range(self):
        """An out-of-range page number falls back to the last page."""
        self.make_media()
        response = self.client.get(reverse('chunked_media:index'), {'p': 99999})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/index.html')
        # Check that we got the last page
        self.assertEqual(response.context['media_files'].number, response.context['media_files'].paginator.num_pages)

    def test_ordering(self):
        """Every supported 'ordering' value renders successfully."""
        orderings = ['title', '-created_at']
        for ordering in orderings:
            response = self.client.get(reverse('chunked_media:index'), {'ordering': ordering})
            self.assertEqual(response.status_code, 200)
class TestMediaAddView(TestCase, WagtailTestUtils):
def setUp(self):
    """Log in before each test (WagtailTestUtils helper)."""
    self.login()
def test_get_audio(self):
    """The audio add form renders with an audio-specific title and action."""
    response = self.client.get(reverse('chunked_media:add', args=('audio', )))
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'chunked_media/media/add.html')

    # as standard, only the root collection exists and so no 'Collection' option
    # is displayed on the form
    self.assertNotContains(response, '<label for="id_collection">')

    self.assertContains(response, 'Add audio')
    self.assertNotContains(response, 'Add audio or video')
    # The form must post back to the audio-specific add URL exactly once.
    self.assertContains(
        response,
        '<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
            reverse('chunked_media:add', args=('audio',))
        ),
        count=1
    )
def test_get_video(self):
    """The video add form renders with a video-specific title and action."""
    response = self.client.get(reverse('chunked_media:add', args=('video', )))
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'chunked_media/media/add.html')
    self.assertContains(response, 'Add video')
    self.assertNotContains(response, 'Add audio or video')
    # The form must post back to the video-specific add URL exactly once.
    self.assertContains(
        response,
        '<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
            reverse('chunked_media:add', args=('video',))
        ),
        count=1
    )

    # as standard, only the root collection exists and so no 'Collection' option
    # is displayed on the form
    self.assertNotContains(response, '<label for="id_collection">')
def test_get_audio_or_video(self):
    """The generic 'media' add form uses the combined title."""
    response = self.client.get(reverse('chunked_media:add', args=('media', )))
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'chunked_media/media/add.html')
    self.assertNotContains(response, 'Add video')
    self.assertContains(response, 'Add audio or video')
def test_get_audio_with_collections(self):
    """With more than one collection, the audio form shows a picker."""
    root_collection = Collection.get_first_root_node()
    root_collection.add_child(name="Evil plans")

    response = self.client.get(reverse('chunked_media:add', args=('audio', )))
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'chunked_media/media/add.html')
    # The collection chooser now appears and lists the new collection.
    self.assertContains(response, '<label for="id_collection">')
    self.assertContains(response, "Evil plans")
    self.assertContains(response, 'Add audio')
    self.assertContains(
        response,
        '<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
            reverse('chunked_media:add', args=('audio',))
        ),
        count=1
    )
def test_get_video_with_collections(self):
    """With more than one collection, the video form shows a picker."""
    root_collection = Collection.get_first_root_node()
    root_collection.add_child(name="Evil plans")

    response = self.client.get(reverse('chunked_media:add', args=('video', )))
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'chunked_media/media/add.html')
    # The collection chooser now appears and lists the new collection.
    self.assertContains(response, '<label for="id_collection">')
    self.assertContains(response, "Evil plans")
    self.assertContains(response, 'Add video')
    self.assertContains(
        response,
        '<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
            reverse('chunked_media:add', args=('video',))
        ),
        count=1
    )
def test_post_audio(self):
    """Posting the audio form creates a Media of type 'audio' in the
    root collection and redirects to the index.
    """
    # Build a fake file
    fake_file = ContentFile(b("A boring example song"))
    fake_file.name = 'song.mp3'

    # Submit
    post_data = {
        'title': "Test media",
        'file': fake_file,
        'duration': 100,
    }
    response = self.client.post(reverse('chunked_media:add', args=('audio', )), post_data)

    # User should be redirected back to the index
    self.assertRedirects(response, reverse('chunked_media:index'))

    # Media should be created, and be placed in the root collection
    self.assertTrue(models.Media.objects.filter(title="Test media").exists())
    root_collection = Collection.get_first_root_node()
    media = models.Media.objects.get(title="Test media")
    self.assertEqual(media.collection, root_collection)
    self.assertEqual(media.type, 'audio')
    def test_post_video(self):
        """Posting a valid video upload creates a 'video' Media in the root collection."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example movie"))
        fake_file.name = 'movie.mp4'
        # Submit — video uploads also carry width/height form fields
        post_data = {
            'title': "Test media",
            'file': fake_file,
            'duration': 100,
            'width': 720,
            'height': 480,
        }
        response = self.client.post(reverse('chunked_media:add', args=('video', )), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('chunked_media:index'))
        # Media should be created, and be placed in the root collection
        self.assertTrue(models.Media.objects.filter(title="Test media").exists())
        root_collection = Collection.get_first_root_node()
        media = models.Media.objects.get(title="Test media")
        self.assertEqual(media.collection, root_collection)
        self.assertEqual(media.type, 'video')
    def test_post_audio_with_collections(self):
        """Posting with an explicit collection id files the new audio in that collection."""
        root_collection = Collection.get_first_root_node()
        evil_plans_collection = root_collection.add_child(name="Evil plans")
        # Build a fake file
        fake_file = ContentFile(b("A boring example song"))
        fake_file.name = 'song.mp3'
        # Submit
        post_data = {
            'title': "Test media",
            'file': fake_file,
            'duration': 100,
            'collection': evil_plans_collection.id,
        }
        response = self.client.post(reverse('chunked_media:add', args=('audio', )), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('chunked_media:index'))
        # Media should be created, and be placed in the Evil Plans collection
        self.assertTrue(models.Media.objects.filter(title="Test media").exists())
        media = models.Media.objects.get(title="Test media")
        self.assertEqual(media.collection, evil_plans_collection)
        self.assertEqual(media.type, 'audio')
def test_post_video_with_collections(self):
root_collection = Collection.get_first_root_node()
evil_plans_collection = root_collection.add_child(name="Evil plans")
# Build a fake file
fake_file = ContentFile(b("A boring example movie"))
fake_file.name = 'movie.mp3'
# Submit
post_data = {
'title': "Test media",
'file': fake_file,
'duration': 100,
'collection': evil_plans_collection.id,
}
response = self.client.post(reverse('chunked_media:add', args=('video', )), post_data)
# User should be redirected back to the index
self.assertRedirects(response, reverse('chunked_media:index'))
# Media should be created, and be placed in the Evil Plans collection
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, evil_plans_collection)
self.assertEqual(media.type, 'video')
class TestMediaAddViewWithLimitedCollectionPermissions(TestCase, WagtailTestUtils):
    """Add-view behaviour for a user whose add_media permission is limited to one collection."""
    def setUp(self):
        # Grant add_media only on the "Evil plans" collection, plus admin access,
        # to a fresh non-superuser; then log in as that user.
        add_media_permission = Permission.objects.get(
            content_type__app_label='chunked_media', codename='add_media'
        )
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin'
        )
        root_collection = Collection.get_first_root_node()
        self.evil_plans_collection = root_collection.add_child(name="Evil plans")
        conspirators_group = Group.objects.create(name="Evil conspirators")
        conspirators_group.permissions.add(admin_permission)
        # Collection-scoped permission: add_media applies only inside Evil plans.
        GroupCollectionPermission.objects.create(
            group=conspirators_group,
            collection=self.evil_plans_collection,
            permission=add_media_permission
        )
        user = get_user_model().objects.create_user(
            username='moriarty',
            email='moriarty@example.com',
            password='password'
        )
        user.groups.add(conspirators_group)
        self.client.login(username='moriarty', password='password')
    def test_get_audio(self):
        """Audio add form renders without a collection chooser for single-collection users."""
        response = self.client.get(reverse('chunked_media:add', args=('audio', )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/add.html')
        # user only has access to one collection, so no 'Collection' option
        # is displayed on the form
        self.assertNotContains(response, '<label for="id_collection">')
        self.assertContains(response, 'Add audio')
        self.assertContains(
            response,
            '<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
                reverse('chunked_media:add', args=('audio',))
            ),
            count=1
        )
    def test_get_video(self):
        """Video add form renders without a collection chooser for single-collection users."""
        response = self.client.get(reverse('chunked_media:add', args=('video', )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/add.html')
        # user only has access to one collection, so no 'Collection' option
        # is displayed on the form
        self.assertNotContains(response, '<label for="id_collection">')
        self.assertContains(response, 'Add video')
        self.assertContains(
            response,
            '<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
                reverse('chunked_media:add', args=('video',))
            ),
            count=1
        )
    def test_post_audio(self):
        """Posted audio lands in the only permitted collection even with no collection field."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example song"))
        fake_file.name = 'song.mp3'
        # Submit
        post_data = {
            'title': "Test media",
            'file': fake_file,
            'duration': 100,
        }
        response = self.client.post(reverse('chunked_media:add', args=('audio', )), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('chunked_media:index'))
        # Media should be created with type 'audio' and in the 'evil plans' collection,
        # despite there being no collection field in the form, because that's the
        # only one the user has access to
        self.assertTrue(models.Media.objects.filter(title="Test media").exists())
        media = models.Media.objects.get(title="Test media")
        self.assertEqual(media.collection, self.evil_plans_collection)
        self.assertEqual(media.type, 'audio')
    def test_post_video(self):
        """Posted video lands in the only permitted collection even with no collection field."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example movie"))
        fake_file.name = 'movie.mp4'
        # Submit
        post_data = {
            'title': "Test media",
            'file': fake_file,
            'duration': 100,
        }
        response = self.client.post(reverse('chunked_media:add', args=('video', )), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('chunked_media:index'))
        # Media should be created with type 'video' and in the 'evil plans' collection,
        # despite there being no collection field in the form, because that's the
        # only one the user has access to
        self.assertTrue(models.Media.objects.filter(title="Test media").exists())
        media = models.Media.objects.get(title="Test media")
        self.assertEqual(media.collection, self.evil_plans_collection)
        self.assertEqual(media.type, 'video')
class TestMediaEditView(TestCase, WagtailTestUtils):
    """Tests for the media edit view (rendering, template overrides, POST, missing files)."""
    def setUp(self):
        self.login()
        # Build a fake file
        fake_file = ContentFile(b("A boring example song"))
        fake_file.name = 'song.mp3'
        # Create a media to edit
        self.media = models.Media.objects.create(title="Test media", file=fake_file, duration=100)
    def test_simple(self):
        """The default edit template renders and shows the file size."""
        response = self.client.get(reverse('chunked_media:edit', args=(self.media.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/edit.html')
        self.assertContains(response, "Filesize")
    @modify_settings(INSTALLED_APPS={
        'prepend': 'chunked_media.tests.testextends',
    })
    def test_extends(self):
        """With the testextends app installed, its template blocks override the defaults."""
        response = self.client.get(reverse('chunked_media:edit', args=(self.media.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/edit.html')
        # The override removes the Filesize stat and injects the "sweet-*" markers.
        self.assertNotContains(response, "Filesize")
        self.assertContains(response, "sweet-style")
        self.assertContains(response, "sweet-code")
        self.assertContains(response, "sweet-form-row")
        self.assertContains(response, "sweet-stats")
    def test_post(self):
        """Posting the edit form updates the media title and redirects to the index."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example song"))
        fake_file.name = 'song.mp3'
        # Submit title change
        post_data = {
            'title': "Test media changed!",
            'file': fake_file,
            'duration': 100,
        }
        response = self.client.post(reverse('chunked_media:edit', args=(self.media.id,)), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('chunked_media:index'))
        # Media title should be changed
        self.assertEqual(models.Media.objects.get(id=self.media.id).title, "Test media changed!")
    def test_with_missing_source_file(self):
        """The edit page still renders (with a 'File not found' notice) if the file is gone."""
        # Build a fake file
        fake_file = ContentFile(b("An ephemeral media"))
        fake_file.name = 'to-be-deleted.mp3'
        # Create a new media to delete the source for
        media = models.Media.objects.create(title="Test missing source media", file=fake_file, duration=100)
        # Delete the underlying storage file but keep the DB row (save=False).
        media.file.delete(False)
        response = self.client.get(reverse('chunked_media:edit', args=(media.id,)), {})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/edit.html')
        self.assertContains(response, 'File not found')
class TestMediaDeleteView(TestCase, WagtailTestUtils):
    """Tests for the media delete confirmation page and deletion POST."""
    def setUp(self):
        self.login()
        # A media object for the view under test to delete
        self.media = models.Media.objects.create(title="Test media", duration=100)
    def _delete_url(self):
        # Small helper: delete URL for the fixture media.
        return reverse('chunked_media:delete', args=(self.media.id,))
    def test_simple(self):
        """GET renders the confirmation template."""
        resp = self.client.get(self._delete_url())
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'chunked_media/media/confirm_delete.html')
    def test_delete(self):
        """POST deletes the media and redirects back to the index."""
        resp = self.client.post(self._delete_url(), {'foo': 'bar'})
        self.assertRedirects(resp, reverse('chunked_media:index'))
        self.assertFalse(models.Media.objects.filter(id=self.media.id).exists())
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_link(self):
        """With usage counting enabled, the confirm page reports zero usages."""
        resp = self.client.get(self._delete_url())
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'chunked_media/media/confirm_delete.html')
        self.assertIn('Used 0 times', str(resp.content))
class TestMediaChooserView(TestCase, WagtailTestUtils):
    """Tests for the JSON media chooser modal: rendering, search, pagination, hooks."""
    def setUp(self):
        self.user = self.login()
    def test_simple(self):
        """The chooser responds with a JSON envelope containing the modal HTML."""
        response = self.client.get(reverse('chunked_media:chooser'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        json_data = json.loads(response.content.decode('utf-8'))
        self.assertSetEqual(set(json_data.keys()), {
            'html', 'step', 'error_label', 'error_message', 'tag_autocomplete_url'
        })
        self.assertTemplateUsed(response, 'chunked_media/chooser/chooser.html')
        self.assertEqual(json_data['step'], 'chooser')
        self.assertEqual(json_data['tag_autocomplete_url'], reverse('wagtailadmin_tag_autocomplete'))
    def test_search(self):
        """A 'q' parameter is passed through to the template context as query_string."""
        response = self.client.get(reverse('chunked_media:chooser'), {'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")
    @staticmethod
    def make_media():
        # Create 50 audio media objects so the chooser paginates.
        fake_file = ContentFile(b("A boring example song"))
        fake_file.name = 'song.mp3'
        for i in range(50):
            media = models.Media(
                title="Test " + str(i),
                duration=100 + i,
                file=fake_file,
                type='audio',
            )
            media.save()
    def test_pagination(self):
        """A valid 'p' parameter selects that results page."""
        self.make_media()
        response = self.client.get(reverse('chunked_media:chooser'), {'p': 2})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/list.html')
        # Check that we got the correct page
        self.assertEqual(response.context['media_files'].number, 2)
    def test_pagination_invalid(self):
        """A non-numeric 'p' parameter falls back to page one."""
        self.make_media()
        response = self.client.get(reverse('chunked_media:chooser'), {'p': 'Hello World!'})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/list.html')
        # Check that we got page one
        self.assertEqual(response.context['media_files'].number, 1)
    def test_pagination_out_of_range(self):
        """An out-of-range 'p' parameter clamps to the last page."""
        self.make_media()
        response = self.client.get(reverse('chunked_media:chooser'), {'p': 99999})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'chunked_media/media/list.html')
        # Check that we got the last page
        self.assertEqual(response.context['media_files'].number, response.context['media_files'].paginator.num_pages)
    def test_construct_queryset_hook_browse(self):
        """The construct_media_chooser_queryset hook filters the browse listing."""
        media = models.Media.objects.create(
            title="Test media shown",
            duration=100,
            type='audio',
            uploaded_by_user=self.user,
        )
        models.Media.objects.create(
            title="Test media not shown",
            duration=100,
            type='audio',
        )
        def filter_media(media, request):
            # Hook restricts the queryset to the current user's uploads.
            return media.filter(uploaded_by_user=self.user)
        with self.register_hook('construct_media_chooser_queryset', filter_media):
            response = self.client.get(reverse('chunked_media:chooser'))
        self.assertEqual(len(response.context['media_files']), 1)
        self.assertEqual(response.context['media_files'][0], media)
    def test_construct_queryset_hook_search(self):
        """The construct_media_chooser_queryset hook also filters search results."""
        media = models.Media.objects.create(
            title="Test media shown",
            duration=100,
            type='audio',
            uploaded_by_user=self.user,
        )
        models.Media.objects.create(
            title="Test media not shown",
            duration=100,
            type='audio',
        )
        def filter_media(media, request):
            # Hook restricts the queryset to the current user's uploads.
            return media.filter(uploaded_by_user=self.user)
        with self.register_hook('construct_media_chooser_queryset', filter_media):
            response = self.client.get(reverse('chunked_media:chooser'), {'q': 'Test'})
        self.assertEqual(len(response.context['media_files']), 1)
        self.assertEqual(response.context['media_files'][0], media)
class TestMediaChooserChosenView(TestCase, WagtailTestUtils):
    """Tests for the chooser's "media chosen" JSON endpoint."""
    def setUp(self):
        self.login()
        # Media object that the chooser will report as chosen
        self.media = models.Media.objects.create(title="Test media", duration=100)
    def test_simple(self):
        """The endpoint returns a JSON payload describing the chosen media."""
        chosen_url = reverse('chunked_media:media_chosen', args=(self.media.id,))
        response = self.client.get(chosen_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        expected_payload = {
            'step': 'media_chosen',
            'result': {
                'id': self.media.id,
                'title': self.media.title,
                'edit_link': reverse('chunked_media:edit', args=[self.media.id]),
            },
        }
        self.assertDictEqual(json.loads(response.content.decode('utf-8')), expected_payload)
class TestMediaFilenameProperties(TestCase):
    """Tests for the Media.filename and Media.file_extension properties."""
    def setUp(self):
        self.media = models.Media(title="Test media", duration=100)
        # Consistency fix: wrap the payload with b() like every other fixture in
        # this module, so ContentFile always receives bytes-like content.
        self.media.file.save('example.mp4', ContentFile(b("A amazing example music video")))
        self.extensionless_media = models.Media(title="Test media", duration=101)
        self.extensionless_media.file.save('example', ContentFile(b("A boring example music video")))
    def test_filename(self):
        """filename is the stored basename, with or without an extension."""
        self.assertEqual('example.mp4', self.media.filename)
        self.assertEqual('example', self.extensionless_media.filename)
    def test_file_extension(self):
        """file_extension is the suffix after the dot, or '' when there is none."""
        self.assertEqual('mp4', self.media.file_extension)
        self.assertEqual('', self.extensionless_media.file_extension)
    def tearDown(self):
        # Remove the saved files from storage so test runs don't accumulate them.
        self.media.delete()
        self.extensionless_media.delete()
class TestUsageCount(TestCase, WagtailTestUtils):
    """Tests for the usage counter shown on the edit page (WAGTAIL_USAGE_COUNT_ENABLED)."""
    fixtures = ['test.json']
    def setUp(self):
        self.login()
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_unused_media_usage_count(self):
        """A media object referenced nowhere has a usage count of zero."""
        media = models.Media.objects.get(id=1)
        self.assertEqual(media.get_usage().count(), 0)
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_used_media_usage_count(self):
        """Linking the media from a page raises its usage count to one."""
        media = models.Media.objects.get(id=1)
        page = EventPage.objects.get(id=3)
        event_page_related_link = EventPageRelatedMedia()
        event_page_related_link.page = page
        event_page_related_link.link_media = media
        event_page_related_link.save()
        self.assertEqual(media.get_usage().count(), 1)
    def test_usage_count_does_not_appear(self):
        """Without the setting enabled, the edit page shows no usage count."""
        media = models.Media.objects.get(id=1)
        page = EventPage.objects.get(id=3)
        event_page_related_link = EventPageRelatedMedia()
        event_page_related_link.page = page
        event_page_related_link.link_media = media
        event_page_related_link.save()
        response = self.client.get(reverse('chunked_media:edit',
                                           args=(1,)))
        self.assertNotContains(response, 'Used 1 time')
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_count_appears(self):
        """With the setting enabled, the edit page shows 'Used 1 time'."""
        media = models.Media.objects.get(id=1)
        page = EventPage.objects.get(id=3)
        event_page_related_link = EventPageRelatedMedia()
        event_page_related_link.page = page
        event_page_related_link.link_media = media
        event_page_related_link.save()
        response = self.client.get(reverse('chunked_media:edit',
                                           args=(1,)))
        self.assertContains(response, 'Used 1 time')
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_count_zero_appears(self):
        """With the setting enabled and no references, the edit page shows 'Used 0 times'."""
        response = self.client.get(reverse('chunked_media:edit',
                                           args=(1,)))
        self.assertContains(response, 'Used 0 times')
class TestGetUsage(TestCase, WagtailTestUtils):
    """Tests for Media.get_usage() and the media usage listing page."""
    fixtures = ['test.json']
    def setUp(self):
        self.login()
    def test_media_get_usage_not_enabled(self):
        """With usage counting disabled, get_usage() is always empty."""
        media = models.Media.objects.get(id=1)
        self.assertEqual(list(media.get_usage()), [])
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_unused_media_get_usage(self):
        """Unreferenced media yields an empty usage list even when enabled."""
        media = models.Media.objects.get(id=1)
        self.assertEqual(list(media.get_usage()), [])
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_used_media_get_usage(self):
        """Referenced media yields Page-typed entries from get_usage()."""
        media = models.Media.objects.get(id=1)
        page = EventPage.objects.get(id=3)
        event_page_related_link = EventPageRelatedMedia()
        event_page_related_link.page = page
        event_page_related_link.link_media = media
        event_page_related_link.save()
        self.assertTrue(issubclass(Page, type(media.get_usage()[0])))
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_page(self):
        """The usage page lists the titles of pages referencing the media."""
        media = models.Media.objects.get(id=1)
        page = EventPage.objects.get(id=3)
        event_page_related_link = EventPageRelatedMedia()
        event_page_related_link.page = page
        event_page_related_link.link_media = media
        event_page_related_link.save()
        response = self.client.get(reverse('chunked_media:media_usage',
                                           args=(1,)))
        self.assertContains(response, 'Christmas')
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_page_no_usage(self):
        """With no references, the usage page table body is empty."""
        response = self.client.get(reverse('chunked_media:media_usage',
                                           args=(1,)))
        # There's no usage so there should be no table rows
        self.assertRegex(response.content, rb'<tbody>(\s|\n)*</tbody>')
| 39.299735
| 117
| 0.649433
|
acfd1a4b1d1cde481021c6b5ea65a6fbe42dfc37
| 4,323
|
py
|
Python
|
scikits/talkbox/tools/tests/test_correlations.py
|
neEverett/talkbox
|
ee0ec30a6a6d483eb9284f72bdaf26bd99765f80
|
[
"MIT"
] | 65
|
2015-02-18T05:23:12.000Z
|
2022-02-21T13:09:34.000Z
|
scikits/talkbox/tools/tests/test_correlations.py
|
neEverett/talkbox
|
ee0ec30a6a6d483eb9284f72bdaf26bd99765f80
|
[
"MIT"
] | 5
|
2016-06-26T08:46:22.000Z
|
2019-01-09T03:03:39.000Z
|
scikits/talkbox/tools/tests/test_correlations.py
|
neEverett/talkbox
|
ee0ec30a6a6d483eb9284f72bdaf26bd99765f80
|
[
"MIT"
] | 31
|
2015-02-23T22:42:15.000Z
|
2022-02-21T13:09:34.000Z
|
import numpy as np
from numpy.testing import TestCase, assert_array_equal, \
assert_array_almost_equal, dec
from scikits.talkbox.tools.correlations import nextpow2, acorr
from scikits.talkbox.tools.cacorr import acorr as cacorr
class TestNextpow2(TestCase):
    """Tests for nextpow2 on scalars and vectors, including nan/inf propagation."""
    # np.inf replaces np.infty: the 'infty' alias was removed in NumPy 2.0,
    # and np.inf is the identical canonical spelling.
    X = np.array([0, 1, 2, 3, 4, 6, 8, 15, 16, 17, 32, np.nan, np.inf])
    Y = np.array([0., 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, np.nan, np.inf])
    def test_simple(self):
        """nextpow2(n) is the exponent of the smallest power of two >= n."""
        assert nextpow2(0) == 0
        assert nextpow2(1) == 0
        assert nextpow2(2) == 1
        assert nextpow2(3) == 2
        assert nextpow2(4) == 2
    def test_vector(self):
        """nextpow2 applies elementwise to array input (nan/inf pass through)."""
        assert_array_equal(nextpow2(self.X), self.Y)
class _TestCorrCommon(TestCase):
    """Shared test cases for autocorrelation implementations.

    Subclasses assign ``self.acorr`` in setUp; every test below is written
    against that callable so the same suite can exercise both the Python and
    the Cython implementation.
    """
    # Rank-1 input 1..11 and its full (two-sided) raw autocorrelation.
    X = np.linspace(1, 11, 11)
    Y = np.array([11.0000, 32.0000, 62.0000, 100.0000, 145.0000, 196.0000,
        252.0000, 312.0000, 375.0000, 440.0000, 506.0000, 440.0000,
        375.0000, 312.0000, 252.0000, 196.0000, 145.0000, 100.0000,
        62.0000, 32.0000 , 11.0000])
    # Same autocorrelation with scale='coeff' (normalized so the peak is 1).
    Yc = np.array([ 0.02173913043478,  0.06324110671937,  0.12252964426877,
        0.19762845849802,  0.28656126482213,  0.38735177865613,  0.49802371541502,
        0.61660079051383,  0.74110671936759,  0.86956521739130,  1.00000000000000,
        0.86956521739130,  0.74110671936759,  0.61660079051383,  0.49802371541502,
        0.38735177865613,  0.28656126482213,  0.19762845849802,  0.12252964426877,
        0.06324110671937,  0.02173913043478,])
    # Rank-2 input (two rows of 11 samples) and its per-row autocorrelation.
    Xm = np.linspace(1, 22, 22).reshape(2, 11)
    Ym = np.array([[11., 32., 62., 100., 145., 196., 252., 312., 375.,
                    440., 506., 440., 375., 312., 252., 196., 145., 100.,
                    62., 32., 11.],
                   [264., 538., 821., 1112., 1410., 1714., 2023., 2336.,
                    2652., 2970., 3289., 2970., 2652., 2336., 2023., 1714.,
                    1410., 1112., 821., 538., 264.]])
    def test_simple(self):
        """Test autocorrelation for a rank 1 array."""
        a = self.acorr(self.X)
        assert_array_almost_equal(a, self.Y)
    def test_axis0(self):
        """Test autocorrelation along default axis."""
        a = self.acorr(self.Xm)
        assert_array_almost_equal(a, self.Ym)
    def test_axis1(self):
        """Test autocorrelation along axis 0."""
        a = self.acorr(self.Xm.T, axis=0)
        assert_array_almost_equal(a, self.Ym.T)
    def test_normalization(self):
        """scale='coeff' normalizes the peak of the autocorrelation to 1."""
        a = self.acorr(self.X, scale='coeff')
        assert_array_almost_equal(a, self.Yc)
    def test_normalization_onesided(self):
        """onesided=True returns only the non-negative lags of the 'coeff' result."""
        a = self.acorr(self.X, scale='coeff', onesided=True)
        assert_array_almost_equal(a, self.Yc[self.X.size-1:])
    def test_normalization_axis1(self):
        """Batched 'coeff' autocorrelation along the last axis matches row-by-row calls."""
        x = np.random.randn(5, 25)
        a = np.zeros((5, 49))
        for i in range(5):
            a[i] = self.acorr(x[i], scale='coeff')
        b = self.acorr(x, scale='coeff', axis=-1)
        assert_array_almost_equal(b, a)
    def test_normalization_axis0(self):
        """Batched 'coeff' autocorrelation along axis 0 matches column-by-column calls."""
        x = np.random.randn(5, 25)
        a = np.zeros((9, 25))
        for i in range(25):
            a[:, i] = self.acorr(x[:, i], scale='coeff', axis=0)
        b = self.acorr(x, scale='coeff', axis=0)
        assert_array_almost_equal(b, a)
    def test_normalization_onesided_axis1(self):
        """One-sided batched result along the last axis matches row-by-row calls."""
        x = np.random.randn(5, 25)
        a = np.zeros((5, 25))
        for i in range(5):
            a[i] = self.acorr(x[i], scale='coeff', onesided=True)
        b = self.acorr(x, scale='coeff', axis=-1, onesided=True)
        assert_array_almost_equal(b, a)
    def test_normalization_onesided_axis0(self):
        """One-sided batched result along axis 0 matches column-by-column calls."""
        x = np.random.randn(5, 25)
        a = np.zeros((5, 25))
        for i in range(25):
            a[:, i] = self.acorr(x[:, i], scale='coeff', axis=0, onesided=True)
        b = self.acorr(x, scale='coeff', axis=0, onesided=True)
        assert_array_almost_equal(b, a)
class TestAcorr(_TestCorrCommon):
    """Run the shared autocorrelation suite against the pure-Python acorr."""
    def setUp(self):
        # Bind the implementation under test for the shared cases.
        self.acorr = acorr
#class TestCythonAcorr(_TestCorrCommon):
# def setUp(self):
# self.acorr = cacorr
# self.X = self.X[np.newaxis, :]
# self.Y = self.Y[np.newaxis, :]
#
# @dec.skipif(True, "Arbitrary axis not suppported yet in cython version")
# def test_axis1(self):
# pass
| 37.921053
| 82
| 0.582003
|
acfd1a83830a42d9f4c1c0999469d309d0a0f5a7
| 19,903
|
py
|
Python
|
copct-master/baxter_corpus/demo_replace_red_with_green_2.py
|
jhomble/electron435
|
2a94a901679a1ebbdeea01bb9e888d365d536bec
|
[
"MIT"
] | 4
|
2016-10-26T13:58:44.000Z
|
2018-11-13T13:03:52.000Z
|
copct-master/baxter_corpus/demo_replace_red_with_green_2.py
|
jhomble/electron435
|
2a94a901679a1ebbdeea01bb9e888d365d536bec
|
[
"MIT"
] | 4
|
2020-03-31T01:10:26.000Z
|
2020-03-31T03:06:28.000Z
|
copct-master/baxter_corpus/demo_replace_red_with_green_2.py
|
jhomble/electron435
|
2a94a901679a1ebbdeea01bb9e888d365d536bec
|
[
"MIT"
] | 1
|
2020-03-03T06:22:08.000Z
|
2020-03-03T06:22:08.000Z
|
demo = (
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"dock-body",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"move grasped object",
(
2.000000,
"dock-case",
(
(1.000000, 0.000622, 0.000107, ),
(-0.000622, 1.000000, 0.000198, ),
(-0.000107, -0.000198, 1.000000, ),
),
(
(3.851632, ),
(-0.000641, ),
(0.173459, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"release",
(
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_7_2",
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"c3",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c3")),
),
"move grasped object",
(
2.000000,
"discard-bin",
(
(0.999989, 0.000909, -0.004614, ),
(-0.000904, 0.999999, 0.001247, ),
(0.004615, -0.001243, 0.999989, ),
),
(
(-0.322622, ),
(1.211868, ),
(5.854191, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c3")),
),
"release",
(
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_5_2",
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"c1",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c1")),
),
"move grasped object",
(
2.000000,
"dock-body_7_1",
(
(0.999817, 0.019017, 0.002105, ),
(-0.019004, 0.999802, -0.005852, ),
(-0.002216, 0.005811, 0.999981, ),
),
(
(-0.421438, ),
(-0.003108, ),
(0.338827, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c1")),
),
"release",
(
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_7_2",
1.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"dock-body",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"move grasped object",
(
2.000000,
"dock-case",
(
(1.000000, 0.000580, 0.000014, ),
(-0.000580, 1.000000, -0.000069, ),
(-0.000014, 0.000069, 1.000000, ),
),
(
(0.014394, ),
(-0.008064, ),
(0.001375, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("c4", "Cartridge"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"release",
(
2.000000,
)
),
)
| 25.615187
| 40
| 0.562729
|
acfd1a9bb218fa3181b4875e8a74463fdfbe7713
| 1,029
|
py
|
Python
|
heightmaptilemaker/geo/transform.py
|
ulrichji/HeightmapTileMaker
|
b817f3e39beb383fcb54e8c5ce29666a202dce04
|
[
"MIT"
] | null | null | null |
heightmaptilemaker/geo/transform.py
|
ulrichji/HeightmapTileMaker
|
b817f3e39beb383fcb54e8c5ce29666a202dce04
|
[
"MIT"
] | 7
|
2019-08-10T10:35:59.000Z
|
2022-03-12T00:49:24.000Z
|
heightmaptilemaker/geo/transform.py
|
ulrichji/HeightmapTileMaker
|
b817f3e39beb383fcb54e8c5ce29666a202dce04
|
[
"MIT"
] | null | null | null |
class Transform:
    """Abstract interface for converting between geographic and pixel coordinates."""

    def transformGeoLocationToPixelLocation(self, geo_x, geo_y):
        """Map a geographic coordinate to a pixel coordinate (placeholder)."""
        ...

    def transformPixelLocationToGeoLocation(self, pixel_x, pixel_y):
        """Map a pixel coordinate to a geographic coordinate (placeholder)."""
        ...


class GeoTransform(Transform):
    """Six-parameter (GDAL-style) affine transform between geo and pixel space."""

    def __init__(self, transform_parameters):
        # (origin_x, pixel_width, row_rotation, origin_y, col_rotation, pixel_height)
        self.transform_parameters = transform_parameters

    def transformGeoLocationToPixelLocation(self, geo_x, geo_y):
        """Invert the affine mapping: geographic coords -> fractional pixel indices."""
        return self.__coordinateToXYIndex(geo_x, geo_y)

    def transformPixelLocationToGeoLocation(self, pixel_x, pixel_y):
        """Apply the affine mapping: pixel indices -> geographic coordinate."""
        return self.__xyIndexToCoordinate(pixel_x, pixel_y)

    def __xyIndexToCoordinate(self, pixel_x, pixel_y):
        x0, dx, rx, y0, ry, dy = self.transform_parameters
        return (x0 + dx * pixel_x + rx * pixel_y,
                y0 + ry * pixel_x + dy * pixel_y)

    def __coordinateToXYIndex(self, geo_x, geo_y):
        # Closed-form inverse of the 2x2 affine part via its determinant.
        x0, dx, rx, y0, ry, dy = self.transform_parameters
        det = dx * dy - ry * rx
        return ((dy * (geo_x - x0) - rx * (geo_y - y0)) / det,
                (dx * (geo_y - y0) - ry * (geo_x - x0)) / det)
| 35.482759
| 72
| 0.665695
|
acfd1b1aa005b585283b16da349b51b80886fe27
| 1,045
|
py
|
Python
|
common/code/snippets/spoof-ip.py
|
nevesnunes/env
|
7a5e3816334337e04a87e1a2e4dc322215901744
|
[
"MIT"
] | 4
|
2020-04-07T14:45:02.000Z
|
2021-12-28T22:43:16.000Z
|
common/code/snippets/spoof-ip.py
|
nevesnunes/env
|
7a5e3816334337e04a87e1a2e4dc322215901744
|
[
"MIT"
] | null | null | null |
common/code/snippets/spoof-ip.py
|
nevesnunes/env
|
7a5e3816334337e04a87e1a2e4dc322215901744
|
[
"MIT"
] | 2
|
2020-04-08T03:12:06.000Z
|
2021-03-04T20:33:03.000Z
|
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-
# Send an ICMP echo request with a spoofed source IP address.
# Requires root privileges (raw socket with IP_HDRINCL).
#
# Usage:
#   ./ipspoof.py 111.111.111.111 127.0.0.1
#   tcpdump -i lo
import socket,sys
# Bug fix: the original line was "from impacket import import ImpactDecoder,
# ImpactPacket" -- a duplicated "import" keyword, i.e. a SyntaxError.
from impacket import ImpactDecoder, ImpactPacket

if __name__ == "__main__":
    src = sys.argv[1]
    dst = sys.argv[2]

    # Create a new IP packet and set its source and destination addresses
    ip = ImpactPacket.IP()
    ip.set_ip_src(src)
    ip.set_ip_dst(dst)

    # Create a new ICMP packet
    icmp = ImpactPacket.ICMP()
    icmp.set_icmp_type(icmp.ICMP_ECHO)

    # Include a small payload inside the ICMP packet
    # and have the ip packet contain the ICMP packet
    icmp.contains(ImpactPacket.Data("a"*100))
    ip.contains(icmp)

    # Raw socket; IP_HDRINCL tells the kernel we supply the IP header ourselves,
    # which is what makes source-address spoofing possible.
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)

    # Give the ICMP packet some ID
    icmp.set_icmp_id(1)

    # Calculate checksum
    icmp.set_icmp_cksum(0)
    icmp.auto_checksum = 0

    s.sendto(ip.get_packet(), (dst, 0))
| 25.487805
| 75
| 0.657416
|
acfd1b3513e30ab924b23a0f0cee311cace75064
| 1,587
|
py
|
Python
|
examples/simple_example.py
|
pythub-project/CustomTkinter
|
bb1fe2562517928dde5a98e0dce650b9a3ef2380
|
[
"CC0-1.0"
] | null | null | null |
examples/simple_example.py
|
pythub-project/CustomTkinter
|
bb1fe2562517928dde5a98e0dce650b9a3ef2380
|
[
"CC0-1.0"
] | null | null | null |
examples/simple_example.py
|
pythub-project/CustomTkinter
|
bb1fe2562517928dde5a98e0dce650b9a3ef2380
|
[
"CC0-1.0"
] | null | null | null |
"""Minimal CustomTkinter demo: one frame holding a label, a progress bar,
a button, a slider, an entry and a checkbox, with simple callback wiring."""
import tkinter
import customtkinter # <- import the CustomTkinter module
customtkinter.set_appearance_mode("System") # Modes: "System" (standard), "Dark", "Light"
customtkinter.set_default_color_theme("blue") # Themes: "blue" (standard), "green", "dark-blue"
root_tk = customtkinter.CTk() # create CTk window like you do with the Tk window (you can also use normal tkinter.Tk window)
root_tk.geometry("400x340")
root_tk.title("CustomTkinter Test")
def button_function():
    # Print the label's current text on every button press.
    print("Button click", label_1.text_label.cget("text"))
def slider_function(value):
    # Mirror the slider position onto the progress bar.
    progressbar_1.set(value)
def check_box_function():
    # Report the checkbox state (on/off) to stdout.
    print("checkbox_1:", checkbox_1.get())
y_padding = 13  # vertical spacing between stacked widgets
frame_1 = customtkinter.CTkFrame(master=root_tk, corner_radius=15)
frame_1.pack(pady=20, padx=60, fill="both", expand=True)
label_1 = customtkinter.CTkLabel(master=frame_1)
label_1.pack(pady=y_padding, padx=10)
progressbar_1 = customtkinter.CTkProgressBar(master=frame_1)
progressbar_1.pack(pady=y_padding, padx=10)
button_1 = customtkinter.CTkButton(master=frame_1, corner_radius=8, command=button_function)
button_1.pack(pady=y_padding, padx=10)
# button_1.configure(state="disabled")
slider_1 = customtkinter.CTkSlider(master=frame_1, command=slider_function, from_=0, to=1)
slider_1.pack(pady=y_padding, padx=10)
slider_1.set(0.5)
entry_1 = customtkinter.CTkEntry(master=frame_1, placeholder_text="CTkEntry")
entry_1.pack(pady=y_padding, padx=10)
checkbox_1 = customtkinter.CTkCheckBox(master=frame_1, command=check_box_function)
checkbox_1.pack(pady=y_padding, padx=10)
root_tk.mainloop()
| 31.74
| 125
| 0.779458
|
acfd1bfc2d9395702818ce62427e4ff19af57f13
| 1,607
|
py
|
Python
|
vox-central-line-infections/scripts/location.py
|
swkasica/data-projects
|
6a2c5a198c2dc2a2f0a7e4ff3dd3c385a6c9c625
|
[
"Unlicense"
] | 193
|
2015-07-09T14:22:39.000Z
|
2022-02-01T08:04:38.000Z
|
vox-central-line-infections/scripts/location.py
|
swkasica/data-projects
|
6a2c5a198c2dc2a2f0a7e4ff3dd3c385a6c9c625
|
[
"Unlicense"
] | 6
|
2015-07-29T15:03:46.000Z
|
2021-06-01T21:44:21.000Z
|
vox-central-line-infections/scripts/location.py
|
swkasica/data-projects
|
6a2c5a198c2dc2a2f0a7e4ff3dd3c385a6c9c625
|
[
"Unlicense"
] | 70
|
2015-07-15T16:13:48.000Z
|
2021-12-07T00:09:13.000Z
|
#!/bin/python
# Extract latitude/longitude pairs from the free-text `location` column of
# hospitals_temp.csv and write a clean hospitals_info.csv with separate
# lat/lng columns. (Python 2 script: csv files are opened in 'rb'/'wb' mode.)
import csv
import re
# open up `hospitals_temp.csv` and write to `hospitals_info.csv`
with open('hospitals_temp.csv', 'rb') as infile, open('hospitals_info.csv', 'wb') as outfile:
    reader = csv.reader(infile)
    # skip header in `hospitals_temp.csv`
    next(reader, None)
    writer = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
    # here's a better header
    writer.writerow(['Provider ID', 'Hospital Name', 'Address', 'City', 'State', 'ZIP Code', 'lat', 'lng'])
    for row in reader:
        # Column layout of the input file: id, name, street, city, state, zip, location.
        provider_id = row[0]
        name = row[1]
        street = row[2]
        city = row[3]
        state = row[4]
        zip_code = row[5]
        location = row[6]
        # `match` looks for a latitude and longitude. Considering
        # the geography of the U.S. (and most of its territories),
        # the first number will always be positive (north), while
        # the second number will always be negative (west) (except
        # for some islands in Alaska that don't have hospitals).
        # NOTE(review): the '.' before the second number is a wildcard that
        # happens to match the minus sign -- presumably intentional shorthand.
        match = re.search( r'\d*\.\d*, .\d*\.\d*', location )
        if match:
            # if we get a match, then split the match by the comma, and write a row in the new csv
            point_string = match.group()
            point_array = [n.strip() for n in point_string.split(',')]
            writer.writerow([provider_id,name,street,city,state,zip_code,point_array[0],point_array[1]])
        else:
            # all the csvs will match, so this case will never happen
            writer.writerow([provider_id,name,street,city,state,zip_code,'NA','NA'])
|
acfd1c38ed1290512e3a6bea7ced5c3421167e14
| 4,806
|
py
|
Python
|
django/core/cache/backends/filebased.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T11:42:08.000Z
|
2015-11-08T11:42:08.000Z
|
django/core/cache/backends/filebased.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
django/core/cache/backends/filebased.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
"File-based cache backend"
import hashlib
import os
import shutil
import time
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT
from django.utils.encoding import force_bytes
class FileBasedCache(BaseCache):
    """File-system cache backend.

    Each key is hashed to a file path (two levels of subdirectories keep
    per-directory file counts small). The file holds two consecutive pickle
    records: the expiry timestamp (a float, or None for "never expires"),
    followed by the cached value.
    """

    def __init__(self, dir, params):
        BaseCache.__init__(self, params)
        self._dir = dir
        if not os.path.exists(self._dir):
            self._createdir()

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store the value only if the key is not already cached.

        Returns True if the value was stored, False otherwise.
        """
        if self.has_key(key, version=version):
            return False

        self.set(key, value, timeout, version=version)
        return True

    def get(self, key, default=None, version=None):
        """Return the cached value, or ``default`` on a miss, on expiry, or
        when the cache file is unreadable/corrupt."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        fname = self._key_to_file(key)
        try:
            with open(fname, 'rb') as f:
                exp = pickle.load(f)
                now = time.time()
                # exp is None for entries stored with no expiry.
                if exp is not None and exp < now:
                    self._delete(fname)
                else:
                    return pickle.load(f)
        except (IOError, OSError, EOFError, pickle.PickleError):
            pass
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Write the (expiry, value) pickle pair. Filesystem failures are
        silently ignored: a cache write must never break the caller."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        fname = self._key_to_file(key)
        dirname = os.path.dirname(fname)

        # Evict first so the new entry doesn't push us over the cap.
        self._cull()

        try:
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            with open(fname, 'wb') as f:
                expiry = self.get_backend_timeout(timeout)
                pickle.dump(expiry, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
        except (IOError, OSError):
            pass

    def delete(self, key, version=None):
        """Remove the key's cache file if present; ignore filesystem errors."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        try:
            self._delete(self._key_to_file(key))
        except (IOError, OSError):
            pass

    def _delete(self, fname):
        os.remove(fname)
        try:
            # Remove the 2 subdirs if they're empty
            dirname = os.path.dirname(fname)
            os.rmdir(dirname)
            os.rmdir(os.path.dirname(dirname))
        except (IOError, OSError):
            pass

    def has_key(self, key, version=None):
        """Return True if the key exists and has not expired.

        Bug fix: entries stored with timeout=None have a pickled expiry of
        None. The previous ``exp < now`` comparison deleted such non-expired
        entries on Python 2 (None sorts before numbers) and raised TypeError
        on Python 3. Use the same None-aware check as get().
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        fname = self._key_to_file(key)
        try:
            with open(fname, 'rb') as f:
                exp = pickle.load(f)
                now = time.time()
                if exp is not None and exp < now:
                    self._delete(fname)
                    return False
                else:
                    return True
        except (IOError, OSError, EOFError, pickle.PickleError):
            return False

    def _cull(self):
        """Evict entries once _max_entries is reached. Every _cull_frequency-th
        entry (alphabetically) is dropped; frequency 0 wipes the whole cache."""
        if int(self._num_entries) < self._max_entries:
            return

        try:
            filelist = sorted(os.listdir(self._dir))
        except (IOError, OSError):
            return

        if self._cull_frequency == 0:
            doomed = filelist
        else:
            doomed = [os.path.join(self._dir, k) for (i, k) in enumerate(filelist) if i % self._cull_frequency == 0]

        for topdir in doomed:
            try:
                for root, _, files in os.walk(topdir):
                    for f in files:
                        self._delete(os.path.join(root, f))
            except (IOError, OSError):
                pass

    def _createdir(self):
        try:
            os.makedirs(self._dir)
        except OSError:
            raise EnvironmentError("Cache directory '%s' does not exist and could not be created'" % self._dir)

    def _key_to_file(self, key):
        """
        Convert the filename into an md5 string. We'll turn the first couple
        bits of the path into directory prefixes to be nice to filesystems
        that have problems with large numbers of files in a directory.

        Thus, a cache key of "foo" gets turnned into a file named
        ``{cache-dir}ac/bd/18db4cc2f85cedef654fccc4a4d8``.
        """
        path = hashlib.md5(force_bytes(key)).hexdigest()
        path = os.path.join(path[:2], path[2:4], path[4:])
        return os.path.join(self._dir, path)

    def _get_num_entries(self):
        """Count every file under the cache directory (walks the whole tree)."""
        count = 0
        for _, _, files in os.walk(self._dir):
            count += len(files)
        return count
    _num_entries = property(_get_num_entries)

    def clear(self):
        """Delete the whole cache directory tree; ignore filesystem errors."""
        try:
            shutil.rmtree(self._dir)
        except (IOError, OSError):
            pass
# For backwards compatibility
class CacheClass(FileBasedCache):
    # Deprecated alias kept so old settings referencing CacheClass still work.
    pass
| 30.417722
| 116
| 0.570953
|
acfd1c64be2d83b261e1ca3808158ad0c008cd6e
| 1,950
|
py
|
Python
|
squidpy/instruments/ppms.py
|
guenp/squidpy
|
17af231cef7142325a483aaa95041671e4daaea4
|
[
"MIT"
] | null | null | null |
squidpy/instruments/ppms.py
|
guenp/squidpy
|
17af231cef7142325a483aaa95041671e4daaea4
|
[
"MIT"
] | null | null | null |
squidpy/instruments/ppms.py
|
guenp/squidpy
|
17af231cef7142325a483aaa95041671e4daaea4
|
[
"MIT"
] | null | null | null |
from squidpy.instrument import Instrument
from squidpy.utils import ask_socket, connect_socket
class PPMS(Instrument):
    '''
    For remote operation of the Quantum Design PPMS.
    Make sure to run PyQDInstrument.run_server() in an IronPython console on a machine that can connect to the PPMS control PC's QDInstrument_Server.exe program.
    Attributes represent the system control parameters:
    'temperature', 'temperature_rate', 'temperature_approach', 'field', 'field_rate', 'field_approach', 'field_mode', 'temperature_status', 'field_status', 'chamber'
    '''
    def __init__(self, host, port, s=None, name='ppms'):
        # host/port: address of the PyQDInstrument socket server.
        # s: optionally reuse an already-connected socket instead of opening one.
        self._name = name
        if s == None:
            self._s = connect_socket(host, port)
        else:
            self._s = s
        # Units reported for the writable numeric parameters.
        self._units = {'temperature': 'K', 'temperature_rate': 'K/min','field': 'Oe', 'field_rate': 'Oe/min'}
        # Attach read/write properties on the PPMS *class* for each writable
        # parameter. eval builds a distinct lambda source per parameter so the
        # parameter name is baked into each getter/setter (avoids the
        # late-binding-closure pitfall a plain lambda over `param` would have).
        for param in ['temperature', 'temperature_rate', 'field', 'field_rate', 'temperature_approach', 'field_approach', 'field_mode']:
            setattr(PPMS,param,property(fget=eval("lambda self: self._get_param('%s')" %param),
                fset=eval("lambda self, value: self._set_param('%s',value)" %param)))
        # Read-only status parameters get a getter only.
        for param in ['temperature_status', 'field_status', 'chamber']:
            setattr(PPMS,param,property(fget=eval("lambda self: self._get_param('%s')" %param)))
        self._params = ['temperature', 'temperature_rate', 'temperature_approach', 'field', 'field_rate', 'field_approach', 'field_mode', 'temperature_status', 'field_status', 'chamber']
        self._functions = []
    def _get_param(self, param):
        # Query the remote server for the current value of `param`.
        return ask_socket(self._s, param)
    def _set_param(self, param, value):
        # Send an assignment statement to the remote server; string values
        # are quoted so the server-side eval/exec receives a string literal.
        if type(value) == str:
            cmd = "%s = '%s'" %(param, value)
        else:
            cmd = '%s = %s' %(param, value)
        return ask_socket(self._s, cmd)
    def __del__(self):
        # Close the socket when the instrument object is garbage-collected.
        self._s.close()
| 52.702703
| 186
| 0.633846
|
acfd2002708839729f02b256f0d5abc8688d6158
| 8,195
|
py
|
Python
|
display/dpy_pygame.py
|
smmalmansoori/bub-n-bros
|
5d1db7192edcf981a988fa17f008af3f50611822
|
[
"MIT"
] | null | null | null |
display/dpy_pygame.py
|
smmalmansoori/bub-n-bros
|
5d1db7192edcf981a988fa17f008af3f50611822
|
[
"MIT"
] | null | null | null |
display/dpy_pygame.py
|
smmalmansoori/bub-n-bros
|
5d1db7192edcf981a988fa17f008af3f50611822
|
[
"MIT"
] | null | null | null |
################################################
## pygame-based implementation of xshm ##
################################################
import os
import pygame
from pygame.locals import *
from modes import KeyPressed, KeyReleased
class Display:
    """pygame implementation of the display backend (mirrors the xshm
    interface): game code draws into an off-screen surface at native
    resolution, which flip() scales and blits to the real window.
    NOTE: Python 2 code (tuple-parameter syntax in fixpos/taskbar)."""
    musthidemouse = 0   # true while the taskbar (and thus the cursor) is hidden
    mousevisible = 1    # current pygame cursor visibility
    def __init__(self, width, height, title, transparency='yes',
                 fullscreen='no', zoom='100%', smooth='yes',
                 smoothfast='yes'):
        # Option values are yes/no strings coming from the options form.
        self.use_transparency = not transparency.startswith('n')
        self.use_fullscreen = fullscreen.startswith('y')
        if zoom.endswith('%'):
            zoom = zoom[:-1]
        scale = float(zoom) / 100.0
        # Snap near-integer scales to an exact int so scale2x can apply.
        iscale = int(scale+0.001)
        if abs(scale - iscale) < 0.002:
            scale = iscale
        self.scale = scale
        self.smooth = smooth.startswith('y')
        self.smoothfast = smoothfast.startswith('y')
        # Initialize pygame
        pygame.init()
        # Set the display mode
        winstyle = HWSURFACE
        if self.use_fullscreen:
            winstyle |= FULLSCREEN
        bestdepth = pygame.display.mode_ok((int(width * self.scale),
                                            int(height * self.scale)),
                                           winstyle, 32)
        self.screen = pygame.display.set_mode((int(width * self.scale),
                                               int(height * self.scale)),
                                              winstyle, bestdepth)
        # Off-screen buffer at native (unscaled) resolution.
        self.offscreen = pygame.Surface((width, height))
        #decorate the game window
        pygame.display.set_caption(title)
        #pygame.mouse.set_visible(0)
        self.tbcache = None, None     # cached taskbar gradient (surface, height)
        self.events_key = []          # buffered (key, KeyPressed/KeyReleased)
        self.events_mouse = []        # buffered click positions (unscaled)
        self.prevposition = None      # last mouse position, for motion detection
        EVENT_HANDLERS[KEYDOWN] = self.keydown_handler
        EVENT_HANDLERS[KEYUP] = self.keyup_handler
        EVENT_HANDLERS[MOUSEBUTTONDOWN] = self.mousebuttondown_handler
    def keydown_handler(self, e):
        # Queue a key press; ESC quits when running full-screen.
        if e.key == K_ESCAPE and self.use_fullscreen:
            raise SystemExit     # ESC to exit the game if full-screen
        self.showmouse(not self.musthidemouse)
        self.events_key.append((e.key, KeyPressed))
        del self.events_key[:-16]   # keep only the 16 most recent
    def keyup_handler(self, e):
        # Queue a key release.
        self.events_key.append((e.key, KeyReleased))
        del self.events_key[:-16]
    def mousebuttondown_handler(self, e):
        # Queue a click, converted back to unscaled game coordinates.
        self.showmouse(1)
        self.events_mouse.append(self.fixpos(e.pos))
        del self.events_mouse[:-8]
    def pixmap(self, w, h, data, colorkey=-1):
        # Build a surface from raw RGB bytes; colorkey is packed 0xBBGGRR.
        img = pygame.image.fromstring(data, (w, h), "RGB")
        if colorkey >= 0:
            r = colorkey & 0xFF
            g = (colorkey >> 8) & 0xFF
            b = (colorkey >> 16) & 0xFF
            img.set_colorkey([r, g, b])
        return img # not optimized -- must use getopticon()
    def getopticon(self, pixmap, rect, alpha=255):
        # Extract a sub-image converted to the fastest blit format, honouring
        # per-surface alpha and colorkey transparency when enabled.
        if not self.use_transparency:
            alpha = 255
        img = pixmap.subsurface(rect)
        colorkey = pixmap.get_colorkey()
        if alpha == 255 and not colorkey:
            return img.convert(self.offscreen)
        else:
            if colorkey:
                img.set_colorkey(colorkey, RLEACCEL)
            if alpha < 255:
                img.set_alpha(alpha, RLEACCEL)
            img = img.convert_alpha(self.offscreen)
            img.set_alpha(255, RLEACCEL)
            return img
##    def vflipppm(self, img):
##        w, h = img.get_size()
##        colorkey = img.get_colorkey()
##        data = pygame.image.tostring(img, "RGB", 1)
##        flipimg = pygame.image.fromstring(data, (w, h), "RGB")
##        flipimg.set_colorkey(colorkey, RLEACCEL)
##        return flipimg, h
    def getppm(self, rect):
        # Copy a rectangle of the off-screen buffer into a new surface.
        bkgnd = pygame.Surface(rect[2:])
        bkgnd.blit(self.offscreen, (0, 0), rect)
        return bkgnd
    def putppm(self, x, y, bitmap, rect=None):
        # Blit a surface (optionally a sub-rect of it) onto the off-screen buffer.
        if rect:
            self.offscreen.blit(bitmap, (x, y), rect)
        else:
            self.offscreen.blit(bitmap, (x, y))
    def flip(self):
        # Scale the off-screen buffer to the window size, flip, pump events.
        offscreen = self.offscreen
        if self.scale != 1:
            w, h = offscreen.get_size()
            w = int(w * self.scale)
            h = int(h * self.scale)
            if self.scale == 2 and self.smoothfast:
                offscreen = pygame.transform.scale2x(offscreen)
            elif self.smooth:
                offscreen = pygame.transform.smoothscale(offscreen, (w, h))
            else:
                offscreen = pygame.transform.scale(offscreen, (w, h))
        self.screen.blit(offscreen, (0, 0))
        pygame.display.flip()
        events_dispatch()
    def close(self):
        # Restore the cursor and shut the display down.
        self.showmouse(1)
        pygame.display.quit()
    def clear(self):
        # Fill the off-screen buffer with black.
        self.offscreen.fill([0,0,0,])
    def fixpos(self, (x, y)):
        # Convert a window position back to unscaled game coordinates.
        if self.scale != 1:
            x = int(x / self.scale)
            y = int(y / self.scale)
        return (x, y)
    def events_poll(self):
        # Drain the pygame queue, buffering key/mouse events for the game.
        # NOTE(review): self.next_music is not defined in this class -- it is
        # presumably provided by a subclass or assigned externally; confirm.
        while 1:
            e = pygame.event.poll()
            if e.type == NOEVENT:
                break
            elif e.type == KEYDOWN:
                self.events_key.append((e.key, KeyPressed))
                del self.events_key[:-16]
            elif e.type == KEYUP:
                self.events_key.append((e.key, KeyReleased))
                del self.events_key[:-16]
            elif e.type == MOUSEBUTTONDOWN:
                self.events_mouse.append(self.fixpos(e.pos))
                del self.events_mouse[:-8]
            elif e.type == ENDMUSICEVENT:
                self.next_music()
            elif e.type == QUIT:
                raise SystemExit
    def keyevents(self):
        # Return and clear the buffered key events.
        events_dispatch()
        events = self.events_key
        self.events_key = []
        return events
    def pointermotion(self):
        # Return the (unscaled) mouse position if it moved since the last
        # call, else None.
        position = pygame.mouse.get_pos()
        if position != self.prevposition:
            self.showmouse(1)
            self.prevposition = position
            return self.fixpos(position)
        else:
            return None
    def mouseevents(self):
        # Return and clear the buffered mouse-click events.
        events_dispatch()
        events = self.events_mouse
        self.events_mouse = []
        return events
    def selectlist(self):
        # No extra selectable objects in the pygame backend.
        return []
    def taskbar(self, (x, y, w, h)):
        # Draw a vertically-faded translucent blue bar (taskbar background),
        # tiling a cached 32-pixel-wide gradient strip across the width.
        tbs, tbh = self.tbcache
        if tbh != h:
            tbs = pygame.Surface((32, h)).convert_alpha(self.offscreen)
            alpha_f = 256.0 / h
            for j in range(h):
                tbs.fill((128, 128, 255, int(j*alpha_f)),
                         (0, j, 32, 1))
            self.tbcache = tbs, h
        for i in range(x, x+w, 32):
            dw = x+w-i
            if dw < 32:
                self.offscreen.blit(tbs, (i, y), (0, 0, dw, h))
            else:
                self.offscreen.blit(tbs, (i, y))
    def settaskbar(self, tb_visible):
        # Show/hide the taskbar; the cursor is hidden together with it.
        self.showmouse(1)
        self.musthidemouse = not tb_visible # and self.use_fullscreen
    def showmouse(self, v):
        # Set cursor visibility, avoiding redundant pygame calls.
        if v != self.mousevisible:
            self.mousevisible = v
            pygame.mouse.set_visible(v)
def quit_handler(e):
    # Handler for pygame QUIT events: terminate the program.
    raise SystemExit
# Event-type -> handler dispatch table; Display.__init__ registers its
# key/mouse handlers here as well.
EVENT_HANDLERS = {
    QUIT: quit_handler,
    }
def events_dispatch(handlers = EVENT_HANDLERS):
    """Drain the pygame event queue, routing each event to its registered
    handler; events with no handler are silently discarded."""
    while True:
        event = pygame.event.poll()
        if event.type == NOEVENT:
            return
        handler = handlers.get(event.type)
        if handler is not None:
            handler(event)
def htmloptionstext(nameval):
    """Render the HTML form fields for the pygame display options.

    `nameval(type, name, value=..., default=...)` is a callback that renders
    a single input-tag attribute string; calls are made in the same order as
    the fields appear.
    """
    fullscreen_cb = nameval("checkbox", "fullscreen", "yes", default="no")
    fullscreen_hidden = nameval("hidden", "fullscreen", "no")
    transparency_cb = nameval("checkbox", "transparency", "yes", default="yes")
    transparency_hidden = nameval("hidden", "transparency", "no")
    zoom_field = nameval("text", "zoom", default="100")
    smooth_cb = nameval("checkbox", "smooth", "yes", default="yes")
    smooth_hidden = nameval("hidden", "smooth", "no")
    smoothfast_cb = nameval("checkbox", "smoothfast", "yes", default="no")
    smoothfast_hidden = nameval("hidden", "smoothfast", "no")
    rows = [
        '',
        '<%s> Full Screen (Esc key to exit)</input><%s><br>' % (fullscreen_cb, fullscreen_hidden),
        '<%s> Draw slightly transparent bubbles</input><%s><br>' % (transparency_cb, transparency_hidden),
        'Scale image by <%s size=5>%%<br>' % zoom_field,
        '<%s> Smoothed scaled image</input><%s><br>' % (smooth_cb, smooth_hidden),
        '<%s> Semi-smoothed scaled image (for 200%% only)</input><%s><br>' % (smoothfast_cb, smoothfast_hidden),
        '',
    ]
    return '\n'.join(rows)
| 33.313008
| 75
| 0.539231
|
acfd200c6740a989171723106305fcb7bb1e4905
| 22,900
|
py
|
Python
|
qcfractal/server.py
|
yudongqiu/QCFractal
|
43b5b4807dfe19f78177288f204aab1066de2dea
|
[
"BSD-3-Clause"
] | null | null | null |
qcfractal/server.py
|
yudongqiu/QCFractal
|
43b5b4807dfe19f78177288f204aab1066de2dea
|
[
"BSD-3-Clause"
] | null | null | null |
qcfractal/server.py
|
yudongqiu/QCFractal
|
43b5b4807dfe19f78177288f204aab1066de2dea
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The FractalServer class
"""
import asyncio
import datetime
import logging
import ssl
import time
import traceback
from typing import Any, Dict, List, Optional, Union
import tornado.ioloop
import tornado.log
import tornado.options
import tornado.web
from .extras import get_information
from .interface import FractalClient
from .queue import QueueManager, QueueManagerHandler, ServiceQueueHandler, TaskQueueHandler
from .services import construct_service
from .storage_sockets import storage_socket_factory
from .storage_sockets.api_logger import API_AccessLogger
from .web_handlers import (CollectionHandler, InformationHandler, KeywordHandler, KVStoreHandler, MoleculeHandler,
ProcedureHandler, ResultHandler)
myFormatter = logging.Formatter('[%(asctime)s] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
def _build_ssl():
    """Generate a throwaway self-signed certificate/key pair for the server.

    Returns
    -------
    (cert_pem, key_pem) : (bytes, bytes)
        PEM-encoded self-signed X.509 certificate and RSA private key,
        valid for the local hostname and its resolved IP for ~10 years.
    """
    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    import sys
    import socket
    import ipaddress
    import random

    hostname = socket.gethostname()
    public_ip = ipaddress.ip_address(socket.gethostbyname(hostname))

    # Security fix: was key_size=1024, which is deprecated and rejected by
    # modern TLS stacks; 2048 bits is the accepted minimum for RSA.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())

    # Certificate covers both the hostname and its resolved IP address.
    alt_name_list = [x509.DNSName(hostname), x509.IPAddress(ipaddress.ip_address(public_ip))]
    alt_names = x509.SubjectAlternativeName(alt_name_list)

    # Basic data
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, hostname)])
    basic_contraints = x509.BasicConstraints(ca=True, path_length=0)
    now = datetime.datetime.utcnow()

    # Build cert (self-signed: subject == issuer)
    cert = (x509.CertificateBuilder()
            .subject_name(name)
            .issuer_name(name)
            .public_key(key.public_key())
            .serial_number(int(random.random() * sys.maxsize))
            .not_valid_before(now)
            .not_valid_after(now + datetime.timedelta(days=10*365))
            .add_extension(basic_contraints, False)
            .add_extension(alt_names, False)
            .sign(key, hashes.SHA256(), default_backend())) # yapf: disable

    # Build and return keys
    cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
    key_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    ) # yapf: disable

    return cert_pem, key_pem
class FractalServer:
    """The QCFractal REST server.

    Owns the tornado HTTP application and endpoints, the storage socket,
    optional local compute (a QueueManager bound to a queue adapter), and the
    periodic callbacks that iterate Services and police Manager heartbeats.
    """

    def __init__(
            self,

            # Server info options
            name: str="QCFractal Server",
            port: int=7777,
            loop: 'IOLoop'=None,
            compress_response: bool=True,

            # Security
            security: Optional[str]=None,
            allow_read: bool=False,
            ssl_options: Union[bool, Dict[str, str]]=True,

            # Database options
            storage_uri: str="postgresql://localhost:5432",
            storage_project_name: str="qcfractal_default",
            query_limit: int=1000,

            # Log options
            logfile_prefix: str=None,
            log_apis: bool=False,
            geo_file_path: str=None,

            # Queue options
            queue_socket: 'BaseAdapter'=None,
            heartbeat_frequency: float=1800,

            # Service options
            max_active_services: int=20,
            service_frequency: float=60):
        """QCFractal initialization

        Parameters
        ----------
        name : str, optional
            The name of the server itself, provided when users query information
        port : int, optional
            The port the server will listen on.
        loop : IOLoop, optional
            Provide an IOLoop to use for the server
        compress_response : bool, optional
            Automatic compression of responses, turn on unless behind a proxy that
            provides this capability.
        security : Optional[str], optional
            The security options for the server {None, "local"}. The local security
            option uses the database to cache users.
        allow_read : bool, optional
            Allow unregistered to perform GET operations on Molecule/KeywordSets/KVStore/Results/Procedures
        ssl_options : Union[bool, Dict[str, str]], optional
            True, automatically creates self-signed SSL certificates. False, turns off SSL entirely. A user can also supply a dictionary of valid certificates.
        storage_uri : str, optional
            The database URI that the underlying storage socket will connect to.
        storage_project_name : str, optional
            The project name to use on the database.
        query_limit : int, optional
            The maximum number of entries a query will return.
        logfile_prefix : str, optional
            The logfile to use for logging.
        log_apis : bool, optional
            If True, record API access through an API_AccessLogger.
        geo_file_path : str, optional
            Path to a GeoIP database handed to the API access logger.
        queue_socket : BaseAdapter, optional
            An optional Adapter to provide for server to have limited local compute.
            Should only be used for testing and interactive sessions.
        heartbeat_frequency : float, optional
            The time (in seconds) of the heartbeat manager frequency.
        max_active_services : int, optional
            The maximum number of active Services that can be running at any given time.
        service_frequency : float, optional
            The time (in seconds) before checking and updating services.
        """

        # Save local options
        self.name = name
        self.port = port
        if ssl_options is False:
            self._address = "http://localhost:" + str(self.port) + "/"
        else:
            self._address = "https://localhost:" + str(self.port) + "/"

        self.max_active_services = max_active_services
        self.service_frequency = service_frequency
        self.heartbeat_frequency = heartbeat_frequency

        # Setup logging.
        if logfile_prefix is not None:
            tornado.options.options['log_file_prefix'] = logfile_prefix

        tornado.log.enable_pretty_logging()
        self.logger = logging.getLogger("tornado.application")

        # Create API Access logger class if enabled
        if log_apis:
            self.api_logger = API_AccessLogger(geo_file_path=geo_file_path)
        else:
            self.api_logger = None

        # Build security layers
        if security is None:
            storage_bypass_security = True
        elif security == "local":
            storage_bypass_security = False
        else:
            raise KeyError("Security option '{}' not recognized.".format(security))

        # Handle SSL
        ssl_ctx = None
        self.client_verify = True
        if ssl_options is True:
            self.logger.warning("No SSL files passed in, generating self-signed SSL certificate.")
            self.logger.warning("Clients must use `verify=False` when connecting.\n")

            cert, key = _build_ssl()

            # Add quick names
            ssl_name = name.lower().replace(" ", "_")
            cert_name = ssl_name + "_ssl.crt"
            key_name = ssl_name + "_ssl.key"

            ssl_options = {"crt": cert_name, "key": key_name}

            with open(cert_name, "wb") as handle:
                handle.write(cert)

            with open(key_name, "wb") as handle:
                handle.write(key)

            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(ssl_options["crt"], ssl_options["key"])

            # Destroy keyfiles upon close
            import atexit
            import os
            atexit.register(os.remove, cert_name)
            atexit.register(os.remove, key_name)
            self.client_verify = False
        elif ssl_options is False:
            ssl_ctx = None
        elif isinstance(ssl_options, dict):
            if ("crt" not in ssl_options) or ("key" not in ssl_options):
                raise KeyError("'crt' (SSL Certificate) and 'key' (SSL Key) fields are required for `ssl_options`.")

            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(ssl_options["crt"], ssl_options["key"])
        else:
            raise KeyError("ssl_options not understood")

        # Setup the database connection
        self.storage_database = storage_project_name
        self.storage_uri = storage_uri
        self.storage = storage_socket_factory(
            storage_uri,
            project_name=storage_project_name,
            bypass_security=storage_bypass_security,
            allow_read=allow_read,
            max_limit=query_limit)

        # Pull the current loop if we need it
        self.loop = loop or tornado.ioloop.IOLoop.current()

        # Build up the application; these objects are shared by all handlers.
        self.objects = {
            "storage_socket": self.storage,
            "logger": self.logger,
            "api_logger": self.api_logger,
        }

        # Public information served from the /information endpoint.
        self.objects["public_information"] = {
            "name": self.name,
            "heartbeat_frequency": self.heartbeat_frequency,
            "version": get_information("version"),
            "query_limit": self.storage.get_limit(1.e9),
            "client_lower_version_limit": "0.8.0",  # Must be XX.YY.ZZ
            "client_upper_version_limit": "0.8.1"  # Must be XX.YY.ZZ
        }

        endpoints = [
            # Generic web handlers
            (r"/information", InformationHandler, self.objects),
            (r"/kvstore", KVStoreHandler, self.objects),
            (r"/molecule", MoleculeHandler, self.objects),
            (r"/keyword", KeywordHandler, self.objects),
            (r"/collection", CollectionHandler, self.objects),
            (r"/result", ResultHandler, self.objects),
            (r"/procedure", ProcedureHandler, self.objects),

            # Queue Schedulers
            (r"/task_queue", TaskQueueHandler, self.objects),
            (r"/service_queue", ServiceQueueHandler, self.objects),
            (r"/queue_manager", QueueManagerHandler, self.objects),
        ]

        # Build the app
        app_settings = {
            "compress_response": compress_response,
        }
        self.app = tornado.web.Application(endpoints, **app_settings)
        # Endpoint names without the leading slash, used by get_address().
        self.endpoints = set([v[0].replace("/", "", 1) for v in endpoints])

        self.http_server = tornado.httpserver.HTTPServer(self.app, ssl_options=ssl_ctx)

        self.http_server.listen(self.port)

        # Add periodic callback holders
        self.periodic = {}

        # Exit callbacks
        self.exit_callbacks = []

        self.logger.info("FractalServer:")
        self.logger.info(" Name: {}".format(self.name))
        self.logger.info(" Version: {}".format(get_information("version")))
        self.logger.info(" Address: {}".format(self._address))
        self.logger.info(" Database URI: {}".format(storage_uri))
        self.logger.info(" Database Name: {}".format(storage_project_name))
        self.logger.info(" Query Limit: {}\n".format(self.storage.get_limit(1.e9)))
        self.loop_active = False

        # Queue manager if direct build
        self.queue_socket = queue_socket
        self.executor = None
        self.futures = []
        if (self.queue_socket is not None):
            if security == "local":
                raise ValueError("Cannot yet use local security with a internal QueueManager")

            # Create the executor
            from concurrent.futures import ThreadPoolExecutor
            self.executor = ThreadPoolExecutor(max_workers=2)

            def _build_manager():
                client = FractalClient(self, username="qcfractal_server")
                self.objects["queue_manager"] = QueueManager(
                    client, self.queue_socket, logger=self.logger, manager_name="FractalServer", verbose=False)

            # Build the queue manager, will not run until loop starts
            self.objects["queue_manager_future"] = self._run_in_thread(_build_manager)

    def __repr__(self):
        return f"FractalServer(name='{self.name}' uri='{self._address}')"

    def _run_in_thread(self, func, timeout=5):
        """
        Runs a function in a background thread on the server's executor.
        """
        if self.executor is None:
            raise AttributeError("No Executor was created, but run_in_thread was called.")

        fut = self.loop.run_in_executor(self.executor, func)
        return fut

    ## Start/stop functionality

    def start(self, start_loop: bool=True, start_periodics: bool=True) -> None:
        """
        Starts up the IOLoop and periodic calls.

        Parameters
        ----------
        start_loop : bool, optional
            If False, does not start the IOLoop
        start_periodics : bool, optional
            If False, does not start the server periodic updates such as
            Service iterations and Manager heartbeat checking.
        """
        if "queue_manager_future" in self.objects:

            def start_manager():
                self._check_manager("manager_build")
                self.objects["queue_manager"].start()

            # Call this after the loop has started
            self._run_in_thread(start_manager)

        # Add services callback
        if start_periodics:
            nanny_services = tornado.ioloop.PeriodicCallback(self.update_services, self.service_frequency * 1000)
            nanny_services.start()
            self.periodic["update_services"] = nanny_services

            # Add Manager heartbeats
            heartbeats = tornado.ioloop.PeriodicCallback(self.check_manager_heartbeats,
                                                         self.heartbeat_frequency * 1000)
            heartbeats.start()
            self.periodic["heartbeats"] = heartbeats

        # Soft quit with a keyboard interrupt
        self.logger.info("FractalServer successfully started.\n")
        if start_loop:
            self.loop_active = True
            self.loop.start()

    def stop(self, stop_loop: bool=True) -> None:
        """
        Shuts down the IOLoop and periodic updates.

        Parameters
        ----------
        stop_loop : bool, optional
            If False, does not shut down the IOLoop. Useful if the IOLoop is externally managed.
        """

        # Shut down queue manager
        if "queue_manager" in self.objects:
            self._run_in_thread(self.objects["queue_manager"].stop)

        # Close down periodics
        for cb in self.periodic.values():
            cb.stop()

        # Call exit callbacks
        for func, args, kwargs in self.exit_callbacks:
            func(*args, **kwargs)

        # Shutdown executor and futures
        if "queue_manager_future" in self.objects:
            self.objects["queue_manager_future"].cancel()

        if self.executor is not None:
            self.executor.shutdown()

        # Shutdown IOLoop if needed
        if (asyncio.get_event_loop().is_running()) and stop_loop:
            self.loop.stop()
        self.loop_active = False

        # Final shutdown
        if stop_loop:
            self.loop.close(all_fds=True)
        self.logger.info("FractalServer stopping gracefully. Stopped IOLoop.\n")

    def add_exit_callback(self, callback, *args, **kwargs):
        """Adds additional callbacks to perform when closing down the server.

        Parameters
        ----------
        callback : callable
            The function to call at exit
        *args
            Arguments to call with the function.
        **kwargs
            Kwargs to call with the function.
        """
        self.exit_callbacks.append((callback, args, kwargs))

    ## Helpers

    def get_address(self, endpoint: Optional[str]=None) -> str:
        """Obtains the full URI for a given function on the FractalServer.

        Parameters
        ----------
        endpoint : Optional[str], optional
            Specifies a endpoint to provide the URI for. If None returns the server address.

        Returns
        -------
        str
            The endpoint URI

        Raises
        ------
        AttributeError
            If the requested endpoint is not registered on this server.
        """

        if endpoint and (endpoint not in self.endpoints):
            raise AttributeError("Endpoint '{}' not found.".format(endpoint))

        if endpoint:
            return self._address + endpoint
        else:
            return self._address

    ## Updates

    def update_services(self) -> int:
        """Runs through all active services and examines their current status.

        Returns
        -------
        int
            The number of services still running after this sweep.
        """

        # Grab current services
        current_services = self.storage.get_services(status="RUNNING")["data"]

        # Grab new services if we have open slots
        open_slots = max(0, self.max_active_services - len(current_services))
        if open_slots > 0:
            new_services = self.storage.get_services(status="WAITING", limit=open_slots)["data"]
            current_services.extend(new_services)
            if len(new_services):
                self.logger.info(f"Starting {len(new_services)} new services.")

        self.logger.debug(f"Updating {len(current_services)} services.")

        # Loop over the services and iterate
        running_services = 0
        completed_services = []
        for data in current_services:

            # Pre-bind so the except-branch can tell a construction failure
            # (service is still None) apart from an iteration failure.
            service = None

            # Attempt to iterate and get message
            try:
                service = construct_service(self.storage, self.logger, data)
                finished = service.iterate()
            except Exception:
                error_message = "FractalServer Service Build and Iterate Error:\n{}".format(traceback.format_exc())
                self.logger.error(error_message)
                # If construct_service itself raised there is no service
                # object to flag as errored; log and move on. (Previously
                # this path crashed with an unbound-local NameError.)
                if service is None:
                    continue
                service.status = "ERROR"
                service.error = {"error_type": "iteration_error", "error_message": error_message}
                finished = False

            self.storage.update_services([service])

            if finished is not False:
                # Add results to procedures, remove complete_ids
                completed_services.append(service)
            else:
                running_services += 1

        if len(completed_services):
            self.logger.info(f"Completed {len(completed_services)} services.")

        # Add new procedures and services
        self.storage.services_completed(completed_services)

        return running_services

    def check_manager_heartbeats(self) -> None:
        """
        Checks the heartbeats and kills off managers that have not been heard from.
        """

        dt = datetime.datetime.utcnow() - datetime.timedelta(seconds=self.heartbeat_frequency)
        ret = self.storage.get_managers(status="ACTIVE", modified_before=dt)

        for blob in ret["data"]:
            # Recycle this manager's incomplete tasks and retire it.
            nshutdown = self.storage.queue_reset_status(manager=blob["name"])
            self.storage.manager_update(blob["name"], returned=nshutdown, status="INACTIVE")

            self.logger.info("Hearbeat missing from {}. Shutting down, recycling {} incomplete tasks.".format(
                blob["name"], nshutdown))

    def list_managers(self, status: Optional[str]=None, name: Optional[str]=None) -> List[Dict[str, Any]]:
        """
        Provides a list of managers associated with the server both active and inactive.

        Parameters
        ----------
        status : Optional[str], optional
            Filters managers by status.
        name : Optional[str], optional
            Filters managers by name

        Returns
        -------
        List[Dict[str, Any]]
            The requested Manager data.
        """

        return self.storage.get_managers(status=status, name=name)["data"]

    def client(self):
        """
        Builds a client from this server.
        """

        return FractalClient(self)

    ### Functions only available if using a local queue_adapter

    def _check_manager(self, func_name: str) -> None:
        """Raise if no local QueueManager exists; wait briefly for it to build."""
        if self.queue_socket is None:
            raise AttributeError(
                "{} is only available if the server was initialized with a queue manager.".format(func_name))

        # Wait up to two seconds for the queue manager to build
        if "queue_manager" not in self.objects:
            self.logger.info("Waiting on queue_manager to build.")
            for x in range(20):
                time.sleep(0.1)
                if "queue_manager" in self.objects:
                    break

            if "queue_manager" not in self.objects:
                raise AttributeError("QueueManager never constructed.")

    def update_tasks(self) -> bool:
        """Pulls tasks from the queue_adapter, inserts them into the database,
        and fills the queue_adapter with new tasks.

        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self._check_manager("update_tasks")

        if self.loop_active:
            # Drop this in a thread so that we are not blocking each other
            self._run_in_thread(self.objects["queue_manager"].update)
        else:
            self.objects["queue_manager"].update()

        return True

    def await_results(self) -> bool:
        """A synchronous method for testing or small launches
        that awaits task completion before adding all queued results
        to the database and returning.

        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self._check_manager("await_results")

        self.logger.info("Updating tasks")
        return self.objects["queue_manager"].await_results()

    def await_services(self, max_iter: int=10) -> bool:
        """A synchronous method that awaits the completion of all services
        before returning.

        Parameters
        ----------
        max_iter : int, optional
            The maximum number of service iterations the server will run through. Will
            terminate early if all services have completed.

        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self._check_manager("await_services")

        self.await_results()
        for x in range(1, max_iter + 1):
            self.logger.info("\nAwait services: Iteration {}\n".format(x))
            running_services = self.update_services()
            self.await_results()
            if running_services == 0:
                break

        return True

    def list_current_tasks(self) -> List[Any]:
        """Provides a list of tasks currently in the queue along
        with the associated keys.

        Returns
        -------
        ret : list of tuples
            All tasks currently still in the database
        """
        self._check_manager("list_current_tasks")

        return self.objects["queue_manager"].list_current_tasks()
| 35.893417
| 159
| 0.620087
|
acfd2025297a049bed2f00ca8977f3ad24da961d
| 2,476
|
py
|
Python
|
dataladmetadatamodel/tests/test_common.py
|
datalad/metadata-model
|
03d5178f0f9c55b75d483957057f0b9daa7625a9
|
[
"MIT"
] | null | null | null |
dataladmetadatamodel/tests/test_common.py
|
datalad/metadata-model
|
03d5178f0f9c55b75d483957057f0b9daa7625a9
|
[
"MIT"
] | 6
|
2021-10-07T20:53:58.000Z
|
2022-03-31T10:32:22.000Z
|
dataladmetadatamodel/tests/test_common.py
|
datalad/metadata-model
|
03d5178f0f9c55b75d483957057f0b9daa7625a9
|
[
"MIT"
] | null | null | null |
import subprocess
import tempfile
import unittest
from dataladmetadatamodel.common import (
get_top_level_metadata_objects,
get_top_nodes_and_metadata_root_record,
)
from dataladmetadatamodel.metadatapath import MetadataPath
from dataladmetadatamodel.metadatarootrecord import MetadataRootRecord
from dataladmetadatamodel.tests.utils import get_uuid
from dataladmetadatamodel.uuidset import UUIDSet
from dataladmetadatamodel.versionlist import TreeVersionList
# Deterministic dataset UUID fixtures used by the tests below.
uuid_0 = get_uuid(0)
uuid_1 = get_uuid(1)
class TestTopLevelObjects(unittest.TestCase):
    # More of a module/integration test than a pure unit test: it exercises
    # the real git-backed mappers instead of isolating the units.

    def test_top_level_functions(self):
        """Round-trip the top-level metadata objects through a fresh git repo."""
        with tempfile.TemporaryDirectory() as realm:
            subprocess.run(["git", "init", realm])

            # A freshly initialised repo holds no metadata yet.
            tree_version_list, uuid_set = get_top_level_metadata_objects("git", realm)
            self.assertIsNone(tree_version_list)
            self.assertIsNone(uuid_set)

            # Without auto_create nothing should be materialised either.
            tree_version_list, uuid_set, root_record = get_top_nodes_and_metadata_root_record(
                mapper_family="git",
                realm=realm,
                dataset_id=uuid_0,
                primary_data_version="v1",
                prefix_path=MetadataPath(""),
                dataset_tree_path=MetadataPath("a/b/c"),
                sub_dataset_id=None,
                sub_dataset_version=None,
                auto_create=False
            )
            self.assertIsNone(tree_version_list)
            self.assertIsNone(uuid_set)
            self.assertIsNone(root_record)

            # With auto_create all three objects must be built.
            tree_version_list, uuid_set, root_record = get_top_nodes_and_metadata_root_record(
                mapper_family="git",
                realm=realm,
                dataset_id=uuid_0,
                primary_data_version="v1",
                prefix_path=MetadataPath(""),
                dataset_tree_path=MetadataPath("a/b/c"),
                sub_dataset_id=uuid_1,
                sub_dataset_version="v-sub1",
                auto_create=True
            )
            self.assertIsInstance(tree_version_list, TreeVersionList)
            self.assertIsInstance(uuid_set, UUIDSet)
            self.assertIsInstance(root_record, MetadataRootRecord)

            # Persist and re-read to prove the objects survive a round trip.
            tree_version_list.write_out(realm)
            uuid_set.write_out(realm)

            tree_version_list, uuid_set = get_top_level_metadata_objects("git", realm)
            self.assertIsInstance(tree_version_list, TreeVersionList)
            self.assertIsInstance(uuid_set, UUIDSet)
# Allow running this test module directly, e.g. `python test_common.py`.
if __name__ == '__main__':
    unittest.main()
| 33.459459
| 72
| 0.642165
|
acfd205dc844bafd8d4c9ea6d7e53dc768c5c8ff
| 57
|
py
|
Python
|
recipe/run_test.py
|
regro-cf-autotick-bot/lxml-stubs-feedstock
|
273100ab1ea7657519c14a9b5c96de0760570263
|
[
"BSD-3-Clause"
] | null | null | null |
recipe/run_test.py
|
regro-cf-autotick-bot/lxml-stubs-feedstock
|
273100ab1ea7657519c14a9b5c96de0760570263
|
[
"BSD-3-Clause"
] | 4
|
2021-05-21T11:59:20.000Z
|
2022-03-04T12:46:51.000Z
|
recipe/run_test.py
|
regro-cf-autotick-bot/lxml-stubs-feedstock
|
273100ab1ea7657519c14a9b5c96de0760570263
|
[
"BSD-3-Clause"
] | 1
|
2021-05-21T11:58:56.000Z
|
2021-05-21T11:58:56.000Z
|
"""Conda recipe smoke test: the `lxml-stubs` package must be importable."""
from importlib import import_module

# The distribution name contains a hyphen, so the `import` statement cannot
# be used; import_module accepts the literal package directory name.
import_module("lxml-stubs")
| 11.4
| 37
| 0.807018
|
acfd21b1886b82a354849170efd162b72fe7af29
| 3,747
|
py
|
Python
|
validate.py
|
Susmit-A/FSHDR
|
45dfbe7783b2a5859bbe8653761058b9ee207a01
|
[
"MIT"
] | 6
|
2021-07-26T17:41:26.000Z
|
2021-12-31T09:56:55.000Z
|
validate.py
|
Susmit-A/FSHDR
|
45dfbe7783b2a5859bbe8653761058b9ee207a01
|
[
"MIT"
] | 4
|
2021-07-27T13:42:45.000Z
|
2022-03-20T11:31:29.000Z
|
validate.py
|
Susmit-A/FSHDR
|
45dfbe7783b2a5859bbe8653761058b9ee207a01
|
[
"MIT"
] | 1
|
2021-12-05T13:35:08.000Z
|
2021-12-05T13:35:08.000Z
|
# Validation / inference driver for the few-shot HDR models.
# NOTE: statement order matters in this script — CUDA_VISIBLE_DEVICES must be
# set in the environment *before* TensorFlow is imported, so imports are
# deliberately interleaved with argument handling.
import argparse, os, glob, cv2, sys
import numpy as np

# Echo the exact command line for reproducibility in logs.
print(" ".join(sys.argv))

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='BridgeNet')
parser.add_argument('--weights_loc', type=str, default=None)  # path to the saved weights checkpoint
parser.add_argument('--model_name', type=str, default=None)  # results subfolder name; defaults to --model
parser.add_argument('--dataset', type=str, default='SIG17') #['SIG17' or 'ICCP19']
parser.add_argument('--image_type', type=str, default='flow_corrected') #['normal' or 'flow_corrected']
parser.add_argument('--gpu_num', type=str, default='0')  # CUDA device index to expose
parser.add_argument('--val_downsample', type=int, default=1)  # integer downsampling factor for validation images
parser.add_argument('--rtx_mixed_precision', action='store_true')
args = parser.parse_args()

# Validate the enumerated choices up front and exit early on anything unknown.
if args.model not in ['BridgeNet', 'AHDR', 'WE', 'Resnet']:
    print("Unknown Model. Exiting.")
    exit()
else:
    print("Using {} model".format(args.model))
if args.dataset not in ['SIG17', 'ICCP19']:
    print("Unknown Dataset. Exiting.")
    exit()
else:
    print("Using {} dataset".format(args.dataset))
if args.image_type not in ['normal', 'flow_corrected']:
    print("Unknown Image Type. Exiting.")
    exit()
else:
    print("Using {} images".format(args.image_type))

# Select the GPU and silence TF info logs *before* importing tensorflow.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_num
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf

# Grow GPU memory on demand instead of reserving it all at start-up.
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices) > 0:
    tf.config.experimental.set_memory_growth(gpu_devices[0], True)

# Optional float16 compute policy for RTX tensor cores.
if args.rtx_mixed_precision:
    from tensorflow.keras.mixed_precision import experimental as mixed_precision
    policy = mixed_precision.Policy('mixed_float16')
    mixed_precision.set_policy(policy)

# Project imports; must come after the TF/environment setup above.
from dataloader import *
from models import *
from losses import *
from utils import *

# Build the requested model and restore its weights.
model_name = args.model if args.model_name is None else args.model_name
model = models[args.model](name=model_name)
model.load_weights(args.weights_loc)
print("Loading model weights from ", args.weights_loc)

# Tonemapped MSE as the loss; PSNR in linear (L) and tonemapped (T) domains.
losses = [MSE_TM]
metrics = [PSNR_L, PSNR_T]

init_sequences_S1(dataset=args.dataset, image_type=args.image_type)
init_validation(dataset=args.dataset, image_type=args.image_type)
val_loader = Validation()

folder = 'val_' + args.dataset
print("\nValidation")
progbar = tf.keras.utils.Progbar(len(val_loader))
step = 1

# Prepare results/<model_name>/<folder>.
# NOTE(review): os.rmdir only removes *empty* directories; if a previous run
# left outputs in this folder the call below raises OSError — confirm intended.
if not os.path.exists(os.path.join('results', model_name)):
    os.mkdir(os.path.join('results', model_name))
elif os.path.exists(os.path.join('results', model_name, folder)):
    os.rmdir(os.path.join('results', model_name, folder))
os.mkdir(os.path.join('results', model_name, folder))

for i in range(len(val_loader)):
    loss_vals = []
    metric_vals = []
    os.mkdir(os.path.join('results', model_name, folder, str(i)))
    X, Y, exp = val_loader[i]
    # Optionally downsample inputs and ground truth by an integer factor.
    if args.val_downsample > 1:
        inp = tf.image.resize(X, (X.shape[1] // args.val_downsample, X.shape[2] // args.val_downsample))
        Y = tf.image.resize(Y, (Y.shape[1] // args.val_downsample, Y.shape[2] // args.val_downsample))
    else:
        inp = X
    pred = model.predict(inp)
    # Save the prediction and ground truth as Radiance .hdr images.
    radiance_writer(os.path.join('results', model_name, folder, str(i), str(i) + '.hdr'), np.squeeze(pred, axis=0))
    radiance_writer(os.path.join('results', model_name, folder, str(i), str(i) + '_gt.hdr'), np.squeeze(Y, axis=0))
    # Accumulate (name, value) pairs for the progress-bar readout.
    for l in losses:
        _loss = tf.reduce_mean(l(Y, pred))
        loss_vals.append((l.__name__.lower(), tf.reduce_mean(_loss)))
    for m in metrics:
        _metric = tf.reduce_mean(m(Y, pred))
        metric_vals.append((m.__name__.lower(), tf.reduce_mean(_metric)))
    progbar.update(step, loss_vals + metric_vals)
    step += 1
| 38.628866
| 116
| 0.680811
|
acfd2215169bc423651c16c18c5df46ec02ba8d3
| 1,947
|
py
|
Python
|
effect/test_retry.py
|
timgates42/effect
|
df04e119faf419a5608a51ca1af606f2874a11a7
|
[
"Unlicense",
"MIT"
] | 265
|
2015-05-09T17:28:51.000Z
|
2022-03-23T03:48:16.000Z
|
effect/test_retry.py
|
timgates42/effect
|
df04e119faf419a5608a51ca1af606f2874a11a7
|
[
"Unlicense",
"MIT"
] | 37
|
2015-05-09T16:31:54.000Z
|
2020-09-17T20:20:03.000Z
|
effect/test_retry.py
|
timgates42/effect
|
df04e119faf419a5608a51ca1af606f2874a11a7
|
[
"Unlicense",
"MIT"
] | 17
|
2015-05-09T04:45:46.000Z
|
2021-07-21T12:48:52.000Z
|
from testtools import TestCase
from testtools.matchers import raises
from .retry import retry
from . import base_dispatcher
from .testing import ESConstant, ESError, ESFunc, resolve_stubs
class RetryTests(TestCase):
    """Behavioural tests for the ``retry`` combinator."""

    def test_should_not_retry(self):
        """retry raises the last error if should_retry returns False."""
        eff = retry(ESError(RuntimeError("oh no!")), lambda e: ESConstant(False))
        self.assertThat(
            lambda: resolve_stubs(base_dispatcher, eff),
            raises(RuntimeError("oh no!")),
        )

    def _repeated_effect_func(self, *funcs):
        """
        Return an (impure) function which does different things based on the
        number of times it's been called.
        """
        call_count = [0]

        def stateful():
            index = call_count[0]
            call_count[0] += 1
            return funcs[index]()

        return stateful

    def test_retry(self):
        """
        When should_retry returns an Effect of True, the func will be called
        again.
        """
        flaky = self._repeated_effect_func(
            lambda: raise_(RuntimeError("foo")), lambda: "final"
        )
        eff = retry(ESFunc(flaky), lambda e: ESConstant(True))
        self.assertEqual(resolve_stubs(base_dispatcher, eff), "final")

    def test_continue_retrying(self):
        """
        should_retry is passed the exception information, and will be
        called until it returns False.
        """
        flaky = self._repeated_effect_func(
            lambda: raise_(RuntimeError("1")),
            lambda: raise_(RuntimeError("2")),
            lambda: raise_(RuntimeError("3")),
        )

        def should_retry(exc):
            # Keep retrying until the third distinct failure is seen.
            return ESConstant(str(exc) != "3")

        eff = retry(ESFunc(flaky), should_retry)
        self.assertThat(
            lambda: resolve_stubs(base_dispatcher, eff), raises(RuntimeError("3"))
        )
def raise_(exc):
    """Raise *exc*; lets a lambda trigger an exception (``raise`` is a statement)."""
    raise exc
| 29.5
| 85
| 0.607088
|
acfd22b8f153dcd0253153cdce4f1b8e3df9f535
| 18,879
|
py
|
Python
|
dataloader/flow_transforms.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 138
|
2021-01-12T03:02:04.000Z
|
2022-03-30T07:14:15.000Z
|
dataloader/flow_transforms.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 12
|
2021-02-02T14:19:30.000Z
|
2022-03-28T01:23:44.000Z
|
dataloader/flow_transforms.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 14
|
2021-01-13T01:31:34.000Z
|
2022-01-30T14:48:06.000Z
|
from __future__ import division
import torch
import random
import numpy as np
import numbers
import types
import scipy.ndimage as ndimage
import pdb
import torchvision
import PIL.Image as Image
import cv2
from torch.nn import functional as F
class Compose(object):
    """Chain several co-transforms into one callable.

    Every transform receives ``(input, target)`` and must return the
    (possibly modified) pair, which is then fed to the next transform.

    For example:
    >>> co_transforms.Compose([
    >>>     co_transforms.CenterCrop(10),
    >>>     co_transforms.ToTensor(),
    >>> ])
    """

    def __init__(self, co_transforms):
        self.co_transforms = co_transforms

    def __call__(self, input, target):
        for transform in self.co_transforms:
            input, target = transform(input, target)
        return input, target
class Scale(object):
    """Rescale the inputs and target arrays by a fixed ratio.

    The two input frames are always resized bilinearly; the target (flow +
    validity mask) is resized with the interpolation selected by ``order``
    and its values are multiplied by the ratio, since flow magnitudes scale
    with resolution.

    size: scaling ratio applied to both axes (1 is a no-op)
    order: interpolation order for the target — 0 nearest, 1 bilinear
        (default), 2 cubic
    """

    def __init__(self, size, order=1):
        self.ratio = size
        self.order = order
        # Translate the numeric order into the matching OpenCV flag.
        interp_codes = {
            0: cv2.INTER_NEAREST,
            1: cv2.INTER_LINEAR,
            2: cv2.INTER_CUBIC,
        }
        if order in interp_codes:
            self.code = interp_codes[order]

    def __call__(self, inputs, target):
        if self.ratio == 1:
            return inputs, target
        h, w, _ = inputs[0].shape
        ratio = self.ratio

        inputs[0] = cv2.resize(inputs[0], None, fx=ratio, fy=ratio, interpolation=cv2.INTER_LINEAR)
        inputs[1] = cv2.resize(inputs[1], None, fx=ratio, fy=ratio, interpolation=cv2.INTER_LINEAR)

        # The validity mask (channel 2) must stay binary, so resample it with
        # nearest neighbour and restore it after the value-scaled resize.
        mask = cv2.resize(target[:, :, 2], None, fx=ratio, fy=ratio, interpolation=cv2.INTER_NEAREST)
        target = cv2.resize(target, None, fx=ratio, fy=ratio, interpolation=self.code) * ratio
        target[:, :, 2] = mask
        return inputs, target
class SpatialAug(object):
    """Joint spatial augmentation for a frame pair and its flow target.

    Samples a random affine transform (mirror / rotation / translation /
    scale / squeeze) for frame 0 and a slightly perturbed variant for
    frame 1, warps both frames plus the target accordingly, and recomposes
    the flow so it stays geometrically consistent between the warped frames.

    Parameters (each range argument, when given, follows the FlowNet-style
    convention used below: index 0 = sampling half-range for frame 0,
    index 1 = additional relative half-range for frame 1, index 2 = mean;
    see how each is consumed in ``__call__``):
      crop          : (th, tw) output patch size.
      schedule_coeff: multiplier on the frame-1 relative ranges.
      order         : 0 -> nearest-neighbour resampling of the target,
                      otherwise bilinear.
      black         : if True, skip the "patch fully inside image" rejection
                      test (allows zero/black borders), mirroring FlowNet2's
                      black augmentation.
    """

    def __init__(self, crop, scale=None, rot=None, trans=None, squeeze=None, schedule_coeff=1, order=1, black=False):
        self.crop = crop
        self.scale = scale
        self.rot = rot
        self.trans = trans
        self.squeeze = squeeze
        # Current 2x3 affine transform stored flat as [a, b, c, d, e, f]:
        # x' = a*x + c*y + e ; y' = b*x + d*y + f.
        self.t = np.zeros(6)
        self.schedule_coeff = schedule_coeff
        self.order = order
        self.black = black

    def to_identity(self):
        # Reset self.t to the identity affine transform.
        self.t[0] = 1; self.t[2] = 0; self.t[4] = 0; self.t[1] = 0; self.t[3] = 1; self.t[5] = 0;

    def left_multiply(self, u0, u1, u2, u3, u4, u5):
        # Compose the affine transform [u0..u5] onto self.t (applied after
        # the current transform), storing the result back into self.t.
        result = np.zeros(6)
        result[0] = self.t[0]*u0 + self.t[1]*u2;
        result[1] = self.t[0]*u1 + self.t[1]*u3;

        result[2] = self.t[2]*u0 + self.t[3]*u2;
        result[3] = self.t[2]*u1 + self.t[3]*u3;

        result[4] = self.t[4]*u0 + self.t[5]*u2 + u4;
        result[5] = self.t[4]*u1 + self.t[5]*u3 + u5;
        self.t = result

    def inverse(self):
        # Return (without storing) the inverse of the current affine self.t.
        result = np.zeros(6)
        a = self.t[0]; c = self.t[2]; e = self.t[4];
        b = self.t[1]; d = self.t[3]; f = self.t[5];

        denom = a*d - b*c;

        result[0] = d / denom;
        result[1] = -b / denom;
        result[2] = -c / denom;
        result[3] = a / denom;
        result[4] = (c*f-d*e) / denom;
        result[5] = (b*e-a*f) / denom;

        return result

    def grid_transform(self, meshgrid, t, normalize=True, gridsize=None):
        # Apply affine t to a (x-grid, y-grid) pair; when normalize=True the
        # result is scaled to [-1, 1] as required by F.grid_sample.
        if gridsize is None:
            h, w = meshgrid[0].shape
        else:
            h, w = gridsize
        vgrid = torch.cat([(meshgrid[0] * t[0] + meshgrid[1] * t[2] + t[4])[:,:,np.newaxis],
                           (meshgrid[0] * t[1] + meshgrid[1] * t[3] + t[5])[:,:,np.newaxis]],-1)
        if normalize:
            vgrid[:,:,0] = 2.0*vgrid[:,:,0]/max(w-1,1)-1.0
            vgrid[:,:,1] = 2.0*vgrid[:,:,1]/max(h-1,1)-1.0
        return vgrid

    def __call__(self, inputs, target):
        """Warp (frame0, frame1) and the flow target with sampled affines.

        inputs : [frame0, frame1] HxWxC arrays.
        target : HxWx3 (flow_x, flow_y, mask) or HxWx4+ with a scale/exp
                 channel appended.
        Returns the warped ([frame0, frame1], target) as numpy arrays at
        crop resolution.
        """
        h, w, _ = inputs[0].shape
        th, tw = self.crop
        # Pixel-coordinate grids for the output patch; [::-1] puts x first.
        meshgrid = torch.meshgrid([torch.Tensor(range(th)), torch.Tensor(range(tw))])[::-1]
        cornergrid = torch.meshgrid([torch.Tensor([0,th-1]), torch.Tensor([0,tw-1])])[::-1]

        # Rejection-sample up to 50 transform pairs whose warped patch
        # corners stay inside the source image (unless self.black).
        for i in range(50):
            # im0: build patch->image transform for frame 0.
            self.to_identity()
            #TODO add mirror
            if np.random.binomial(1,0.5):
                mirror = True
            else:
                mirror = False
            ##TODO
            #mirror = False
            if mirror:
                self.left_multiply(-1, 0, 0, 1, .5 * tw, -.5 * th);
            else:
                self.left_multiply(1, 0, 0, 1, -.5 * tw, -.5 * th);
            scale0 = 1; scale1 = 1; squeeze0 = 1; squeeze1 = 1;
            # Frame-1 parameters (rot1/trans1/...) are the frame-0 sample
            # plus a small schedule-scaled relative perturbation.
            if not self.rot is None:
                rot0 = np.random.uniform(-self.rot[0],+self.rot[0])
                rot1 = np.random.uniform(-self.rot[1]*self.schedule_coeff, self.rot[1]*self.schedule_coeff) + rot0
                self.left_multiply(np.cos(rot0), np.sin(rot0), -np.sin(rot0), np.cos(rot0), 0, 0)
            if not self.trans is None:
                trans0 = np.random.uniform(-self.trans[0],+self.trans[0], 2)
                trans1 = np.random.uniform(-self.trans[1]*self.schedule_coeff,+self.trans[1]*self.schedule_coeff, 2) + trans0
                self.left_multiply(1, 0, 0, 1, trans0[0] * tw, trans0[1] * th)
            if not self.squeeze is None:
                squeeze0 = np.exp(np.random.uniform(-self.squeeze[0], self.squeeze[0]))
                squeeze1 = np.exp(np.random.uniform(-self.squeeze[1]*self.schedule_coeff, self.squeeze[1]*self.schedule_coeff)) * squeeze0
            if not self.scale is None:
                scale0 = np.exp(np.random.uniform(self.scale[2]-self.scale[0], self.scale[2]+self.scale[0]))
                scale1 = np.exp(np.random.uniform(-self.scale[1]*self.schedule_coeff, self.scale[1]*self.schedule_coeff)) * scale0
            self.left_multiply(1.0/(scale0*squeeze0), 0, 0, 1.0/(scale0/squeeze0), 0, 0)
            self.left_multiply(1, 0, 0, 1, .5 * w, .5 * h);
            transmat0 = self.t.copy()

            # im1: same construction with the perturbed parameters.
            self.to_identity()
            if mirror:
                self.left_multiply(-1, 0, 0, 1, .5 * tw, -.5 * th);
            else:
                self.left_multiply(1, 0, 0, 1, -.5 * tw, -.5 * th);
            if not self.rot is None:
                self.left_multiply(np.cos(rot1), np.sin(rot1), -np.sin(rot1), np.cos(rot1), 0, 0)
            if not self.trans is None:
                self.left_multiply(1, 0, 0, 1, trans1[0] * tw, trans1[1] * th)
            self.left_multiply(1.0/(scale1*squeeze1), 0, 0, 1.0/(scale1/squeeze1), 0, 0)
            self.left_multiply(1, 0, 0, 1, .5 * w, .5 * h);
            transmat1 = self.t.copy()
            transmat1_inv = self.inverse()

            if self.black:
                # black augmentation, allowing 0 values in the input images
                # https://github.com/lmb-freiburg/flownet2/blob/master/src/caffe/layers/black_augmentation_layer.cu
                break
            else:
                # Accept only if all four warped patch corners fall inside
                # the normalized [-1, 1] image range for both frames.
                if ((self.grid_transform(cornergrid, transmat0, gridsize=[float(h),float(w)]).abs()>1).sum() +\
                    (self.grid_transform(cornergrid, transmat1, gridsize=[float(h),float(w)]).abs()>1).sum()) == 0:
                    break
            if i==49:
                # Rejection sampling exhausted: fall back to a plain centered
                # crop (identity apart from the centering translation).
                # NOTE(review): transmat1_inv still holds the last rejected
                # sample's inverse here — confirm intended upstream.
                print('max_iter in augmentation')
                self.to_identity()
                self.left_multiply(1, 0, 0, 1, -.5 * tw, -.5 * th);
                self.left_multiply(1, 0, 0, 1, .5 * w, .5 * h);
                transmat0 = self.t.copy()
                transmat1 = self.t.copy()

        # do the real work: warp frame 0 and the target with transmat0.
        vgrid = self.grid_transform(meshgrid, transmat0,gridsize=[float(h),float(w)])
        inputs_0 = F.grid_sample(torch.Tensor(inputs[0]).permute(2,0,1)[np.newaxis], vgrid[np.newaxis], align_corners=True)[0].permute(1,2,0)
        if self.order == 0:
            target_0 = F.grid_sample(torch.Tensor(target).permute(2,0,1)[np.newaxis], vgrid[np.newaxis], mode='nearest', align_corners=True)[0].permute(1,2,0)
        else:
            target_0 = F.grid_sample(torch.Tensor(target).permute(2,0,1)[np.newaxis], vgrid[np.newaxis], align_corners=True)[0].permute(1,2,0)

        # Resample the validity mask separately, mapping invalid pixels to
        # NaN so interpolation cannot blend valid/invalid labels.
        mask_0 = target[:,:,2:3].copy(); mask_0[mask_0==0]=np.nan
        if self.order == 0:
            mask_0 = F.grid_sample(torch.Tensor(mask_0).permute(2,0,1)[np.newaxis], vgrid[np.newaxis], mode='nearest', align_corners=True)[0].permute(1,2,0)
        else:
            mask_0 = F.grid_sample(torch.Tensor(mask_0).permute(2,0,1)[np.newaxis], vgrid[np.newaxis], align_corners=True)[0].permute(1,2,0)
        mask_0[torch.isnan(mask_0)] = 0

        # Warp frame 1 with its own transform.
        vgrid = self.grid_transform(meshgrid, transmat1,gridsize=[float(h),float(w)])
        inputs_1 = F.grid_sample(torch.Tensor(inputs[1]).permute(2,0,1)[np.newaxis], vgrid[np.newaxis], align_corners=True)[0].permute(1,2,0)

        # flow: map patch coords through transmat0, add the sampled flow to
        # get frame-1 image coords, then map back through transmat1's
        # inverse to express the flow in the warped patch frame.
        pos = target_0[:,:,:2] + self.grid_transform(meshgrid, transmat0,normalize=False)
        pos = self.grid_transform(pos.permute(2,0,1),transmat1_inv,normalize=False)
        if target_0.shape[2]>=4:
            # scale channel: corrected for the relative scale change.
            exp = target_0[:,:,3:] * scale1 / scale0
            target = torch.cat([ (pos[:,:,0] - meshgrid[0]).unsqueeze(-1),
                                 (pos[:,:,1] - meshgrid[1]).unsqueeze(-1),
                                 mask_0,
                                 exp], -1)
        else:
            target = torch.cat([ (pos[:,:,0] - meshgrid[0]).unsqueeze(-1),
                                 (pos[:,:,1] - meshgrid[1]).unsqueeze(-1),
                                 mask_0], -1)
        # target_0[:,:,2].unsqueeze(-1) ], -1)
        inputs = [np.asarray(inputs_0), np.asarray(inputs_1)]
        target = np.asarray(target)

        return inputs,target
class pseudoPCAAug(object):
    """
    Approximate Chromatic Eigen Augmentation implemented with torchvision's
    ColorJitter. Faster stand-in for the FlowNet2 layer:
    https://github.com/lmb-freiburg/flownet2/blob/master/src/caffe/layers/data_augmentation_layer.cu
    """
    def __init__(self, schedule_coeff=1):
        # schedule_coeff is accepted for interface compatibility but unused here.
        self.augcolor = torchvision.transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.5, hue=0.5/3.14)

    def _jitter(self, img):
        # Round-trip through a uint8 PIL image, jitter, back to float in [0, 1].
        pil_img = Image.fromarray(np.uint8(img * 255))
        return np.asarray(self.augcolor(pil_img)) / 255.

    def __call__(self, inputs, target):
        inputs[0] = self._jitter(inputs[0])
        inputs[1] = self._jitter(inputs[1])
        return inputs, target
class PCAAug(object):
    """
    Chromatic Eigen Augmentation: https://github.com/lmb-freiburg/flownet2/blob/master/src/caffe/layers/data_augmentation_layer.cu

    Perturbs images in a fixed RGB eigenvector space: power/add/multiply
    changes on the mean-free components, saturation-style changes on the
    chroma components, an optional rotation of the color plane, and a
    global luminance change. All random parameters are drawn once in
    ``__init__`` so every image processed by the same instance gets the
    same perturbation. Each ``*_pow/_mult/_add`` argument is a
    ``[spread, _, mean]`` triple (element [1] appears unused — TODO confirm).
    """
    def __init__(self, lmult_pow  =[0.4, 0,-0.2],
                       lmult_mult =[0.4, 0,0,  ],
                       lmult_add  =[0.03,0,0,  ],
                       sat_pow    =[0.4, 0,0,  ],
                       sat_mult   =[0.5, 0,-0.3],
                       sat_add    =[0.03,0,0,  ],
                       col_pow    =[0.4, 0,0,  ],
                       col_mult   =[0.2, 0,0,  ],
                       col_add    =[0.02,0,0,  ],
                       ladd_pow   =[0.4, 0,0,  ],
                       ladd_mult  =[0.4, 0,0,  ],
                       ladd_add   =[0.04,0,0,  ],
                       col_rotate =[1.,  0,0,  ],
                       schedule_coeff=1):
        # no mean: parameters applied to the mean-subtracted eigen components
        self.pow_nomean = [1,1,1]
        self.add_nomean = [0,0,0]
        self.mult_nomean = [1,1,1]
        # applied after the mean is re-added
        self.pow_withmean = [1,1,1]
        self.add_withmean = [0,0,0]
        self.mult_withmean = [1,1,1]
        self.lmult_pow = 1
        self.lmult_mult = 1
        self.lmult_add = 0
        self.col_angle = 0
        # luminance (component 0) vs. chroma (components 1, 2) draws;
        # normal draws for the no-mean part, uniform for the with-mean part
        if not ladd_pow is None:
            self.pow_nomean[0] =np.exp(np.random.normal(ladd_pow[2], ladd_pow[0]))
        if not col_pow is None:
            self.pow_nomean[1] =np.exp(np.random.normal(col_pow[2], col_pow[0]))
            self.pow_nomean[2] =np.exp(np.random.normal(col_pow[2], col_pow[0]))

        if not ladd_add is None:
            self.add_nomean[0] =np.random.normal(ladd_add[2], ladd_add[0])
        if not col_add is None:
            self.add_nomean[1] =np.random.normal(col_add[2], col_add[0])
            self.add_nomean[2] =np.random.normal(col_add[2], col_add[0])

        if not ladd_mult is None:
            self.mult_nomean[0] =np.exp(np.random.normal(ladd_mult[2], ladd_mult[0]))
        if not col_mult is None:
            self.mult_nomean[1] =np.exp(np.random.normal(col_mult[2], col_mult[0]))
            self.mult_nomean[2] =np.exp(np.random.normal(col_mult[2], col_mult[0]))

        # with mean: saturation-style parameters shared by both chroma channels
        if not sat_pow is None:
            self.pow_withmean[1] =np.exp(np.random.uniform(sat_pow[2]-sat_pow[0], sat_pow[2]+sat_pow[0]))
            self.pow_withmean[2] =self.pow_withmean[1]
        if not sat_add is None:
            self.add_withmean[1] =np.random.uniform(sat_add[2]-sat_add[0], sat_add[2]+sat_add[0])
            self.add_withmean[2] =self.add_withmean[1]
        if not sat_mult is None:
            self.mult_withmean[1] = np.exp(np.random.uniform(sat_mult[2]-sat_mult[0], sat_mult[2]+sat_mult[0]))
            self.mult_withmean[2] = self.mult_withmean[1]

        if not lmult_pow is None:
            self.lmult_pow = np.exp(np.random.uniform(lmult_pow[2]-lmult_pow[0], lmult_pow[2]+lmult_pow[0]))
        if not lmult_mult is None:
            self.lmult_mult= np.exp(np.random.uniform(lmult_mult[2]-lmult_mult[0], lmult_mult[2]+lmult_mult[0]))
        if not lmult_add is None:
            self.lmult_add = np.random.uniform(lmult_add[2]-lmult_add[0], lmult_add[2]+lmult_add[0])
        if not col_rotate is None:
            self.col_angle= np.random.uniform(col_rotate[2]-col_rotate[0], col_rotate[2]+col_rotate[0])

        # eigen vectors: fixed RGB eigenbasis from the FlowNet2 implementation
        self.eigvec = np.reshape([0.51,0.56,0.65,0.79,0.01,-0.62,0.35,-0.83,0.44],[3,3]).transpose()

    def __call__(self, inputs, target):
        # Same sampled perturbation is applied to both frames of the pair.
        inputs[0] = self.pca_image(inputs[0])
        inputs[1] = self.pca_image(inputs[1])
        return inputs,target

    def pca_image(self, rgb):
        """Apply the sampled chromatic perturbation to one HxWx3 image in [0, 1]."""
        eig = np.dot(rgb, self.eigvec)
        # NOTE(review): max_rgb and min_rgb are computed but never used below.
        max_rgb = np.clip(rgb,0,np.inf).max((0,1))
        min_rgb = rgb.min((0,1))
        mean_rgb = rgb.mean((0,1))
        max_abs_eig = np.abs(eig).max((0,1))
        max_l = np.sqrt(np.sum(max_abs_eig*max_abs_eig))
        mean_eig = np.dot(mean_rgb, self.eigvec)

        # no-mean stuff: normalize each channel, apply pow/add/mult, restore mean
        eig -= mean_eig[np.newaxis, np.newaxis]
        for c in range(3):
            if max_abs_eig[c] > 1e-2:
                mean_eig[c] /= max_abs_eig[c]
                eig[:,:,c] = eig[:,:,c] / max_abs_eig[c];
                # sign-preserving power: ((x > 0) - 0.5) * 2 is +/-1
                eig[:,:,c] = np.power(np.abs(eig[:,:,c]),self.pow_nomean[c]) *\
                            ((eig[:,:,c] > 0) -0.5)*2
                eig[:,:,c] = eig[:,:,c] + self.add_nomean[c]
                eig[:,:,c] = eig[:,:,c] * self.mult_nomean[c]
        eig += mean_eig[np.newaxis,np.newaxis]

        # withmean stuff: luminance channel first, then chroma magnitude s
        if max_abs_eig[0] > 1e-2:
            eig[:,:,0] = np.power(np.abs(eig[:,:,0]),self.pow_withmean[0]) * \
                                ((eig[:,:,0]>0)-0.5)*2;
            eig[:,:,0] = eig[:,:,0] + self.add_withmean[0];
            eig[:,:,0] = eig[:,:,0] * self.mult_withmean[0];

        # s is the per-pixel chroma (saturation-like) magnitude
        s = np.sqrt(eig[:,:,1]*eig[:,:,1] + eig[:,:,2] * eig[:,:,2])
        smask = s > 1e-2
        s1 = np.power(s, self.pow_withmean[1]);
        s1 = np.clip(s1 + self.add_withmean[1], 0,np.inf)
        s1 = s1 * self.mult_withmean[1]
        s1 = s1 * smask + s*(1-smask)

        # color angle: rotate the chroma plane
        if self.col_angle!=0:
            temp1 =  np.cos(self.col_angle) * eig[:,:,1] - np.sin(self.col_angle) * eig[:,:,2]
            temp2 =  np.sin(self.col_angle) * eig[:,:,1] + np.cos(self.col_angle) * eig[:,:,2]
            eig[:,:,1] = temp1
            eig[:,:,2] = temp2

        # to origin magnitude: undo the earlier per-channel normalization
        for c in range(3):
            if max_abs_eig[c] > 1e-2:
                eig[:,:,c] = eig[:,:,c] * max_abs_eig[c]

        if max_l > 1e-2:
            # l1: relative pixel norm before the saturation rescale
            l1 = np.sqrt(eig[:,:,0]*eig[:,:,0] + eig[:,:,1]*eig[:,:,1] + eig[:,:,2]*eig[:,:,2])
            l1 = l1 / max_l

        # rescale chroma from s to the perturbed magnitude s1
        eig[:,:,1][smask] = (eig[:,:,1] / s * s1)[smask]
        eig[:,:,2][smask] = (eig[:,:,2] / s * s1)[smask]
        #eig[:,:,1] = (eig[:,:,1] / s * s1) * smask + eig[:,:,1] * (1-smask)
        #eig[:,:,2] = (eig[:,:,2] / s * s1) * smask + eig[:,:,2] * (1-smask)

        if max_l > 1e-2:
            # NOTE(review): l is the post-saturation norm while l1 (powered
            # below) is the pre-saturation ratio computed above — confirm
            # against the FlowNet2 reference implementation.
            l = np.sqrt(eig[:,:,0]*eig[:,:,0] + eig[:,:,1]*eig[:,:,1] + eig[:,:,2]*eig[:,:,2])
            l1 = np.power(l1, self.lmult_pow)
            l1 = np.clip(l1 + self.lmult_add, 0, np.inf)
            l1 = l1 * self.lmult_mult
            l1 = l1 * max_l
            lmask = l > 1e-2
            # rescale each pixel vector to the perturbed luminance, then clamp
            # each channel to its original maximum magnitude
            eig[lmask] = (eig / l[:,:,np.newaxis] * l1[:,:,np.newaxis])[lmask]
            for c in range(3):
                eig[:,:,c][lmask] = (np.clip(eig[:,:,c], -np.inf, max_abs_eig[c]))[lmask]
        #        for c in range(3):
        #        #    eig[:,:,c][lmask] = (eig[:,:,c] / l * l1)[lmask] * lmask + eig[:,:,c] * (1-lmask)
        #            eig[:,:,c][lmask] = (eig[:,:,c] / l * l1)[lmask]
        #            eig[:,:,c] = (np.clip(eig[:,:,c], -np.inf, max_abs_eig[c])) * lmask + eig[:,:,c] * (1-lmask)

        # back to RGB space, clipped to the valid [0, 1] range
        return np.clip(np.dot(eig, self.eigvec.transpose()), 0, 1)
class ChromaticAug(object):
    """
    Chromatic augmentation: https://github.com/lmb-freiburg/flownet2/blob/master/src/caffe/layers/data_augmentation_layer.cu

    Samples noise/gamma/brightness/contrast/color factors once at
    construction time; ``__call__`` applies them to an image pair.
    """
    def __init__(self, noise = 0.06,
                       gamma = 0.02,
                       brightness = 0.02,
                       contrast = 0.02,
                       color = 0.02,
                       schedule_coeff=1):
        # Draw all augmentation parameters up-front so both frames of a
        # pair see consistent per-instance settings.
        self.noise = np.random.uniform(0, noise)
        self.gamma = np.exp(np.random.normal(0, gamma * schedule_coeff))
        self.brightness = np.random.normal(0, brightness * schedule_coeff)
        self.contrast = np.exp(np.random.normal(0, contrast * schedule_coeff))
        self.color = np.exp(np.random.normal(0, color * schedule_coeff, 3))

    def __call__(self, inputs, target):
        # The extra chromatic change is applied to the second frame only.
        inputs[1] = self.chrom_aug(inputs[1])
        # Additive Gaussian noise on both frames.
        inputs[0] += np.random.normal(0, self.noise, inputs[0].shape)
        # Bug fix: the noise for the second frame must use its own shape
        # (the original used inputs[0].shape, which broadcasts incorrectly
        # when the two frames have different sizes).
        inputs[1] += np.random.normal(0, self.noise, inputs[1].shape)
        return inputs, target

    def chrom_aug(self, rgb):
        """Apply color/gamma/brightness/contrast change to one image in [0, 1]."""
        # color change (per-channel scale, then renormalize mean brightness)
        mean_in = rgb.sum(-1)
        rgb = rgb * self.color[np.newaxis, np.newaxis]
        brightness_coeff = mean_in / (rgb.sum(-1) + 0.01)
        rgb = np.clip(rgb * brightness_coeff[:, :, np.newaxis], 0, 1)
        # gamma
        rgb = np.power(rgb, self.gamma)
        # brightness
        rgb += self.brightness
        # contrast
        rgb = 0.5 + (rgb - 0.5) * self.contrast
        rgb = np.clip(rgb, 0, 1)
        return rgb
| 42.809524
| 161
| 0.530907
|
acfd22eb528cefba52672bbd94360ccc98021c88
| 1,794
|
py
|
Python
|
wis2box/metadata/oscar.py
|
webb-ben/wis2node
|
e577d2bea5524f74872f47eee9deb35d6c510460
|
[
"Apache-2.0"
] | 7
|
2021-10-05T11:48:50.000Z
|
2022-02-04T12:47:15.000Z
|
wis2box/metadata/oscar.py
|
webb-ben/wis2node
|
e577d2bea5524f74872f47eee9deb35d6c510460
|
[
"Apache-2.0"
] | 47
|
2022-02-15T18:24:22.000Z
|
2022-03-31T11:32:52.000Z
|
wis2box/metadata/oscar.py
|
webb-ben/wis2node
|
e577d2bea5524f74872f47eee9deb35d6c510460
|
[
"Apache-2.0"
] | 3
|
2022-02-16T19:36:36.000Z
|
2022-03-14T08:14:20.000Z
|
###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import logging
from pyoscar import OSCARClient
from ..env import OSCAR_API_TOKEN
LOGGER = logging.getLogger(__name__)
def upload_station_metadata(record: str) -> None:
    """
    Uploads a WIGOS Metadata Record (WMDR) to WMO OSCAR/Surface

    :param record: `str` of WMDR

    :returns: response of ``OSCARClient.upload``
        (NOTE(review): the ``-> None`` annotation contradicts the
        ``return`` below — confirm the intended contract with callers)
    """

    client = OSCARClient(api_token=OSCAR_API_TOKEN)
    LOGGER.debug(f'Uploading metadata to OSCAR {client.api_url}')
    return client.upload(record)
def get_station_report(identifier: str) -> dict:
    """
    Retrieve the OSCAR/Surface metadata report for a station.

    :param identifier: WIGOS Station Identifier (WSI)

    :returns: `dict` of station metadata report
    """

    oscar = OSCARClient(api_token=OSCAR_API_TOKEN)
    LOGGER.debug(f'Fetching station report for {identifier}')
    return oscar.get_station_report(identifier)
| 30.40678
| 79
| 0.677258
|
acfd23325de07dd177d401d1ca18fa9942a9d999
| 2,086
|
py
|
Python
|
main_page/views.py
|
slusarczyk41/my_django_website
|
1ae95297a1a5901a57f858cca1cf90fbd8c2dfb1
|
[
"MIT"
] | null | null | null |
main_page/views.py
|
slusarczyk41/my_django_website
|
1ae95297a1a5901a57f858cca1cf90fbd8c2dfb1
|
[
"MIT"
] | null | null | null |
main_page/views.py
|
slusarczyk41/my_django_website
|
1ae95297a1a5901a57f858cca1cf90fbd8c2dfb1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.db.models import F
from django.utils import timezone
from django_ajax.decorators import ajax
from django.core.mail import EmailMessage
from wsgiref.util import FileWrapper
from django.http import HttpResponse
import requests
import os
def index(request):
    """Render the landing page."""
    return render(request, 'main_page/index.html', {'': ''})
def services(request):
    """Render the services page."""
    return render(request, 'main_page/services.html', {'': ''})
def about(request):
    """Render the about page."""
    return render(request, 'main_page/about.html', {'': ''})
def contact(request):
    """Render the contact page.

    reCAPTCHA verification is currently disabled; the original snippet
    is preserved below for reference.
    """
    # recaptcha_response = request.POST.get('g-recaptcha-response')
    # data = {
    #     'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
    #     'response': recaptcha_response
    # }
    # r = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)
    # result = r.json()
    return render(request, 'main_page/contact.html', {'': ''})
def valuation(request):
    """Render the valuation page."""
    return render(request, 'main_page/valuation.html', {'': ''})
@ajax
def contact_endpoint(request):
    """AJAX endpoint: forward the contact form as an e-mail.

    :returns: True when the mail was sent, False on failure.
    """
    email = EmailMessage(
        subject=request.POST['subject'],
        body="Message from "+request.POST['name']+"\n\n"+request.POST['message'],
        from_email='contact@dataguy.pl',
        to=['slusarczyk41@gmail.com'],
        reply_to=[request.POST['email']],
        headers={'Content-Type': 'text/plain'},
    )
    try:
        email.send()
        return True
    except Exception:
        # Bug fix: EmailMessage.send() raises SMTP/connection errors, not
        # ValueError — the original narrow clause let real send failures
        # propagate out of the endpoint instead of returning False.
        return False
@ajax
def download_cv_endpoint(request):
    """AJAX endpoint: stream the CV PDF as an attachment."""
    filename = 'media/to_download/Ślusarczyk_Jacek_CV_ENG.pdf'
    # Bug fix: wsgiref's FileWrapper expects a file-like object with a
    # read() method, not a path string — wrapping the str raised
    # AttributeError as soon as the response body was iterated.
    # Django closes the wrapped file when the response is closed.
    content = FileWrapper(open(filename, 'rb'))
    response = HttpResponse(content, content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=%s' % 'Ślusarczyk_Jacek_CV_ENG.pdf'
    return response
| 23.438202
| 95
| 0.663471
|
acfd25cf459db9a757b08d019e5ca1ccf31c3e8e
| 3,944
|
py
|
Python
|
dovetail/tests/unit/test_parser.py
|
xudan2189/dovetail
|
89beb8e201e69fa5af1d7a38928e7ea8a501d755
|
[
"Apache-2.0"
] | null | null | null |
dovetail/tests/unit/test_parser.py
|
xudan2189/dovetail
|
89beb8e201e69fa5af1d7a38928e7ea8a501d755
|
[
"Apache-2.0"
] | null | null | null |
dovetail/tests/unit/test_parser.py
|
xudan2189/dovetail
|
89beb8e201e69fa5af1d7a38928e7ea8a501d755
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
##############################################################################
# Copyright (c) 2016 lingui.zeng@huawei.com and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
#
"""
Test 'parser' module
"""
import logging
import os
import unittest
import yaml
import mock
from dovetail import parser
from dovetail.utils.dovetail_config import DovetailConfig as dt_cfg
@mock.patch('dovetail.parser.Parser.logger')
class TestParser(unittest.TestCase):
    """Unit tests for the dovetail ``parser`` module."""

    test_path = os.path.dirname(os.path.realpath(__file__))

    def setUp(self):
        """Test case setup"""
        conf_path = os.path.join(self.test_path,
                                 os.pardir, os.pardir, os.pardir,
                                 'etc/conf')
        dt_cfg.load_config_files(conf_path)
        logging.disable(logging.CRITICAL)

    def test_parser_cmd(self, mock_logger):
        """Test whether the command is correctly parsed."""
        mock_cmd = "python /functest/ci/run_tests.py "\
                   "-t {{validate_testcase}} -r"
        with open(os.path.join(self.test_path, 'test_testcase.yaml')) as f:
            mock_testcase_yaml = yaml.safe_load(f)
        MockTestcase = type('Testcase', (object,), {})
        mock_testcase = MockTestcase()
        # Bug fix: dict.values() returns a non-subscriptable view on
        # Python 3; materialize it before indexing.
        mock_testcase.testcase = list(mock_testcase_yaml.values())[0]
        output = parser.Parser.parse_cmd(mock_cmd, mock_testcase)
        expected_output = ("python /functest/ci/run_tests.py -t "
                           "tempest_smoke_serial -r")
        self.assertEqual(expected_output, output)

    def test_parser_cmd_fail(self, mock_logger):
        """An empty testcase renders the template variable as None."""
        mock_cmd = "python /functest/ci/run_tests.py "\
                   "-t {{validate_testcase}} -r"
        mock_testcase_yaml = {}
        MockTestcase = type('Testcase', (object,), {})
        mock_testcase = MockTestcase()
        mock_testcase.testcase = mock_testcase_yaml.values()
        output = parser.Parser.parse_cmd(mock_cmd, mock_testcase)
        expected_output = ("python /functest/ci/run_tests.py -t "
                           "None -r")
        self.assertEqual(expected_output, output)

    @mock.patch('dovetail.parser.jinja2')
    def test_parse_cmd_exception(self, mock_jinja, mock_logger):
        """Template errors are logged and parse_cmd returns None."""
        errorMSG = 'Exception was raised'
        exception = Exception(errorMSG)
        command = 'cmd'
        undefined_obj = mock.Mock()
        mock_jinja.StrictUndefined = undefined_obj
        mock_jinja.Template.side_effect = exception
        expected = None

        dovetail_parser = parser.Parser()
        exception_obj = mock.Mock()
        dovetail_parser.logger.exception = exception_obj
        result = dovetail_parser.parse_cmd(command, 'testcase')

        mock_jinja.Template.assert_called_once_with(command,
                                                    undefined=undefined_obj)
        exception_obj.assert_called_once_with(
            'Failed to parse cmd {}, exception: {}'.format(command, errorMSG))
        self.assertEqual(expected, result)

    @mock.patch('dovetail.parser.dt_logger.Logger')
    def test_create_log(self, mock_dt_logger, mock_logger):
        """create_log wires the class logger through dt_logger."""
        mock_dt_logger_obj = mock.Mock()
        logger_obj = mock.Mock()
        mock_dt_logger_obj.getLogger.return_value = logger_obj
        mock_dt_logger.return_value = mock_dt_logger_obj

        dovetail_parser = parser.Parser()
        dovetail_parser.create_log()

        mock_dt_logger.assert_called_once_with('dovetail.parser.Parser')
        mock_dt_logger_obj.getLogger.assert_called_once_with()
        self.assertEqual(dovetail_parser.logger, logger_obj)
| 39.049505
| 78
| 0.63286
|
acfd262a7340bdfc0166fedf2449a5bf78dc30a5
| 3,533
|
py
|
Python
|
src/cone/app/browser/utils.py
|
timgates42/cone.app
|
2a84a1ddc87930050780041eb7c05032e7bb8dbb
|
[
"BSD-3-Clause"
] | null | null | null |
src/cone/app/browser/utils.py
|
timgates42/cone.app
|
2a84a1ddc87930050780041eb7c05032e7bb8dbb
|
[
"BSD-3-Clause"
] | null | null | null |
src/cone/app/browser/utils.py
|
timgates42/cone.app
|
2a84a1ddc87930050780041eb7c05032e7bb8dbb
|
[
"BSD-3-Clause"
] | null | null | null |
from cone.app import compat
from cone.app.utils import app_config
from cone.app.utils import format_traceback as _format_traceback
from cone.app.utils import safe_encode
from cone.app.utils import safe_decode
from pyramid.i18n import TranslationStringFactory
import copy
import datetime
import re
_ = TranslationStringFactory('cone.app')
# B/C. use ``authenticated_userid`` directly.
def authenticated(request):
    """Return the authenticated user id for *request*.

    B/C shim — prefer ``request.authenticated_userid`` directly.
    """
    return request.authenticated_userid
def node_path(node):
    """Return ``node.path`` with ``None`` entries dropped and all
    remaining parts decoded to text.

    XXX: implement in ``BaseNode``.
    """
    parts = []
    for part in node.path:
        if part is not None:
            parts.append(safe_decode(part))
    return parts


# B/C, removed as of cone.app 1.1
nodepath = node_path
# default query parameters to quote
QUOTE_PARAMS = ('came_from',)


def make_query(quote_params=QUOTE_PARAMS, **kw):
    """Build a URL query string (``?a=1&b=2``) from keyword arguments.

    ``None`` values are skipped; strings and numbers are treated as
    single values, other iterables yield one pair per item. Parameter
    names listed in *quote_params* get URL-quoted values. Returns
    ``None`` when no pairs remain.
    """
    pairs = []
    for name, value in sorted(kw.items()):
        if value is None:
            continue
        if isinstance(value, compat.STR_TYPE):
            value = [value]
        if type(value) in compat.NUMBER_TYPES:
            value = [str(value)]
        quote = name in quote_params
        for item in value:
            if compat.IS_PY2:
                item = safe_encode(item)
            pairs.append('{}={}'.format(name, compat.quote(item) if quote else item))
    if pairs:
        return '?{}'.format('&'.join(pairs))
def make_url(request, path=None, node=None, resource=None, query=None):
    """Compose an application URL.

    ``node`` (if given) overrides ``path``; ``resource`` is appended as
    the last path segment; ``query`` (e.g. from ``make_query``) is
    appended verbatim.

    NOTE: ``path`` deliberately defaults to ``None`` rather than ``[]``
    — a mutable default in the signature is created once at compile time
    and would be shared (and aggregated) across calls; this previously
    broke icon lookup in navtree.
    """
    if node is not None:
        segments = node_path(node)
    elif path is None:
        segments = []
    else:
        segments = copy.copy(path)
    if resource is not None:
        segments.append(resource)
    quoted = [compat.quote(safe_encode(segment)) for segment in segments]
    url = '{}/{}'.format(request.application_url, '/'.join(quoted))
    if query:
        return '{}{}'.format(url, query)
    return url
def choose_name(container, name):
    """Derive a unique, URL-safe name for *name* within *container*.

    Non-word characters become dashes, stray single-character dash
    groups and dash runs are collapsed, and a ``-<counter>`` suffix is
    appended until the candidate is not contained in *container*
    (membership tested via ``in``).

    :param container: object supporting ``in`` (e.g. a node or mapping)
    :param name: candidate name
    :return: unique sanitized name
    """
    # Bug fix: regex patterns must be raw strings — '\w' in a plain
    # literal is an invalid string escape (DeprecationWarning today,
    # SyntaxError in future Python versions).
    name = re.sub(
        r'-{2,}', '-',
        re.sub(r'^\w-|-\w-|-\w$', '-',
               re.sub(r'\W', '-', name.strip()))).strip('-').lower()
    n = name
    i = 0
    while n in container:
        i += 1
        n = u'{}-{}'.format(name, i)
    return n.replace('/', '-').lstrip('+@')
def format_date(dt, long=True):
    """Format a datetime as ``dd.mm.YYYY`` (plus ``HH:MM`` when *long*).

    Non-datetime values yield the translated string "unknown".
    """
    if isinstance(dt, datetime.datetime):
        fmt = '%d.%m.%Y %H:%M' if long else '%d.%m.%Y'
        return dt.strftime(fmt)
    return _('unknown', default='Unknown')
def node_icon(node):
    """Return the icon for *node*.

    Resolution order: node properties, node info, application default.
    """
    icon = node.properties.icon
    if icon:
        return icon
    info_icon = node.nodeinfo.icon
    if info_icon:
        return info_icon
    return app_config().default_node_icon
def request_property(func):
    """Like ``property``, but the underlying function runs at most once
    per request.

    The computed value is cached on ``request.environ`` under the key
    ``<instanceid>.<classname>.<funcname>``. Works only on instances
    providing a ``request`` attribute.
    """
    def getter(self):
        environ = self.request.environ
        cache_key = '{}.{}.{}'.format(
            str(id(self)),
            self.__class__.__name__,
            func.__name__
        )
        if cache_key not in environ:
            environ[cache_key] = func(self)
        return environ[cache_key]
    getter.__doc__ = func.__doc__
    return property(getter)
def format_traceback():
    """Return the current traceback wrapped in a ``<pre>`` HTML block."""
    tb_text = _format_traceback()
    return '<pre>{}</pre>'.format(tb_text)
| 28.264
| 79
| 0.624115
|
acfd2650dc897d089f8546bfb138545ba8309399
| 7,339
|
py
|
Python
|
session3/complete/tx.py
|
casey-bowman/pb-exercises
|
00b814f2d1b8827db0ca79180db4b88a2cdfa087
|
[
"BSD-2-Clause"
] | null | null | null |
session3/complete/tx.py
|
casey-bowman/pb-exercises
|
00b814f2d1b8827db0ca79180db4b88a2cdfa087
|
[
"BSD-2-Clause"
] | null | null | null |
session3/complete/tx.py
|
casey-bowman/pb-exercises
|
00b814f2d1b8827db0ca79180db4b88a2cdfa087
|
[
"BSD-2-Clause"
] | null | null | null |
from io import BytesIO
from unittest import TestCase
from helper import (
little_endian_to_int,
read_varint,
)
from script import Script
class Tx:
    """A parsed Bitcoin transaction (version, inputs, outputs, locktime)."""

    def __init__(self, version, tx_ins, tx_outs, locktime):
        self.version = version
        self.tx_ins = tx_ins
        self.tx_outs = tx_outs
        self.locktime = locktime

    def __repr__(self):
        tx_ins = ''.join(tx_in.__repr__() + '\n' for tx_in in self.tx_ins)
        tx_outs = ''.join(tx_out.__repr__() + '\n' for tx_out in self.tx_outs)
        return 'version: {}\ntx_ins:\n{}\ntx_outs:\n{}\nlocktime: {}\n'.format(
            self.version,
            tx_ins,
            tx_outs,
            self.locktime,
        )

    @classmethod
    def parse(cls, s):
        '''Parse a serialized transaction from the byte stream ``s``.

        Wire layout: version (4 bytes LE) | varint input count | inputs |
        varint output count | outputs | locktime (4 bytes LE).

        :return: a ``Tx`` instance
        '''
        version = little_endian_to_int(s.read(4))
        # each count varint is consumed before its items are parsed
        tx_ins = [TxIn.parse(s) for _ in range(read_varint(s))]
        tx_outs = [TxOut.parse(s) for _ in range(read_varint(s))]
        locktime = little_endian_to_int(s.read(4))
        return cls(version, tx_ins, tx_outs, locktime)
class TxIn:
    """A single transaction input referencing a previous output."""

    def __init__(self, prev_tx, prev_index, script_sig, sequence):
        self.prev_tx = prev_tx
        self.prev_index = prev_index
        self.script_sig = Script.parse(script_sig)
        self.sequence = sequence

    def __repr__(self):
        return '{}:{}'.format(
            self.prev_tx.hex(),
            self.prev_index,
        )

    @classmethod
    def parse(cls, s):
        '''Parse one input from the byte stream ``s``.

        Wire layout: prev tx hash (32 bytes LE) | prev index (4 bytes LE) |
        varint script length | script_sig bytes | sequence (4 bytes LE).

        :return: a ``TxIn`` instance
        '''
        # hash is serialized little-endian; reverse to big-endian
        prev_tx = s.read(32)[::-1]
        prev_index = little_endian_to_int(s.read(4))
        script_sig = s.read(read_varint(s))
        sequence = little_endian_to_int(s.read(4))
        return cls(prev_tx, prev_index, script_sig, sequence)
class TxOut:
    """A single transaction output (amount in satoshis plus script)."""

    def __init__(self, amount, script_pubkey):
        self.amount = amount
        self.script_pubkey = Script.parse(script_pubkey)

    def __repr__(self):
        return '{}:{}'.format(self.amount, self.script_pubkey)

    @classmethod
    def parse(cls, s):
        '''Parse one output from the byte stream ``s``.

        Wire layout: amount (8 bytes LE) | varint script length |
        script_pubkey bytes.

        :return: a ``TxOut`` instance
        '''
        amount = little_endian_to_int(s.read(8))
        script_pubkey = s.read(read_varint(s))
        return cls(amount, script_pubkey)
class TxTest(TestCase):
    """Regression tests for Tx parsing against a known 1-in/2-out legacy
    mainnet transaction (same serialized bytes in every test)."""

    def test_parse_version(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.version, 1)

    def test_parse_inputs(self):
        # expected prev_tx is the first 32 bytes after the version, reversed
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(len(tx.tx_ins), 1)
        want = bytes.fromhex('d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81')
        self.assertEqual(tx.tx_ins[0].prev_tx, want)
        self.assertEqual(tx.tx_ins[0].prev_index, 0)
        want = bytes.fromhex('483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278a')
        self.assertEqual(tx.tx_ins[0].script_sig.serialize(), want)
        self.assertEqual(tx.tx_ins[0].sequence, 0xfffffffe)

    def test_parse_outputs(self):
        # amounts are in satoshis; scripts are standard p2pkh
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(len(tx.tx_outs), 2)
        want = 32454049
        self.assertEqual(tx.tx_outs[0].amount, want)
        want = bytes.fromhex('76a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac')
        self.assertEqual(tx.tx_outs[0].script_pubkey.serialize(), want)
        want = 10011545
        self.assertEqual(tx.tx_outs[1].amount, want)
        want = bytes.fromhex('76a9141c4bc762dd5423e332166702cb75f40df79fea1288ac')
        self.assertEqual(tx.tx_outs[1].script_pubkey.serialize(), want)

    def test_parse_locktime(self):
        # locktime is the final 4 little-endian bytes
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.locktime, 410393)
| 47.348387
| 486
| 0.725303
|
acfd276d1fbaa11bb459b10378db0ddd82e021e6
| 6,044
|
py
|
Python
|
homeassistant/components/cover/device_trigger.py
|
dummys/home-assistant
|
dd908caebade15adf061fade686355b94ed2f43a
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/cover/device_trigger.py
|
dummys/home-assistant
|
dd908caebade15adf061fade686355b94ed2f43a
|
[
"Apache-2.0"
] | 77
|
2020-07-16T16:43:09.000Z
|
2022-03-31T06:14:37.000Z
|
homeassistant/components/cover/device_trigger.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 7
|
2021-03-20T12:34:01.000Z
|
2021-12-02T10:13:52.000Z
|
"""Provides device automations for Cover."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
state as state_trigger,
)
from homeassistant.const import (
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_TYPE,
CONF_VALUE_TEMPLATE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.entity import get_supported_features
from homeassistant.helpers.typing import ConfigType
from . import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
)
# Trigger types driven by a numeric (tilt) position vs. plain state changes.
POSITION_TRIGGER_TYPES = {"position", "tilt_position"}
STATE_TRIGGER_TYPES = {"opened", "closed", "opening", "closing"}

# Position triggers: require at least one of above/below, both bounded
# to the 0-100 percent range.
POSITION_TRIGGER_SCHEMA = vol.All(
    DEVICE_TRIGGER_BASE_SCHEMA.extend(
        {
            vol.Required(CONF_ENTITY_ID): cv.entity_id,
            vol.Required(CONF_TYPE): vol.In(POSITION_TRIGGER_TYPES),
            vol.Optional(CONF_ABOVE): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            ),
            vol.Optional(CONF_BELOW): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            ),
        }
    ),
    cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)

# State triggers: one of the four cover states, with an optional "for"
# duration before firing.
STATE_TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): vol.In(STATE_TRIGGER_TYPES),
        vol.Optional(CONF_FOR): cv.positive_time_period_dict,
    }
)

# A trigger config must satisfy one of the two schemas above.
TRIGGER_SCHEMA = vol.Any(POSITION_TRIGGER_SCHEMA, STATE_TRIGGER_SCHEMA)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
    """List device triggers for Cover devices.

    Builds one trigger description per supported capability of each cover
    entity belonging to *device_id*: the four state triggers when the
    cover supports open/close, plus "position" / "tilt_position" when the
    corresponding set-position feature is supported.
    """
    registry = await entity_registry.async_get_registry(hass)
    triggers = []

    # Get all the integrations entities for this device
    for entry in entity_registry.async_entries_for_device(registry, device_id):
        if entry.domain != DOMAIN:
            continue

        supported_features = get_supported_features(hass, entry.entity_id)
        supports_open_close = supported_features & (SUPPORT_OPEN | SUPPORT_CLOSE)

        # Add triggers for each entity that belongs to this integration
        base_trigger = {
            CONF_PLATFORM: "device",
            CONF_DEVICE_ID: device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_ENTITY_ID: entry.entity_id,
        }

        if supports_open_close:
            # one trigger per state type (opened/closed/opening/closing)
            triggers += [
                {
                    **base_trigger,
                    CONF_TYPE: trigger,
                }
                for trigger in STATE_TRIGGER_TYPES
            ]
        if supported_features & SUPPORT_SET_POSITION:
            triggers.append(
                {
                    **base_trigger,
                    CONF_TYPE: "position",
                }
            )
        if supported_features & SUPPORT_SET_TILT_POSITION:
            triggers.append(
                {
                    **base_trigger,
                    CONF_TYPE: "tilt_position",
                }
            )

    return triggers
async def async_get_trigger_capabilities(hass: HomeAssistant, config: dict) -> dict:
    """List trigger capabilities.

    State triggers expose only an optional "for" duration; position
    triggers expose above/below percentage bounds (0-100, defaulting to
    the full range).
    """
    if config[CONF_TYPE] not in POSITION_TRIGGER_TYPES:
        return {
            "extra_fields": vol.Schema(
                {vol.Optional(CONF_FOR): cv.positive_time_period_dict}
            )
        }

    return {
        "extra_fields": vol.Schema(
            {
                vol.Optional(CONF_ABOVE, default=0): vol.All(
                    vol.Coerce(int), vol.Range(min=0, max=100)
                ),
                vol.Optional(CONF_BELOW, default=100): vol.All(
                    vol.Coerce(int), vol.Range(min=0, max=100)
                ),
            }
        )
    }
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger.

    State trigger types are delegated to the generic state trigger;
    position types are translated into a numeric_state trigger over the
    entity's current (tilt) position attribute.
    """
    if config[CONF_TYPE] in STATE_TRIGGER_TYPES:
        # Map the device trigger type onto the target cover state.
        if config[CONF_TYPE] == "opened":
            to_state = STATE_OPEN
        elif config[CONF_TYPE] == "closed":
            to_state = STATE_CLOSED
        elif config[CONF_TYPE] == "opening":
            to_state = STATE_OPENING
        elif config[CONF_TYPE] == "closing":
            to_state = STATE_CLOSING

        state_config = {
            CONF_PLATFORM: "state",
            CONF_ENTITY_ID: config[CONF_ENTITY_ID],
            state_trigger.CONF_TO: to_state,
        }
        if CONF_FOR in config:
            state_config[CONF_FOR] = config[CONF_FOR]
        state_config = state_trigger.TRIGGER_SCHEMA(state_config)
        return await state_trigger.async_attach_trigger(
            hass, state_config, action, automation_info, platform_type="device"
        )

    # position trigger: pick the attribute to watch
    if config[CONF_TYPE] == "position":
        position = "current_position"
    if config[CONF_TYPE] == "tilt_position":
        position = "current_tilt_position"
    # Missing bounds default to outside the 0-100 range so they never constrain.
    min_pos = config.get(CONF_ABOVE, -1)
    max_pos = config.get(CONF_BELOW, 101)
    value_template = f"{{{{ state.attributes.{position} }}}}"

    numeric_state_config = {
        CONF_PLATFORM: "numeric_state",
        CONF_ENTITY_ID: config[CONF_ENTITY_ID],
        CONF_BELOW: max_pos,
        CONF_ABOVE: min_pos,
        CONF_VALUE_TEMPLATE: value_template,
    }
    numeric_state_config = numeric_state_trigger.TRIGGER_SCHEMA(numeric_state_config)
    return await numeric_state_trigger.async_attach_trigger(
        hass, numeric_state_config, action, automation_info, platform_type="device"
    )
| 32.148936
| 85
| 0.636664
|
acfd2914ae84739f445ea30fb86d30d4b35bb4be
| 22,214
|
py
|
Python
|
art/estimators/classification/mxnet.py
|
mcguires5/adversarial-robustness-toolbox
|
f8b0552859eaf31c5b66e1d14d28b89178795ad0
|
[
"MIT"
] | 1
|
2020-07-12T03:45:23.000Z
|
2020-07-12T03:45:23.000Z
|
art/estimators/classification/mxnet.py
|
mcguires5/adversarial-robustness-toolbox
|
f8b0552859eaf31c5b66e1d14d28b89178795ad0
|
[
"MIT"
] | 105
|
2020-08-24T06:15:43.000Z
|
2022-03-24T08:03:16.000Z
|
art/estimators/classification/mxnet.py
|
mcguires5/adversarial-robustness-toolbox
|
f8b0552859eaf31c5b66e1d14d28b89178795ad0
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the classifier `MXClassifier` for MXNet Gluon models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from typing import List, Optional, Tuple, Union, TYPE_CHECKING
import numpy as np
import six
from art.config import (
ART_NUMPY_DTYPE,
ART_DATA_PATH,
CLIP_VALUES_TYPE,
PREPROCESSING_TYPE,
)
from art.estimators.mxnet import MXEstimator
from art.estimators.classification.classifier import (
ClassGradientsMixin,
ClassifierMixin,
)
from art.utils import Deprecated, deprecated_keyword_arg, check_and_transform_label_format
if TYPE_CHECKING:
import mxnet as mx
from art.data_generators import DataGenerator
from art.defences.preprocessor import Preprocessor
from art.defences.postprocessor import Postprocessor
logger = logging.getLogger(__name__)
class MXClassifier(ClassGradientsMixin, ClassifierMixin, MXEstimator): # lgtm [py/missing-call-to-init]
"""
Wrapper class for importing MXNet Gluon models.
"""
@deprecated_keyword_arg("channel_index", end_version="1.5.0", replaced_by="channels_first")
def __init__(
self,
model: "mx.gluon.Block",
loss: Union["mx.nd.loss", "mx.gluon.loss"],
input_shape: Tuple[int, ...],
nb_classes: int,
optimizer: Optional["mx.gluon.Trainer"] = None,
ctx: Optional["mx.context.Context"] = None,
channel_index=Deprecated,
channels_first: bool = True,
clip_values: Optional[CLIP_VALUES_TYPE] = None,
preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
preprocessing: PREPROCESSING_TYPE = (0, 1),
) -> None:
"""
Initialize an `MXClassifier` object. Assumes the `model` passed as parameter is a Gluon model.
:param model: The Gluon model. The output of the model can be logits, probabilities or anything else. Logits
output should be preferred where possible to ensure attack efficiency.
:param loss: The loss function for which to compute gradients for training.
:param input_shape: The shape of one input instance.
:param nb_classes: The number of classes of the model.
:param optimizer: The optimizer used to train the classifier. This parameter is only required if fitting will
be done with method fit.
:param ctx: The device on which the model runs (CPU or GPU). If not provided, CPU is assumed.
:param channel_index: Index of the axis in data containing the color channels or features.
:type channel_index: `int`
:param channels_first: Set channels first or last.
:param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
maximum values allowed for features. If floats are provided, these will be used as the range of all
features. If arrays are provided, each value will be considered the bound for a feature, thus
the shape of clip values needs to match the total number of features.
:param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
:param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
:param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input will then
be divided by the second one.
"""
import mxnet as mx # lgtm [py/repeated-import]
# Remove in 1.5.0
if channel_index == 3:
channels_first = False
elif channel_index == 1:
channels_first = True
elif channel_index is not Deprecated:
raise ValueError("Not a proper channel_index. Use channels_first.")
super(MXClassifier, self).__init__(
clip_values=clip_values,
channel_index=channel_index,
channels_first=channels_first,
preprocessing_defences=preprocessing_defences,
postprocessing_defences=postprocessing_defences,
preprocessing=preprocessing,
)
self._model = model
self._loss = loss
self._nb_classes = nb_classes
self._input_shape = input_shape
self._device = ctx
self._optimizer = optimizer
if ctx is None:
self._ctx = mx.cpu()
else:
self._ctx = ctx
# Get the internal layer
self._layer_names = self._get_layers()
def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None:
"""
Fit the classifier on the training set `(inputs, outputs)`.
:param x: Training data.
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of
shape (nb_samples,).
:param batch_size: Size of batches.
:param nb_epochs: Number of epochs to use for training.
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for MXNet
and providing it takes no effect.
"""
import mxnet as mx # lgtm [py/repeated-import]
if self._optimizer is None:
raise ValueError("An MXNet optimizer is required for fitting the model.")
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else True
y = check_and_transform_label_format(y, self.nb_classes)
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
y_preprocessed = np.argmax(y_preprocessed, axis=1)
nb_batch = int(np.ceil(len(x_preprocessed) / batch_size))
ind = np.arange(len(x_preprocessed))
for _ in range(nb_epochs):
# Shuffle the examples
np.random.shuffle(ind)
# Train for one epoch
for m in range(nb_batch):
x_batch = mx.nd.array(
x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]].astype(ART_NUMPY_DTYPE)
).as_in_context(self._ctx)
y_batch = mx.nd.array(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).as_in_context(
self._ctx
)
with mx.autograd.record(train_mode=train_mode):
# Perform prediction
preds = self._model(x_batch)
# Apply postprocessing
preds = self._apply_postprocessing(preds=preds, fit=True)
# Form the loss function
loss = self._loss(preds, y_batch)
loss.backward()
# Update parameters
self._optimizer.step(batch_size)
def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch.
:param nb_epochs: Number of epochs to use for training.
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for MXNet
and providing it takes no effect.
"""
import mxnet as mx # lgtm [py/repeated-import]
from art.data_generators import MXDataGenerator
if self._optimizer is None:
raise ValueError("An MXNet optimizer is required for fitting the model.")
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else True
if (
isinstance(generator, MXDataGenerator)
and (self.preprocessing_defences is None or self.preprocessing_defences == [])
and self.preprocessing == (0, 1)
):
# Train directly in MXNet
for _ in range(nb_epochs):
for x_batch, y_batch in generator.iterator:
x_batch = mx.nd.array(x_batch.astype(ART_NUMPY_DTYPE)).as_in_context(self._ctx)
y_batch = mx.nd.argmax(y_batch, axis=1)
y_batch = mx.nd.array(y_batch).as_in_context(self._ctx)
with mx.autograd.record(train_mode=train_mode):
# Perform prediction
preds = self._model(x_batch)
# Form the loss function
loss = self._loss(preds, y_batch)
loss.backward()
# Update parameters
self._optimizer.step(x_batch.shape[0])
else:
# Fit a generic data generator through the API
super(MXClassifier, self).fit_generator(generator, nb_epochs=nb_epochs)
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
"""
Perform prediction for a batch of inputs.
:param x: Test set.
:param batch_size: Size of batches.
:return: Array of predictions of shape `(nb_inputs, nb_classes)`.
"""
import mxnet as mx # lgtm [py/repeated-import]
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else False
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
# Run prediction with batch processing
results = np.zeros((x_preprocessed.shape[0], self.nb_classes), dtype=np.float32)
num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
for m in range(num_batch):
# Batch indexes
begin, end = (
m * batch_size,
min((m + 1) * batch_size, x_preprocessed.shape[0]),
)
# Predict
x_batch = mx.nd.array(x_preprocessed[begin:end].astype(ART_NUMPY_DTYPE), ctx=self._ctx)
x_batch.attach_grad()
with mx.autograd.record(train_mode=train_mode):
preds = self._model(x_batch)
results[begin:end] = preds.asnumpy()
# Apply postprocessing
predictions = self._apply_postprocessing(preds=results, fit=False)
return predictions
def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray:
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
"""
import mxnet as mx # lgtm [py/repeated-import]
# Check value of label for computing gradients
if not (
label is None
or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes))
or (
isinstance(label, np.ndarray)
and len(label.shape) == 1
and (label < self.nb_classes).all()
and label.shape[0] == x.shape[0]
)
):
raise ValueError("Label %s is out of range." % str(label))
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else False
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
x_preprocessed = mx.nd.array(x_preprocessed.astype(ART_NUMPY_DTYPE), ctx=self._ctx)
x_preprocessed.attach_grad()
if label is None:
with mx.autograd.record(train_mode=False):
preds = self._model(x_preprocessed)
class_slices = [preds[:, i] for i in range(self.nb_classes)]
grads = []
for slice_ in class_slices:
slice_.backward(retain_graph=True)
grad = x_preprocessed.grad.asnumpy()
grads.append(grad)
grads = np.swapaxes(np.array(grads), 0, 1)
elif isinstance(label, (int, np.integer)):
with mx.autograd.record(train_mode=train_mode):
preds = self._model(x_preprocessed)
class_slice = preds[:, label]
class_slice.backward()
grads = np.expand_dims(x_preprocessed.grad.asnumpy(), axis=1)
else:
unique_labels = list(np.unique(label))
with mx.autograd.record(train_mode=train_mode):
preds = self._model(x_preprocessed)
class_slices = [preds[:, i] for i in unique_labels]
grads = []
for slice_ in class_slices:
slice_.backward(retain_graph=True)
grad = x_preprocessed.grad.asnumpy()
grads.append(grad)
grads = np.swapaxes(np.array(grads), 0, 1)
lst = [unique_labels.index(i) for i in label]
grads = grads[np.arange(len(grads)), lst]
grads = np.expand_dims(grads, axis=1)
grads = self._apply_preprocessing_gradient(x, grads)
return grads
def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
`(nb_samples,)`.
:return: Array of gradients of the same shape as `x`.
"""
import mxnet as mx # lgtm [py/repeated-import]
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else False
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
y_preprocessed = mx.nd.array([np.argmax(y_preprocessed, axis=1)], ctx=self._ctx).T
x_preprocessed = mx.nd.array(x_preprocessed.astype(ART_NUMPY_DTYPE), ctx=self._ctx)
x_preprocessed.attach_grad()
with mx.autograd.record(train_mode=train_mode):
preds = self._model(x_preprocessed)
loss = self._loss(preds, y_preprocessed)
loss.backward()
# Compute gradients
grads = x_preprocessed.grad.asnumpy()
grads = self._apply_preprocessing_gradient(x, grads)
assert grads.shape == x.shape
return grads
@property
def layer_names(self) -> List[str]:
"""
Return the hidden layers in the model, if applicable.
:return: The hidden layers in the model, input and output layers excluded.
.. warning:: `layer_names` tries to infer the internal structure of the model.
This feature comes with no guarantees on the correctness of the result.
The intended order of the layers tries to match their order in the model, but this is not
guaranteed either.
"""
return self._layer_names
def get_activations(
self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False
) -> np.ndarray:
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`.
:param x: Input for computing the activations.
:param layer: Layer for computing the activations
:param batch_size: Size of batches.
:param framework: If true, return the intermediate tensor representation of the activation.
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
"""
import mxnet as mx # lgtm [py/repeated-import]
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else False
if isinstance(layer, six.string_types):
if layer not in self._layer_names:
raise ValueError("Layer name %s is not part of the model." % layer)
layer_ind = self._layer_names.index(layer)
elif isinstance(layer, int):
if layer < 0 or layer >= len(self._layer_names):
raise ValueError(
"Layer index %d is outside of range (0 to %d included)." % (layer, len(self._layer_names) - 1)
)
layer_ind = layer
else:
raise TypeError("Layer must be of type `str` or `int`.")
# Apply preprocessing and defences
if x.shape == self.input_shape:
x_expanded = np.expand_dims(x, 0)
else:
x_expanded = x
x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False)
if framework:
return self._model[layer_ind]
# Compute activations with batching
activations = []
nb_batches = int(np.ceil(len(x_preprocessed) / float(batch_size)))
for batch_index in range(nb_batches):
# Batch indexes
begin, end = (
batch_index * batch_size,
min((batch_index + 1) * batch_size, x_preprocessed.shape[0]),
)
# Predict
x_batch = mx.nd.array(x_preprocessed[begin:end].astype(ART_NUMPY_DTYPE), ctx=self._ctx)
x_batch.attach_grad()
with mx.autograd.record(train_mode=train_mode):
preds = self._model[layer_ind](x_batch)
activations.append(preds.asnumpy())
activations = np.vstack(activations)
return activations
def set_learning_phase(self, train: bool) -> None:
"""
Set the learning phase for the backend framework.
:param train: True to set the learning phase to training, False to set it to prediction.
"""
if isinstance(train, bool):
self._learning_phase = train
def save(self, filename: str, path: Optional[str] = None) -> None:
"""
Save a model to file in the format specific to the backend framework. For Gluon, only parameters are saved in
file with name `<filename>.params` at the specified path. To load the saved model, the original model code needs
to be run before calling `load_parameters` on the generated Gluon model.
:param filename: Name of the file where to store the model.
:param path: Path of the folder where to store the model. If no path is specified, the model will be stored in
the default data location of the library `ART_DATA_PATH`.
"""
if path is None:
full_path = os.path.join(ART_DATA_PATH, filename)
else:
full_path = os.path.join(path, filename)
folder = os.path.split(full_path)[0]
if not os.path.exists(folder):
os.makedirs(folder)
self._model.save_parameters(full_path + ".params")
logger.info("Model parameters saved in path: %s.params.", full_path)
def __repr__(self):
repr_ = (
"%s(model=%r, loss=%r, input_shape=%r, nb_classes=%r, optimizer=%r, ctx=%r, channel_index=%r,"
" channels_first=%r, clip_values=%r, preprocessing_defences=%r, postprocessing_defences=%r,"
" preprocessing=%r)"
% (
self.__module__ + "." + self.__class__.__name__,
self._model,
self._loss,
self.input_shape,
self.nb_classes,
self._optimizer,
self._ctx,
self.channel_index,
self.channels_first,
self.clip_values,
self.preprocessing_defences,
self.postprocessing_defences,
self.preprocessing,
)
)
return repr_
def _get_layers(self) -> list:
"""
Return the hidden layers in the model, if applicable.
:return: The hidden layers in the model, input and output layers excluded.
"""
layer_names = [layer.name for layer in self._model[:-1]]
logger.info("Inferred %i hidden layers on MXNet classifier.", len(layer_names))
return layer_names
| 43.133981
| 120
| 0.626047
|
acfd29c6f1293a18c91987cef718f1c42e92de9a
| 34,549
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py
|
tyler-lloyd/azure-cli
|
5e999e49594ad51557c05b9c55e00c3c16932575
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py
|
tyler-lloyd/azure-cli
|
5e999e49594ad51557c05b9c55e00c3c16932575
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/sqlvm/tests/latest/test_sqlvm_commands.py
|
tyler-lloyd/azure-cli
|
5e999e49594ad51557c05b9c55e00c3c16932575
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.azclierror import (
RequiredArgumentMissingError
)
from azure.cli.core.mock import DummyCli
from azure.cli.testsdk.base import execute
from azure.cli.testsdk.exceptions import CliTestError
from azure.cli.testsdk import (
JMESPathCheck,
JMESPathCheckExists,
NoneCheck,
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer,
LogAnalyticsWorkspacePreparer)
from azure.cli.testsdk.preparers import (
AbstractPreparer,
SingleValueReplacer)
from time import sleep
# Constants
sqlvm_name_prefix = 'clisqlvm'
sqlvm_domain_prefix = 'domainvm'
sqlvm_group_prefix = 'sqlgroup'
sqlvm_max_length = 15
class SqlVirtualMachinePreparer(AbstractPreparer, SingleValueReplacer):
def __init__(self, name_prefix=sqlvm_name_prefix, location='westus',
vm_user='admin123', vm_password='SecretPassword123', parameter_name='sqlvm',
resource_group_parameter_name='resource_group', skip_delete=True):
super(SqlVirtualMachinePreparer, self).__init__(name_prefix, sqlvm_max_length)
self.location = location
self.parameter_name = parameter_name
self.vm_user = vm_user
self.vm_password = vm_password
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
def create_resource(self, name, **kwargs):
group = self._get_resource_group(**kwargs)
template = ('az vm create -l {} -g {} -n {} --admin-username {} --admin-password {} --image MicrosoftSQLServer:SQL2017-WS2016:Enterprise:latest'
' --size Standard_DS2_v2 --nsg-rule NONE')
execute(DummyCli(), template.format(self.location, group, name, self.vm_user, self.vm_password))
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
group = self._get_resource_group(**kwargs)
execute(DummyCli(), 'az vm delete -g {} -n {} --yes --no-wait'.format(group, name))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a virtual machine a resource group is required. Please add ' \
'decorator @{} in front of this preparer.'
raise CliTestError(template.format(ResourceGroupPreparer.__name__,
self.resource_group_parameter_name))
class DomainPreparer(AbstractPreparer, SingleValueReplacer):
import string
def __init__(self, name_prefix=sqlvm_domain_prefix, location='westus',
vm_user='admin123', vm_password='SecretPassword123', parameter_name='domainvm',
resource_group_parameter_name='resource_group', skip_delete=True):
super(DomainPreparer, self).__init__(name_prefix, sqlvm_max_length)
self.location = location
self.parameter_name = parameter_name
self.vm_user = vm_user
self.vm_password = vm_password
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
def id_generator(self, size=6, chars=string.ascii_lowercase + string.digits):
import random
return ''.join(random.choice(chars) for _ in range(size))
def create_resource(self, name, **kwargs):
group = self._get_resource_group(**kwargs)
dns_name = self.id_generator()
parameters = ('adminUsername=admin123 adminPassword=SecretPassword123 location=westus '
'domainName=domain.com dnsPrefix={}').format(dns_name)
template = 'az deployment group create --name {} -g {} --template-uri {} --parameters {}'
execute(DummyCli(), template.format('domaintemplate', group,
'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/application-workloads/active-directory/active-directory-new-domain/azuredeploy.json',
parameters))
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
group = self._get_resource_group(**kwargs)
execute(DummyCli(), 'az group delete -g {}'.format(group))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a virtual machine a resource group is required. Please add ' \
'decorator @{} in front of this preparer.'
raise CliTestError(template.format(ResourceGroupPreparer.__name__,
self.resource_group_parameter_name))
class SqlVmScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlVirtualMachinePreparer()
@LogAnalyticsWorkspacePreparer(location="westus")
def test_sqlvm_mgmt_assessment(self, resource_group, resource_group_location, sqlvm, laworkspace):
# create sqlvm1 with minimal required parameters
self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --sql-mgmt-type {}'
.format(sqlvm, resource_group, resource_group_location, 'PAYG', 'Full'),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('sqlServerLicenseType', 'PAYG')
])
# test assessment schedule enabling succeeds
with self.assertRaisesRegex(RequiredArgumentMissingError, "Assessment requires a Log Analytics workspace and Log Analytics extension on VM"):
self.cmd('sql vm update -n {} -g {} --assessment-weekly-interval {} --assessment-day-of-week {} --assessment-start-time-local {} '
.format(sqlvm, resource_group, 1, 'Monday', '20:30'))
# test assessment schedule enabling succeeds
self.cmd('sql vm update -n {} -g {} --assessment-weekly-interval {} --assessment-day-of-week {} --assessment-start-time-local {} '
'--workspace-rg {} --workspace-name {}'
.format(sqlvm, resource_group, 1, 'Monday', '20:30', resource_group, laworkspace),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('provisioningState', "Succeeded")
])
# verify assessment settings were processed
expand_all = self.cmd('sql vm show -n {} -g {} --expand {}'
.format(sqlvm, resource_group, 'AssessmentSettings')
).get_output_in_json()
assessment_settings = expand_all['assessmentSettings']
self.assertTrue(assessment_settings['enable'])
self.assertTrue(assessment_settings['schedule']['enable'])
self.assertEqual(1, assessment_settings['schedule']['weeklyInterval'])
self.assertEqual("Monday", assessment_settings['schedule']['dayOfWeek'])
self.assertEqual("20:30", assessment_settings['schedule']['startTimeLocal'])
# test start-assessment succeeds
self.cmd('sql vm start-assessment -n {} -g {}'
.format(sqlvm, resource_group))
# verify start-assessment succeeded
self.cmd('sql vm show -n {} -g {}'
.format(sqlvm, resource_group),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('provisioningState', "Succeeded")
])
# test assessment disabling succeeds
self.cmd('sql vm update -n {} -g {} --enable-assessment {}'
.format(sqlvm, resource_group, False),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('provisioningState', "Succeeded")
])
@ResourceGroupPreparer()
@SqlVirtualMachinePreparer()
@StorageAccountPreparer()
def test_sqlvm_mgmt(self, resource_group, resource_group_location, sqlvm, storage_account):
loc = 'westus'
self.cmd('storage account update -n {} -g {} --set kind=StorageV2'.format(storage_account, resource_group))
sa = self.cmd('storage account show -n {} -g {}'
.format(storage_account, resource_group)).get_output_in_json()
key = self.cmd('storage account keys list -n {} -g {}'
.format(storage_account, resource_group)).get_output_in_json()
# Assert customer cannot create a SQL vm with no agent and do not provide offer and sku
with self.assertRaisesRegex(RequiredArgumentMissingError, "usage error: --sql-mgmt-type NoAgent --image-sku NAME --image-offer NAME"):
self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --sql-mgmt-type {}'
.format(sqlvm, resource_group, loc, 'PAYG', 'NoAgent'))
# test create sqlvm with minimal required parameters
sqlvm_1 = self.cmd('sql vm create -n {} -g {} -l {} --license-type {}'
.format(sqlvm, resource_group, loc, 'PAYG'),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('sqlServerLicenseType', 'PAYG'),
JMESPathCheck('sqlManagement', 'LightWeight')
]).get_output_in_json()
# test list sqlvm should be 1
self.cmd('sql vm list -g {}'.format(resource_group), checks=[JMESPathCheck('length(@)', 1)])
# test show of vm
self.cmd('sql vm show -n {} -g {}'
.format(sqlvm, resource_group),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('id', sqlvm_1['id'])
])
# Check the id of the vm is correct in show
self.cmd('sql vm show -n {} -g {}'
.format(sqlvm, resource_group),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('id', sqlvm_1['id'])
])
# test update sqlvm with management mode to make sure it updates to full.
self.cmd('sql vm update -n {} -g {} --sql-mgmt-type {} --yes'
.format(sqlvm, resource_group, 'Full'),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('sqlManagement', 'Full')
]).get_output_in_json()
# test expand parameter: * - all settings exist
expand_all = self.cmd('sql vm show -n {} -g {} --expand {}'
.format(sqlvm, resource_group, '*')
).get_output_in_json()
assert 'assessmentSettings' in expand_all
assert 'autoBackupSettings' in expand_all
assert 'autoPatchingSettings' in expand_all
assert 'keyVaultCredentialSettings' in expand_all
assert 'serverConfigurationsManagementSettings' in expand_all
# test expand parameter: single value - only specified setting exists
expand_one = self.cmd('sql vm show -n {} -g {} --expand {}'
.format(sqlvm, resource_group, 'AutoBackupSettings')
).get_output_in_json()
assert 'assessmentSettings' not in expand_one
assert 'autoBackupSettings' in expand_one
assert 'autoPatchingSettings' not in expand_one
assert 'keyVaultCredentialSettings' not in expand_one
assert 'serverConfigurationsManagementSettings' not in expand_one
# test expand parameter: comma-separated values - all specificed settings exist
expand_comma = self.cmd('sql vm show -n {} -g {} --expand {}'
.format(sqlvm, resource_group, 'AutoPatchingSettings AutoBackupSettings')
).get_output_in_json()
assert 'assessmentSettings' not in expand_comma
assert 'autoBackupSettings' in expand_comma
assert 'autoPatchingSettings' in expand_comma
assert 'keyVaultCredentialSettings' not in expand_comma
assert 'serverConfigurationsManagementSettings' not in expand_comma
# test expand parameter: comma-separated values with * - all settings exist
expand_comma_all = self.cmd('sql vm show -n {} -g {} --expand {}'
.format(sqlvm, resource_group, 'AutoPatchingSettings * AutoBackupSettings')
).get_output_in_json()
assert 'assessmentSettings' in expand_comma_all
assert 'autoBackupSettings' in expand_comma_all
assert 'autoPatchingSettings' in expand_comma_all
assert 'keyVaultCredentialSettings' in expand_comma_all
assert 'serverConfigurationsManagementSettings' in expand_comma_all
# test license change
self.cmd('sql vm update -n {} -g {} --license-type {}'
.format(sqlvm, resource_group, 'AHUB'),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('id', sqlvm_1['id']),
JMESPathCheck('sqlServerLicenseType', 'AHUB')
])
# test enabling R services
self.cmd('sql vm update -n {} -g {} --enable-r-services {}'
.format(sqlvm, resource_group, True),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('id', sqlvm_1['id'])
])
# test autopatching enabling succeeds
self.cmd('sql vm update -n {} -g {} --day-of-week {} --maintenance-window-duration {} --maintenance-window-start-hour {}'
.format(sqlvm, resource_group, 'Monday', 60, 22),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('id', sqlvm_1['id'])
])
# test autopatching disabling succeeds
self.cmd('sql vm update -n {} -g {} --enable-auto-patching {}'
.format(sqlvm, resource_group, False),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('id', sqlvm_1['id'])
])
# test backup enabling works
self.cmd('sql vm update -n {} -g {} --backup-schedule-type {} --full-backup-frequency {} --full-backup-start-hour {} --full-backup-duration {} '
'--sa-key {} --storage-account {} --retention-period {} --log-backup-frequency {}'
.format(sqlvm, resource_group, 'Manual', 'Weekly', 2, 2, key[0]['value'], sa['primaryEndpoints']['blob'], 30, 60),
checks=[
JMESPathCheck('name', sqlvm),
JMESPathCheck('location', loc),
JMESPathCheck('provisioningState', "Succeeded"),
JMESPathCheck('id', sqlvm_1['id'])
])
# test delete vm
self.cmd('sql vm delete -n {} -g {} --yes'
.format(sqlvm, resource_group),
checks=NoneCheck())
# test list sql vm should be empty
self.cmd('sql vm list -g {}'.format(resource_group), checks=[NoneCheck()])
    @ResourceGroupPreparer(name_prefix='sqlvm_cli_test_create')
    @SqlVirtualMachinePreparer(parameter_name='sqlvm1')
    @SqlVirtualMachinePreparer(parameter_name='sqlvm2')
    @SqlVirtualMachinePreparer(parameter_name='sqlvm3')
    @StorageAccountPreparer()
    def test_sqlvm_create_and_delete(self, resource_group, resource_group_location, sqlvm1, sqlvm2, sqlvm3, storage_account):
        """Exercise `sql vm create`/`sql vm delete` across license types, SQL management,
        auto-patching, auto-backup and R-services options on three prepared VMs.

        NOTE(review): runs against live Azure resources via the preparer decorators;
        VM quota presumably motivates the delete-then-recreate pattern below.
        """
        # test create sqlvm1 with minimal required parameters
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'PAYG'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG')
                 ])
        # test create sqlvm2 with AHUB changes inmediately
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {}'
                 .format(sqlvm2, resource_group, resource_group_location, 'AHUB'),
                 checks=[
                     JMESPathCheck('name', sqlvm2),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'AHUB')
                 ])
        # test create sqlvm with sql connectivity settings
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --sql-mgmt-type {} --connectivity-type {} --port {} --sql-auth-update-username {} --sql-auth-update-pwd {}'
                 .format(sqlvm3, resource_group, resource_group_location, 'PAYG', 'Full', 'PUBLIC', 1433, 'sqladmin123', 'SecretPassword123'),
                 checks=[
                     JMESPathCheck('name', sqlvm3),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG'),
                     JMESPathCheck('sqlManagement', 'Full')
                 ])
        # For allocation purposes, will delete the vms and re create them with different settings.
        # delete sqlvm1
        self.cmd('sql vm delete -n {} -g {} --yes'
                 .format(sqlvm1, resource_group),
                 checks=NoneCheck())
        # delete sqlvm2
        self.cmd('sql vm delete -n {} -g {} --yes'
                 .format(sqlvm2, resource_group),
                 checks=NoneCheck())
        # delete sqlvm3
        self.cmd('sql vm delete -n {} -g {} --yes'
                 .format(sqlvm3, resource_group),
                 checks=NoneCheck())
        # test create sqlvm1 with auto patching
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --sql-mgmt-type {} --day-of-week {} --maintenance-window-duration {} --maintenance-window-start-hour {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'PAYG', 'Full', 'Monday', 60, 22),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG'),
                     JMESPathCheck('sqlManagement', 'Full')
                 ])
        # test create sqlvm1 with auto backup
        # Auto-backup needs a StorageV2 account; fetch its blob endpoint and a key first.
        self.cmd('storage account update -n {} -g {} --set kind=StorageV2'.format(storage_account, resource_group))
        sa = self.cmd('storage account show -n {} -g {}'
                      .format(storage_account, resource_group)).get_output_in_json()
        key = self.cmd('storage account keys list -n {} -g {}'
                       .format(storage_account, resource_group)).get_output_in_json()
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --backup-schedule-type {} --full-backup-frequency {} --full-backup-start-hour {} --full-backup-duration {} '
                 '--sa-key {} --storage-account {} --retention-period {} --log-backup-frequency {} --sql-mgmt-type {}'
                 .format(sqlvm2, resource_group, resource_group_location, 'PAYG', 'Manual', 'Weekly', 2, 2, key[0]['value'], sa['primaryEndpoints']['blob'], 30, 60, 'Full'),
                 checks=[
                     JMESPathCheck('name', sqlvm2),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG'),
                     JMESPathCheck('sqlManagement', 'Full')
                 ])
        # test create sqlvm1 with R services on
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --enable-r-services {} --sql-mgmt-type {}'
                 .format(sqlvm3, resource_group, resource_group_location, 'PAYG', True, 'Full'),
                 checks=[
                     JMESPathCheck('name', sqlvm3),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG'),
                     JMESPathCheck('sqlManagement', 'Full')
                 ])
    @ResourceGroupPreparer(name_prefix='sqlvm_cli_test_license')
    @SqlVirtualMachinePreparer(parameter_name='sqlvm1')
    def test_sqlvm_update_license_and_sku(self, resource_group, resource_group_location, sqlvm1):
        """Verify `sql vm create`/`sql vm update` combinations of --license-type
        (AHUB/PAYG/DR) and --image-sku, asserting the resource reflects each change.
        """
        # test create sqlvm with sql license type and sku type.
        self.cmd('sql vm create -n {} -g {} -l {} --image-sku {} --license-type {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'Enterprise', 'AHUB'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'AHUB'),
                     JMESPathCheck('sqlImageSku', 'Enterprise'),
                 ])
        # test sku change with license change.
        self.cmd('sql vm update -n {} -g {} --image-sku {} --license-type {}'
                 .format(sqlvm1, resource_group, 'Enterprise', 'PAYG'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlImageSku', 'Enterprise'),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG')
                 ])
        # delete sqlvm
        self.cmd('sql vm delete -n {} -g {} --yes'
                 .format(sqlvm1, resource_group),
                 checks=NoneCheck())
        # test create sqlvm with sql license type PAYG and sku type.
        self.cmd('sql vm create -n {} -g {} -l {} --image-sku {} --license-type {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'Enterprise', 'PAYG'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG'),
                     JMESPathCheck('sqlImageSku', 'Enterprise'),
                 ])
        # test sku change without license change.
        self.cmd('sql vm update -n {} -g {} --image-sku {}'
                 .format(sqlvm1, resource_group, 'Enterprise'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlImageSku', 'Enterprise'),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG')
                 ])
        # test sku change with license change.
        self.cmd('sql vm update -n {} -g {} --image-sku {} --license-type {}'
                 .format(sqlvm1, resource_group, 'Enterprise', 'AHUB'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlImageSku', 'Enterprise'),
                     JMESPathCheck('sqlServerLicenseType', 'AHUB')
                 ])
        # test license change for DR only.
        self.cmd('sql vm update -n {} -g {} --license-type {}'
                 .format(sqlvm1, resource_group, 'DR'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlImageSku', 'Enterprise'),
                     JMESPathCheck('sqlServerLicenseType', 'DR')
                 ])
        # delete sqlvm
        self.cmd('sql vm delete -n {} -g {} --yes'
                 .format(sqlvm1, resource_group),
                 checks=NoneCheck())
        # test create sqlvm with sql license type DR.
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'DR'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlServerLicenseType', 'DR'),
                 ])
class SqlVmGroupScenarioTest(ScenarioTest):
    """Scenario tests for `sql vm group` create/show/update/delete lifecycle."""
    @ResourceGroupPreparer()
    @StorageAccountPreparer(parameter_name='storage_account1', kind='StorageV2')
    @StorageAccountPreparer(parameter_name='storage_account2', kind='StorageV2')
    def test_sqlvm_group_mgmt(self, resource_group, resource_group_location, storage_account1, storage_account2):
        """Create a SQL VM group, then update its storage account, domain FQDN and
        cluster operator account, and finally delete it.
        """
        name = 'sqlvmgroup'
        image_offer = 'SQL2017-WS2016'
        image_sku = 'Enterprise'
        domain = 'domain.com'
        operator_acc = 'myvmadmin'
        sql_service_acc = 'sqlservice'
        # Resolve blob endpoints and access keys for both prepared storage accounts.
        sa_1 = self.cmd('storage account show -n {} -g {}'
                        .format(storage_account1, resource_group)).get_output_in_json()
        key_1 = self.cmd('storage account keys list -n {} -g {}'
                         .format(storage_account1, resource_group)).get_output_in_json()
        sa_2 = self.cmd('storage account show -n {} -g {}'
                        .format(storage_account2, resource_group)).get_output_in_json()
        key_2 = self.cmd('storage account keys list -n {} -g {}'
                         .format(storage_account2, resource_group)).get_output_in_json()
        # create sql vm group
        sqlvmgroup = self.cmd('sql vm group create -n {} -g {} -l {} -i {} -s {} -f {} -p {} -k {} -e {} -u {}'
                              .format(name, resource_group, resource_group_location, image_offer, image_sku,
                                      domain, operator_acc, key_1[0]['value'], sql_service_acc, sa_1['primaryEndpoints']['blob']),
                              checks=[
                                  JMESPathCheck('name', name),
                                  JMESPathCheck('location', resource_group_location),
                                  JMESPathCheck('provisioningState', "Succeeded")
                              ]).get_output_in_json()
        # test list sqlvm should be 1
        self.cmd('sql vm group list -g {}'.format(resource_group), checks=[JMESPathCheck('length(@)', 1)])
        # test show of the group
        self.cmd('sql vm group show -n {} -g {}'
                 .format(name, resource_group),
                 checks=[
                     JMESPathCheck('name', name),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheck('sqlImageOffer', image_offer),
                     JMESPathCheck('sqlImageSku', image_sku),
                     JMESPathCheck('id', sqlvmgroup['id'])
                 ])
        # Change the storage account url and key
        self.cmd('sql vm group update -n {} -g {} -u {} -k {}'
                 .format(name, resource_group, sa_2['primaryEndpoints']['blob'], key_2[0]['value']),
                 checks=[
                     JMESPathCheck('name', name),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('wsfcDomainProfile.storageAccountUrl', sa_2['primaryEndpoints']['blob'])
                 ])
        # change the domain
        self.cmd('sql vm group update -n {} -g {} -f {} -k {}'
                 .format(name, resource_group, 'my' + domain, key_2[0]['value']),
                 checks=[
                     JMESPathCheck('name', name),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('wsfcDomainProfile.domainFqdn', 'my' + domain)
                 ])
        # change the operator account
        self.cmd('sql vm group update -n {} -g {} -p {} -k {}'
                 .format(name, resource_group, 'my' + operator_acc, key_2[0]['value']),
                 checks=[
                     JMESPathCheck('name', name),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('wsfcDomainProfile.clusterOperatorAccount', 'my' + operator_acc)
                 ])
        # test delete vm
        self.cmd('sql vm group delete -n {} -g {} --yes'
                 .format(name, resource_group),
                 checks=NoneCheck())
        # test list sql vm should be empty
        self.cmd('sql vm group list -g {}'.format(resource_group), checks=[NoneCheck()])
class SqlVmAndGroupScenarioTest(ScenarioTest):
    """
    This is a very lengthy test, it may take more than 45 minutes to run.
    """
    @ResourceGroupPreparer()
    @DomainPreparer()
    @SqlVirtualMachinePreparer(parameter_name='sqlvm1')
    @StorageAccountPreparer(kind='StorageV2')
    def test_sqlvm_add_and_remove(self, resource_group, resource_group_location, domainvm, sqlvm1, storage_account):
        """Domain-join a SQL VM, add it to a SQL VM group, then remove it.

        Sequence: configure the domain admin account, join the VM to the domain via
        an ARM quickstart template, create a group, create the SQL VM, add-to-group,
        remove-from-group.
        """
        add_account_script = '\"Set-AdUser -UserPrincipalName admin123@domain.com -Identity admin123 -PasswordNeverExpires $true\"'
        # add account to domain controller
        self.cmd('vm run-command invoke -n {} -g {} --command-id RunPowerShellScript --scripts {}'
                 .format('adVM', resource_group, add_account_script))
        parameters_string = ('location={} domainJoinUserName=domain\\\\admin123 domainJoinUserPassword=SecretPassword123 '
                             'domainFQDN=domain.com vmList={}').format(resource_group_location, sqlvm1)
        # join vms to the domain
        self.cmd('deployment group create --name {} -g {} --template-uri {} --parameters {}'
                 .format('joinvms',
                         resource_group,
                         'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/quickstarts/microsoft.compute/vm-domain-join-existing/azuredeploy.json',
                         parameters_string))
        # Create the sqlvm group
        sa = self.cmd('storage account show -n {} -g {}'
                      .format(storage_account, resource_group)).get_output_in_json()
        key = self.cmd('storage account keys list -n {} -g {}'
                       .format(storage_account, resource_group)).get_output_in_json()
        sqlvmgroup = self.cmd('sql vm group create -n {} -g {} -l {} -i {} -s {} -f {} -p {} -k {} -e {} -u {} --bootstrap-acc {}'
                              .format('cligroup', resource_group, resource_group_location, 'SQL2017-WS2016', 'Enterprise',
                                      'domain.com', 'admin123', key[0]['value'], 'admin123', sa['primaryEndpoints']['blob'], 'admin123')).get_output_in_json()
        # test create sqlvm1
        self.cmd('sql vm create -n {} -g {} -l {} --license-type {} --connectivity-type {} --port {} --sql-auth-update-pwd {} --sql-auth-update-username {} --sql-mgmt-type {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'PAYG', 'PRIVATE', 1433, 'admin123', 'SecretPassword123', 'Full'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('sqlServerLicenseType', 'PAYG')
                 ]).get_output_in_json()
        # Add to group; -p/-s/-b are the operator, service and bootstrap account passwords.
        self.cmd('sql vm add-to-group -n {} -g {} -r {} -p {} -s {} -b {}'
                 .format(sqlvm1, resource_group, sqlvmgroup['id'], 'SecretPassword123', 'SecretPassword123', 'SecretPassword123'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheckExists('sqlVirtualMachineGroupResourceId')
                 ])
        # Remove from group
        self.cmd('sql vm remove-from-group -n {} -g {}'
                 .format(sqlvm1, resource_group),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded")
                 ])
| 51.565672
| 204
| 0.570176
|
acfd29d3883b6bda928babedf15acbbb63244928
| 7,030
|
py
|
Python
|
preprocessing/xml_decoder.py
|
KnottLab/codex
|
f92c0af3116118e56a3feb81c6cc9dbd946f462a
|
[
"MIT"
] | null | null | null |
preprocessing/xml_decoder.py
|
KnottLab/codex
|
f92c0af3116118e56a3feb81c6cc9dbd946f462a
|
[
"MIT"
] | 3
|
2021-08-15T19:53:07.000Z
|
2021-08-30T16:55:28.000Z
|
preprocessing/xml_decoder.py
|
KnottLab/codex
|
f92c0af3116118e56a3feb81c6cc9dbd946f462a
|
[
"MIT"
] | null | null | null |
"""XML decoder for CODEX"""
import xml.etree.ElementTree as ET
import numpy as np
import math
class XMLDecoder:
    """Parse CODEX acquisition metadata out of the experiment XML and the
    Leica XLIF image descriptor into a flat dict (see :meth:`decode`)."""
    def __init__(self):
        # Accumulates every decoded field; populated by decode().
        self.decoded_content = dict()
    def _number_of_cycles(self, root):
        """Count ExposureItem entries whose <Active> text is 'true'."""
        number = 0
        exposures = root.find('Exposures')
        for item in exposures.findall('ExposureItem'):
            active = item.find('Active').text
            if active == 'true':
                number += 1
        return number
    def _number_of_channels(self, root):
        """Return the number of <string> entries under <Channels>."""
        channels = root.find('Channels')
        number = len(channels.findall('string'))
        return number
    # https://github.com/KnottLab/codex/blob/2ff63079a3964dea6e242a20defec5851630042e/functions/data_utils/create_CODEX_object.m#L215
    def _number_of_xy_tiles(self, root):
        """Return (x, y, real_tiles, Ntiles) from the stage tile positions.

        real_tiles is an (x, y) object array holding a two-digit tile index in
        snake (boustrophedon) acquisition order, or 'x' where no tile exists
        (non-rectangular ROI).
        """
        attachment = root.find('Element').find('Data').find('Image').find('Attachment')
        px = []
        py = []
        for tile in attachment.findall('Tile'):
            # flip X and Y here too??
            px.append(tile.get('PosY'))
            py.append(tile.get('PosX'))
        positions = [(i,j) for i,j in zip(px,py)]
        Upx = np.unique(px)
        Upy = np.unique(py)
        x = len(Upx)
        y = len(Upy)
        real_tiles = np.zeros((x,y), dtype=object)
        real_tiles[:] = 'x'
        print(f'Building real tiles array: {real_tiles.shape}')
        # snakes like this:
        # 01 02 03 04
        # 08 07 06 05
        # 09 10 11 12
        tile_num = 0 # start tile numbering at 0
        for i in range(x):
            # Even rows left-to-right, odd rows right-to-left (snake order).
            Ry = np.arange(y) if i%2==0 else np.arange(y)[::-1]
            for j in Ry:
                if (Upx[i], Upy[j]) in positions:
                    real_tiles[i,j] = f'{tile_num:02d}'
                    tile_num += 1
        Ntiles = len(positions)
        return x, y, real_tiles, Ntiles
    def _number_of_z_stacks(self, root):
        """Return the z-stack depth declared in <ZstackDepth>."""
        z_stacks = int(root.find('ZstackDepth').text)
        return z_stacks
    def _get_tile_width(self, root):
        """Return (width, height, overlap_x, overlap_y, overlap_width, overlap_height).

        NOTE(review): width and height are both read from the FIRST
        <DimensionDescription> (find() returns the first match), so height always
        equals width. Presumably the second DimensionDescription carries the Y
        size — confirm against the LIF/XLIF schema before relying on non-square
        tiles.
        """
        dimension = root.find('Element').find('Data').find('Image').find('ImageDescription').find('Dimensions')
        width = int(dimension.find('DimensionDescription').get("NumberOfElements"))
        height = int(dimension.find('DimensionDescription').get('NumberOfElements'))
        overlap_x = 0
        overlap_y = 0
        attachments = root.find('Element').find('Data').find('Image').findall('Attachment')
        for a in attachments:
            if a.get("Name") == "HardwareSetting":
                atl = a.find("ATLCameraSettingDefinition")
                xy = atl.find('XYStageConfiguratorSettings')
                stitch = xy.find('StitchingSettings')
                overlap_x = float(stitch.get('OverlapPercentageX'))
                overlap_y = float(stitch.get('OverlapPercentageY'))
        # Overlap in pixels = full size minus the non-overlapping portion.
        overlap_width = width - math.floor((1 - overlap_x) * width)
        overlap_height = height - math.floor((1 - overlap_y) * height)
        return width, height, overlap_x, overlap_y, overlap_width, overlap_height
    def _get_resolutionh(self, root):
        """Return pixel size in micrometers (Length is presumably in meters —
        the 10**6 factor converts m -> um; TODO confirm units in the schema)."""
        dimension = root.find('Element').find('Data').find('Image').find('ImageDescription').find('Dimensions')
        width = int(dimension.find('DimensionDescription').get("NumberOfElements"))
        length = float(dimension.find('DimensionDescription').get('Length'))
        return (10 ** 6) * length / width
    def _get_marker_names(self, root, num_cycles, num_channels):
        """Collect antibody names per cycle; returns the raw name list, a
        uniquified list (name_index), and both reshaped to (num_cycles, num_channels)."""
        exposure_items = root.find('Exposures').findall('ExposureItem')
        marker_list = []
        marker_names = []
        for item in exposure_items[:num_cycles]:
            antibody = item.find('AntiBody').findall('string')
            for a in antibody:
                # '/' and ' ' are replaced so names are safe as file-name components.
                marker_names.append(a.text.replace('/', '-').replace(' ', '-'))
        for i, marker in enumerate(marker_names):
            # Suffix with the running index to disambiguate duplicate markers.
            marker_list.append(marker + '_' + str(i))
        marker_names_array = np.array(marker_names)
        marker_names_array = marker_names_array.reshape(num_cycles, num_channels)
        marker_list = np.array(marker_list)
        marker_array = marker_list.reshape(num_cycles, num_channels)
        return marker_names, marker_list, marker_array, marker_names_array
    def _get_exposure_times(self, root):
        """Return the <decimal> exposure times of the first ExposureItem as ints."""
        exposure_item = root.find('Exposures').find('ExposureItem')
        exposure_time = exposure_item.find('ExposuresTime')
        decimal_values = []
        for decimal in exposure_time.findall('decimal'):
            decimal_values.append(int(decimal.text))
        return decimal_values
    def _get_wavelengths(self, root):
        """Return the <decimal> wavelengths of the first ExposureItem as ints."""
        exposure_item = root.find('Exposures').find('ExposureItem')
        wavelength = exposure_item.find('WaveLength')
        wavelength_values = []
        for values in wavelength.findall('decimal'):
            wavelength_values.append(int(values.text))
        return wavelength_values
    def _get_channels(self, root):
        """Return the channel name strings under <Channels>."""
        channels = root.find("Channels")
        channel_names = []
        for name in channels.findall('string'):
            channel_names.append(name.text)
        return channel_names
    def decode(self, file_content_xml, file_content_xlif, cycle_folders):
        """Parse both XML documents and return the populated metadata dict.

        :param file_content_xml: experiment XML string (cycles/channels/exposures)
        :param file_content_xlif: Leica XLIF string (tiles/dimensions/stitching)
        :param cycle_folders: list of per-cycle folder names, stored verbatim
        """
        root_xml = ET.fromstring(file_content_xml)
        root_xlif = ET.fromstring(file_content_xlif)
        self.decoded_content['roi'] = 1
        self.decoded_content['ncl'] = self._number_of_cycles(root_xml)
        self.decoded_content['cycle_folders'] = cycle_folders
        self.decoded_content['nch'] = self._number_of_channels(root_xml)
        self.decoded_content['nz'] = self._number_of_z_stacks(root_xml)
        tile_info = self._number_of_xy_tiles(root_xlif)
        self.decoded_content['nx'] = tile_info[0]
        self.decoded_content['ny'] = tile_info[1]
        self.decoded_content['real_tiles'] = tile_info[2]
        self.decoded_content['Ntiles'] = tile_info[3]
        # self.decoded_content['RNx'] = # for dealing with non-rectangular ROIs
        # self.decoded_content['RNy'] = # for dealing with non-rectangular ROIs
        # self.decoded_content['real_tiles'] = # for dealing with non-rectangular ROIs
        self.decoded_content['tileWidth'], self.decoded_content['tileHeight'], self.decoded_content['ox'], \
        self.decoded_content['oy'], self.decoded_content['width'], self.decoded_content[
            'height'] = self._get_tile_width(root_xlif)
        self.decoded_content['exposure_times'] = self._get_exposure_times(root_xml)
        self.decoded_content['channels'] = self._get_channels(root_xml)
        self.decoded_content['wavelengths'] = self._get_wavelengths(root_xml)
        self.decoded_content['resolution'] = self._get_resolutionh(root_xlif)
        self.decoded_content['marker_names'], self.decoded_content['markers'], \
        self.decoded_content['marker_array'], self.decoded_content['marker_names_array'] = self._get_marker_names(
            root_xml, self.decoded_content['ncl'],
            self.decoded_content['nch'])
        return self.decoded_content
| 42.865854
| 133
| 0.636842
|
acfd2bfed3934a5e95608624afbcaeac9b6a8943
| 1,493
|
py
|
Python
|
grmodel/figures/FigureCommon.py
|
meyer-lab/ps-growth-model
|
3d1b53f4310de3ba42aa1db5847451be1642c5a8
|
[
"MIT"
] | 2
|
2019-10-10T15:40:03.000Z
|
2019-10-21T00:46:54.000Z
|
grmodel/figures/FigureCommon.py
|
meyer-lab/ps-growth-model
|
3d1b53f4310de3ba42aa1db5847451be1642c5a8
|
[
"MIT"
] | 36
|
2019-09-24T03:40:09.000Z
|
2020-02-26T17:18:07.000Z
|
grmodel/figures/FigureCommon.py
|
meyer-lab/ps-growth-model
|
3d1b53f4310de3ba42aa1db5847451be1642c5a8
|
[
"MIT"
] | 1
|
2020-02-03T00:14:08.000Z
|
2020-02-03T00:14:08.000Z
|
from string import ascii_lowercase
from matplotlib import gridspec, pyplot as plt, rcParams
import seaborn as sns
import svgutils.transform as st
# Global matplotlib tweaks: tighter tick-label padding for compact panels.
rcParams["xtick.major.pad"] = 1.5
rcParams["ytick.major.pad"] = 1.5
rcParams["xtick.minor.pad"] = 1.5
rcParams["ytick.minor.pad"] = 1.5
# Compact legend spacing and handle sizing.
rcParams["legend.labelspacing"] = 0.06
rcParams["legend.handlelength"] = 1.0
rcParams["legend.handletextpad"] = 0.6
rcParams["legend.borderaxespad"] = 0.25
def getSetup(figsize, gridd):
    """Create a figure with a (rows, cols) grid of subplots.

    Returns a tuple (axes_list, figure); axes are created row-major, one per
    grid cell.
    """
    sns.set(style="whitegrid", font_scale=0.7, color_codes=True, palette="colorblind", rc={"grid.linestyle": "dotted", "axes.linewidth": 0.6})
    # Build the figure and its GridSpec, then materialize every cell as an axis.
    fig = plt.figure(figsize=figsize, constrained_layout=True)
    grid = gridspec.GridSpec(*gridd, figure=fig)
    axes = [fig.add_subplot(grid[cell]) for cell in range(gridd[0] * gridd[1])]
    return (axes, fig)
def subplotLabel(axs):
    """Stamp each axis in *axs* with a bold lowercase panel letter (a, b, c, ...)."""
    for idx, axis in enumerate(axs):
        letter = ascii_lowercase[idx]
        axis.text(-0.2, 1.25, letter, transform=axis.transAxes, fontsize=16, fontweight="bold", va="top")
def overlayCartoon(figFile, cartoonFile, x, y, scalee=1):
    """Overlay an SVG cartoon onto an existing SVG figure file, in place.

    The cartoon is positioned at (x, y) and scaled by *scalee* before being
    appended to the figure, which is then saved back to *figFile*.
    """
    base_figure = st.fromfile(figFile)
    overlay = st.fromfile(cartoonFile).getroot()
    overlay.moveto(x, y, scale=scalee)
    base_figure.append(overlay)
    base_figure.save(figFile)
| 30.469388
| 142
| 0.687207
|
acfd2c0e3f5b6e7a44f89f2ee1a5f1381044a640
| 33,367
|
py
|
Python
|
scripts/pyqtgraph-develop/pyqtgraph/parametertree/Parameter.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/pyqtgraph/parametertree/Parameter.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/pyqtgraph/parametertree/Parameter.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
from ..Qt import QtGui, QtCore
import os, weakref, re
from ..pgcollections import OrderedDict
from ..python2_3 import asUnicode, basestring
from .ParameterItem import ParameterItem
# Registry of parameter type-name string -> Parameter subclass.
PARAM_TYPES = {}
# Reverse registry: Parameter subclass -> registered type-name string.
PARAM_NAMES = {}
def registerParameterType(name, cls, override=False):
    """Register *cls* as the Parameter subclass for type name *name*.

    Raises if *name* is already registered, unless override=True.
    """
    global PARAM_TYPES
    if not override and name in PARAM_TYPES:
        raise Exception("Parameter type '%s' already exists (use override=True to replace)" % name)
    PARAM_TYPES[name] = cls
    PARAM_NAMES[cls] = name
def __reload__(old):
    # Preserve the type registries across a module reload so previously
    # registered parameter types are not lost.
    for registry, key in ((PARAM_TYPES, 'PARAM_TYPES'), (PARAM_NAMES, 'PARAM_NAMES')):
        registry.update(old.get(key, {}))
class Parameter(QtCore.QObject):
"""
A Parameter is the basic unit of data in a parameter tree. Each parameter has
a name, a type, a value, and several other properties that modify the behavior of the
Parameter. Parameters may have parent / child / sibling relationships to construct
organized hierarchies. Parameters generally do not have any inherent GUI or visual
interpretation; instead they manage ParameterItem instances which take care of
display and user interaction.
Note: It is fairly uncommon to use the Parameter class directly; mostly you
will use subclasses which provide specialized type and data handling. The static
pethod Parameter.create(...) is an easy way to generate instances of these subclasses.
For more Parameter types, see ParameterTree.parameterTypes module.
=================================== =========================================================
**Signals:**
sigStateChanged(self, change, info) Emitted when anything changes about this parameter at
all.
The second argument is a string indicating what changed
('value', 'childAdded', etc..)
The third argument can be any extra information about
the change
sigTreeStateChanged(self, changes) Emitted when any child in the tree changes state
(but only if monitorChildren() is called)
the format of *changes* is [(param, change, info), ...]
sigValueChanged(self, value) Emitted when value is finished changing
sigValueChanging(self, value) Emitted immediately for all value changes,
including during editing.
sigChildAdded(self, child, index) Emitted when a child is added
sigChildRemoved(self, child) Emitted when a child is removed
sigRemoved(self) Emitted when this parameter is removed
sigParentChanged(self, parent) Emitted when this parameter's parent has changed
sigLimitsChanged(self, limits) Emitted when this parameter's limits have changed
sigDefaultChanged(self, default) Emitted when this parameter's default value has changed
sigNameChanged(self, name) Emitted when this parameter's name has changed
sigOptionsChanged(self, opts) Emitted when any of this parameter's options have changed
=================================== =========================================================
"""
## name, type, limits, etc.
## can also carry UI hints (slider vs spinbox, etc.)
sigValueChanged = QtCore.Signal(object, object) ## self, value emitted when value is finished being edited
sigValueChanging = QtCore.Signal(object, object) ## self, value emitted as value is being edited
sigChildAdded = QtCore.Signal(object, object, object) ## self, child, index
sigChildRemoved = QtCore.Signal(object, object) ## self, child
sigRemoved = QtCore.Signal(object) ## self
sigParentChanged = QtCore.Signal(object, object) ## self, parent
sigLimitsChanged = QtCore.Signal(object, object) ## self, limits
sigDefaultChanged = QtCore.Signal(object, object) ## self, default
sigNameChanged = QtCore.Signal(object, object) ## self, name
sigOptionsChanged = QtCore.Signal(object, object) ## self, {opt:val, ...}
## Emitted when anything changes about this parameter at all.
## The second argument is a string indicating what changed ('value', 'childAdded', etc..)
## The third argument can be any extra information about the change
sigStateChanged = QtCore.Signal(object, object, object) ## self, change, info
## emitted when any child in the tree changes state
## (but only if monitorChildren() is called)
sigTreeStateChanged = QtCore.Signal(object, object) # self, changes
# changes = [(param, change, info), ...]
# bad planning.
#def __new__(cls, *args, **opts):
#try:
#cls = PARAM_TYPES[opts['type']]
#except KeyError:
#pass
#return QtCore.QObject.__new__(cls, *args, **opts)
@staticmethod
def create(**opts):
"""
Static method that creates a new Parameter (or subclass) instance using
opts['type'] to select the appropriate class.
All options are passed directly to the new Parameter's __init__ method.
Use registerParameterType() to add new class types.
"""
typ = opts.get('type', None)
if typ is None:
cls = Parameter
else:
cls = PARAM_TYPES[opts['type']]
return cls(**opts)
    def __init__(self, **opts):
        """
        Initialize a Parameter object. Although it is rare to directly create a
        Parameter instance, the options available to this method are also allowed
        by most Parameter subclasses.
        ======================= =========================================================
        **Keyword Arguments:**
        name                    The name to give this Parameter. This is the name that
                                will appear in the left-most column of a ParameterTree
                                for this Parameter.
        value                   The value to initially assign to this Parameter.
        default                 The default value for this Parameter (most Parameters
                                provide an option to 'reset to default').
        children                A list of children for this Parameter. Children
                                may be given either as a Parameter instance or as a
                                dictionary to pass to Parameter.create(). In this way,
                                it is possible to specify complex hierarchies of
                                Parameters from a single nested data structure.
        readonly                If True, the user will not be allowed to edit this
                                Parameter. (default=False)
        enabled                 If False, any widget(s) for this parameter will appear
                                disabled. (default=True)
        visible                 If False, the Parameter will not appear when displayed
                                in a ParameterTree. (default=True)
        renamable               If True, the user may rename this Parameter.
                                (default=False)
        removable               If True, the user may remove this Parameter.
                                (default=False)
        expanded                If True, the Parameter will appear expanded when
                                displayed in a ParameterTree (its children will be
                                visible). (default=True)
        title                   (str or None) If specified, then the parameter will be
                                displayed to the user using this string as its name.
                                However, the parameter will still be referred to
                                internally using the *name* specified above. Note that
                                this option is not compatible with renamable=True.
                                (default=None; added in version 0.9.9)
        ======================= =========================================================
        """
        QtCore.QObject.__init__(self)
        # Defaults first; caller-supplied opts override them below.
        self.opts = {
            'type': None,
            'readonly': False,
            'visible': True,
            'enabled': True,
            'renamable': False,
            'removable': False,
            'strictNaming': False, # forces name to be usable as a python variable
            'expanded': True,
            'title': None,
            #'limits': None, ## This is a bad plan--each parameter type may have a different data type for limits.
        }
        value = opts.get('value', None)
        name = opts.get('name', None)
        self.opts.update(opts)
        # value/name are deliberately deferred: setName()/setValue() below must
        # see a change relative to None so their change signals fire.
        self.opts['value'] = None # will be set later.
        self.opts['name'] = None
        self.childs = []
        self.names = {} ## map name:child
        self.items = weakref.WeakKeyDictionary() ## keeps track of tree items representing this parameter
        self._parent = None
        self.treeStateChanges = [] ## cache of tree state changes to be delivered on next emit
        self.blockTreeChangeEmit = 0
        #self.monitoringChildren = False ## prevent calling monitorChildren more than once
        if not isinstance(name, basestring):
            raise Exception("Parameter must have a string name specified in opts.")
        self.setName(name)
        self.addChildren(self.opts.get('children', []))
        self.opts['value'] = None
        if value is not None:
            self.setValue(value)
        if 'default' not in self.opts:
            self.opts['default'] = None
            self.setDefault(self.opts['value'])
        ## Connect all state changed signals to the general sigStateChanged
        self.sigValueChanged.connect(lambda param, data: self.emitStateChanged('value', data))
        self.sigChildAdded.connect(lambda param, *data: self.emitStateChanged('childAdded', data))
        self.sigChildRemoved.connect(lambda param, data: self.emitStateChanged('childRemoved', data))
        self.sigParentChanged.connect(lambda param, data: self.emitStateChanged('parent', data))
        self.sigLimitsChanged.connect(lambda param, data: self.emitStateChanged('limits', data))
        self.sigDefaultChanged.connect(lambda param, data: self.emitStateChanged('default', data))
        self.sigNameChanged.connect(lambda param, data: self.emitStateChanged('name', data))
        self.sigOptionsChanged.connect(lambda param, data: self.emitStateChanged('options', data))
        #self.watchParam(self) ## emit treechange signals if our own state changes
def name(self):
"""Return the name of this Parameter."""
return self.opts['name']
def setName(self, name):
"""Attempt to change the name of this parameter; return the actual name.
(The parameter may reject the name change or automatically pick a different name)"""
if self.opts['strictNaming']:
if len(name) < 1 or re.search(r'\W', name) or re.match(r'\d', name[0]):
raise Exception("Parameter name '%s' is invalid. (Must contain only alphanumeric and underscore characters and may not start with a number)" % name)
parent = self.parent()
if parent is not None:
name = parent._renameChild(self, name) ## first ask parent if it's ok to rename
if self.opts['name'] != name:
self.opts['name'] = name
self.sigNameChanged.emit(self, name)
return name
def type(self):
"""Return the type string for this Parameter."""
return self.opts['type']
def isType(self, typ):
"""
Return True if this parameter type matches the name *typ*.
This can occur either of two ways:
- If self.type() == *typ*
- If this parameter's class is registered with the name *typ*
"""
if self.type() == typ:
return True
global PARAM_TYPES
cls = PARAM_TYPES.get(typ, None)
if cls is None:
raise Exception("Type name '%s' is not registered." % str(typ))
return self.__class__ is cls
def childPath(self, child):
"""
Return the path of parameter names from self to child.
If child is not a (grand)child of self, return None.
"""
path = []
while child is not self:
path.insert(0, child.name())
child = child.parent()
if child is None:
return None
return path
    def setValue(self, value, blockSignal=None):
        """
        Set the value of this Parameter; return the actual value that was set.
        (this may be different from the value that was requested)
        If *blockSignal* is a slot connected to sigValueChanged, it is
        temporarily disconnected so it does not react to this change.
        """
        try:
            if blockSignal is not None:
                self.sigValueChanged.disconnect(blockSignal)
            value = self._interpretValue(value)
            if self.opts['value'] == value:
                return value  ## no change; skip the signal
            self.opts['value'] = value
            self.sigValueChanged.emit(self, value)
        finally:
            ## always restore the blocked slot, even if _interpretValue raised
            if blockSignal is not None:
                self.sigValueChanged.connect(blockSignal)
        return value
def _interpretValue(self, v):
return v
def value(self):
"""
Return the value of this Parameter.
"""
return self.opts['value']
def getValues(self):
"""Return a tree of all values that are children of this parameter"""
vals = OrderedDict()
for ch in self:
vals[ch.name()] = (ch.value(), ch.getValues())
return vals
    def saveState(self, filter=None):
        """
        Return a structure representing the entire state of the parameter tree.
        The tree state may be restored from this structure using restoreState().
        If *filter* is set to 'user', then only user-settable data will be included in the
        returned state.
        """
        if filter is None:
            state = self.opts.copy()
            if state['type'] is None:
                ## type was never set explicitly; recover the registered type
                ## name for this class so the state can be re-created later
                global PARAM_NAMES
                state['type'] = PARAM_NAMES.get(type(self), None)
        elif filter == 'user':
            state = {'value': self.value()}
        else:
            raise ValueError("Unrecognized filter argument: '%s'" % filter)
        ## recursively capture child states, keyed by child name
        ch = OrderedDict([(ch.name(), ch.saveState(filter=filter)) for ch in self])
        if len(ch) > 0:
            state['children'] = ch
        return state
    def restoreState(self, state, recursive=True, addChildren=True, removeChildren=True, blockSignals=True):
        """
        Restore the state of this parameter and its children from a structure generated using saveState()
        If recursive is True, then attempt to restore the state of child parameters as well.
        If addChildren is True, then any children which are referenced in the state object will be
        created if they do not already exist.
        If removeChildren is True, then any children which are not referenced in the state object will
        be removed.
        If blockSignals is True, no signals will be emitted until the tree has been completely restored.
        This prevents signal handlers from responding to a partially-rebuilt network.
        """
        state = state.copy()
        childState = state.pop('children', [])
        ## list of children may be stored either as list or dict.
        if isinstance(childState, dict):
            cs = []
            for k,v in childState.items():
                cs.append(v.copy())
                cs[-1].setdefault('name', k)
            childState = cs
        if blockSignals:
            self.blockTreeChangeSignal()
        try:
            self.setOpts(**state)
            if not recursive:
                return
            ptr = 0  ## pointer to first child that has not been restored yet
            foundChilds = set()
            #print "==============", self.name()
            for ch in childState:
                name = ch['name']
                #typ = ch.get('type', None)
                #print('child: %s, %s' % (self.name()+'.'+name, typ))
                ## First, see if there is already a child with this name
                gotChild = False
                for i, ch2 in enumerate(self.childs[ptr:]):
                    #print "  ", ch2.name(), ch2.type()
                    if ch2.name() != name: # or not ch2.isType(typ):
                        continue
                    gotChild = True
                    #print "    found it"
                    if i != 0:  ## move parameter to next position
                        #self.removeChild(ch2)
                        self.insertChild(ptr, ch2)
                        #print "  moved to position", ptr
                    ch2.restoreState(ch, recursive=recursive, addChildren=addChildren, removeChildren=removeChildren)
                    foundChilds.add(ch2)
                    break
                if not gotChild:
                    if not addChildren:
                        #print "  ignored child"
                        continue
                    #print "    created new"
                    ch2 = Parameter.create(**ch)
                    self.insertChild(ptr, ch2)
                    foundChilds.add(ch2)
                ptr += 1
            if removeChildren:
                ## prune any existing children the state did not mention
                for ch in self.childs[:]:
                    if ch not in foundChilds:
                        #print "  remove:", ch
                        self.removeChild(ch)
        finally:
            if blockSignals:
                self.unblockTreeChangeSignal()
def defaultValue(self):
"""Return the default value for this parameter."""
return self.opts['default']
def setDefault(self, val):
"""Set the default value for this parameter."""
if self.opts['default'] == val:
return
self.opts['default'] = val
self.sigDefaultChanged.emit(self, val)
def setToDefault(self):
"""Set this parameter's value to the default."""
if self.hasDefault():
self.setValue(self.defaultValue())
def hasDefault(self):
"""Returns True if this parameter has a default value."""
return 'default' in self.opts
def valueIsDefault(self):
"""Returns True if this parameter's value is equal to the default value."""
return self.value() == self.defaultValue()
def setLimits(self, limits):
"""Set limits on the acceptable values for this parameter.
The format of limits depends on the type of the parameter and
some parameters do not make use of limits at all."""
if 'limits' in self.opts and self.opts['limits'] == limits:
return
self.opts['limits'] = limits
self.sigLimitsChanged.emit(self, limits)
return limits
def writable(self):
"""
Returns True if this parameter's value can be changed by the user.
Note that the value of the parameter can *always* be changed by
calling setValue().
"""
return not self.readonly()
def setWritable(self, writable=True):
"""Set whether this Parameter should be editable by the user. (This is
exactly the opposite of setReadonly)."""
self.setOpts(readonly=not writable)
def readonly(self):
"""
Return True if this parameter is read-only. (this is the opposite of writable())
"""
return self.opts.get('readonly', False)
def setReadonly(self, readonly=True):
"""Set whether this Parameter's value may be edited by the user
(this is the opposite of setWritable())."""
self.setOpts(readonly=readonly)
    def setOpts(self, **opts):
        """
        Set any arbitrary options on this parameter.
        The exact behavior of this function will depend on the parameter type, but
        most parameters will accept a common set of options: value, name, limits,
        default, readonly, removable, renamable, visible, enabled, and expanded.
        See :func:`Parameter.__init__ <pyqtgraph.parametertree.Parameter.__init__>`
        for more information on default options.
        """
        changed = OrderedDict()
        for k in opts:
            ## value/name/limits/default have dedicated setters that emit their
            ## own specific signals; route those keys through them
            if k == 'value':
                self.setValue(opts[k])
            elif k == 'name':
                self.setName(opts[k])
            elif k == 'limits':
                self.setLimits(opts[k])
            elif k == 'default':
                self.setDefault(opts[k])
            elif k not in self.opts or self.opts[k] != opts[k]:
                ## other options are stored directly, but only when they change
                self.opts[k] = opts[k]
                changed[k] = opts[k]
        if len(changed) > 0:
            self.sigOptionsChanged.emit(self, changed)
    def emitStateChanged(self, changeDesc, data):
        ## Emits stateChanged signal and
        ## requests emission of new treeStateChanged signal
        self.sigStateChanged.emit(self, changeDesc, data)
        #self.treeStateChanged(self, changeDesc, data)
        ## queue the change so it is also reported (possibly batched)
        ## through sigTreeStateChanged
        self.treeStateChanges.append((self, changeDesc, data))
        self.emitTreeChanges()
    def makeTreeItem(self, depth):
        """
        Return a TreeWidgetItem suitable for displaying/controlling the content of
        this parameter. This is called automatically when a ParameterTree attempts
        to display this Parameter.
        Most subclasses will want to override this function.
        :param depth: nesting depth of this item within the tree.
        """
        if hasattr(self, 'itemClass'):
            #print "Param:", self, "Make item from itemClass:", self.itemClass
            ## subclass requested a custom item class
            return self.itemClass(self, depth)
        else:
            return ParameterItem(self, depth=depth)
def addChild(self, child, autoIncrementName=None):
"""
Add another parameter to the end of this parameter's child list.
See insertChild() for a description of the *autoIncrementName*
argument.
"""
return self.insertChild(len(self.childs), child, autoIncrementName=autoIncrementName)
def addChildren(self, children):
"""
Add a list or dict of children to this parameter. This method calls
addChild once for each value in *children*.
"""
## If children was specified as dict, then assume keys are the names.
if isinstance(children, dict):
ch2 = []
for name, opts in children.items():
if isinstance(opts, dict) and 'name' not in opts:
opts = opts.copy()
opts['name'] = name
ch2.append(opts)
children = ch2
for chOpts in children:
#print self, "Add child:", type(chOpts), id(chOpts)
self.addChild(chOpts)
    def insertChild(self, pos, child, autoIncrementName=None):
        """
        Insert a new child at pos.
        If pos is a Parameter, then insert at the position of that Parameter.
        If child is a dict, then a parameter is constructed using
        :func:`Parameter.create <pyqtgraph.parametertree.Parameter.create>`.
        By default, the child's 'autoIncrementName' option determines whether
        the name will be adjusted to avoid prior name collisions. This
        behavior may be overridden by specifying the *autoIncrementName*
        argument. This argument was added in version 0.9.9.
        """
        if isinstance(child, dict):
            child = Parameter.create(**child)
        name = child.name()
        if name in self.names and child is not self.names[name]:
            ## name collision: either auto-rename the new child or refuse it
            if autoIncrementName is True or (autoIncrementName is None and child.opts.get('autoIncrementName', False)):
                name = self.incrementName(name)
                child.setName(name)
            else:
                raise Exception("Already have child named %s" % str(name))
        if isinstance(pos, Parameter):
            pos = self.childs.index(pos)
        ## batch all tree-change signals produced by the reparenting
        with self.treeChangeBlocker():
            if child.parent() is not None:
                child.remove()
            self.names[name] = child
            self.childs.insert(pos, child)
            child.parentChanged(self)
            self.sigChildAdded.emit(self, child, pos)
            child.sigTreeStateChanged.connect(self.treeStateChanged)
        return child
    def removeChild(self, child):
        """Remove a child parameter; raises Exception when *child* is not
        actually one of this parameter's children."""
        name = child.name()
        if name not in self.names or self.names[name] is not child:
            raise Exception("Parameter %s is not my child; can't remove." % str(child))
        del self.names[name]
        self.childs.pop(self.childs.index(child))
        child.parentChanged(None)
        self.sigChildRemoved.emit(self, child)
        try:
            ## stop forwarding the child's tree-change events
            child.sigTreeStateChanged.disconnect(self.treeStateChanged)
        except (TypeError, RuntimeError):  ## already disconnected
            pass
def clearChildren(self):
"""Remove all child parameters."""
for ch in self.childs[:]:
self.removeChild(ch)
def children(self):
"""Return a list of this parameter's children.
Warning: this overrides QObject.children
"""
return self.childs[:]
def hasChildren(self):
"""Return True if this Parameter has children."""
return len(self.childs) > 0
def parentChanged(self, parent):
"""This method is called when the parameter's parent has changed.
It may be useful to extend this method in subclasses."""
self._parent = parent
self.sigParentChanged.emit(self, parent)
def parent(self):
"""Return the parent of this parameter."""
return self._parent
def remove(self):
"""Remove this parameter from its parent's child list"""
parent = self.parent()
if parent is None:
raise Exception("Cannot remove; no parent.")
parent.removeChild(self)
self.sigRemoved.emit(self)
def incrementName(self, name):
## return an unused name by adding a number to the name given
base, num = re.match('(.*)(\d*)', name).groups()
numLen = len(num)
if numLen == 0:
num = 2
numLen = 1
else:
num = int(num)
while True:
newName = base + ("%%0%dd"%numLen) % num
if newName not in self.names:
return newName
num += 1
def __iter__(self):
for ch in self.childs:
yield ch
def __getitem__(self, names):
"""Get the value of a child parameter. The name may also be a tuple giving
the path to a sub-parameter::
value = param[('child', 'grandchild')]
"""
if not isinstance(names, tuple):
names = (names,)
return self.param(*names).value()
def __setitem__(self, names, value):
"""Set the value of a child parameter. The name may also be a tuple giving
the path to a sub-parameter::
param[('child', 'grandchild')] = value
"""
if isinstance(names, basestring):
names = (names,)
return self.param(*names).setValue(value)
def child(self, *names):
"""Return a child parameter.
Accepts the name of the child or a tuple (path, to, child)
Added in version 0.9.9. Earlier versions used the 'param' method, which is still
implemented for backward compatibility.
"""
try:
param = self.names[names[0]]
except KeyError:
raise KeyError("Parameter %s has no child named %s" % (self.name(), names[0]))
if len(names) > 1:
return param.child(*names[1:])
else:
return param
def param(self, *names):
# for backward compatibility.
return self.child(*names)
def __repr__(self):
return asUnicode("<%s '%s' at 0x%x>") % (self.__class__.__name__, self.name(), id(self))
    def __getattr__(self, attr):
        ## Leaving this undocumented because I might like to remove it in the future..
        #print type(self), attr
        ## Fallback lookup: allows `param.childName` as a deprecated alias
        ## for `param.param('childName')`.
        if 'names' not in self.__dict__:
            ## guard against recursion before 'names' has been assigned
            raise AttributeError(attr)
        if attr in self.names:
            import traceback
            traceback.print_stack()
            print("Warning: Use of Parameter.subParam is deprecated. Use Parameter.param(name) instead.")
            return self.param(attr)
        else:
            raise AttributeError(attr)
def _renameChild(self, child, name):
## Only to be called from Parameter.rename
if name in self.names:
return child.name()
self.names[name] = child
del self.names[child.name()]
return name
def registerItem(self, item):
self.items[item] = None
def hide(self):
"""Hide this parameter. It and its children will no longer be visible in any ParameterTree
widgets it is connected to."""
self.show(False)
def show(self, s=True):
"""Show this parameter. """
self.opts['visible'] = s
self.sigOptionsChanged.emit(self, {'visible': s})
def treeChangeBlocker(self):
"""
Return an object that can be used to temporarily block and accumulate
sigTreeStateChanged signals. This is meant to be used when numerous changes are
about to be made to the tree and only one change signal should be
emitted at the end.
Example::
with param.treeChangeBlocker():
param.addChild(...)
param.removeChild(...)
param.setValue(...)
"""
return SignalBlocker(self.blockTreeChangeSignal, self.unblockTreeChangeSignal)
def blockTreeChangeSignal(self):
"""
Used to temporarily block and accumulate tree change signals.
*You must remember to unblock*, so it is advisable to use treeChangeBlocker() instead.
"""
self.blockTreeChangeEmit += 1
def unblockTreeChangeSignal(self):
"""Unblocks enission of sigTreeStateChanged and flushes the changes out through a single signal."""
self.blockTreeChangeEmit -= 1
self.emitTreeChanges()
def treeStateChanged(self, param, changes):
"""
Called when the state of any sub-parameter has changed.
============== ================================================================
**Arguments:**
param The immediate child whose tree state has changed.
note that the change may have originated from a grandchild.
changes List of tuples describing all changes that have been made
in this event: (param, changeDescr, data)
============== ================================================================
This function can be extended to react to tree state changes.
"""
self.treeStateChanges.extend(changes)
self.emitTreeChanges()
def emitTreeChanges(self):
if self.blockTreeChangeEmit == 0:
changes = self.treeStateChanges
self.treeStateChanges = []
if len(changes) > 0:
self.sigTreeStateChanged.emit(self, changes)
class SignalBlocker(object):
    """Minimal context manager that invokes *enterFn* on entry and *exitFn*
    on exit; exceptions raised inside the block are not suppressed."""
    def __init__(self, enterFn, exitFn):
        self.enterFn = enterFn
        self.exitFn = exitFn
    def __enter__(self):
        self.enterFn()
    def __exit__(self, exc_type, exc_value, tb):
        self.exitFn()
| 42.833119
| 165
| 0.550574
|
acfd2d205781f0abbb203462608004feb7562b40
| 132,040
|
py
|
Python
|
praw/models/reddit/subreddit.py
|
igosad/praw
|
d1788aa6d7c31a92895df33456ac92241610cf34
|
[
"BSD-2-Clause"
] | null | null | null |
praw/models/reddit/subreddit.py
|
igosad/praw
|
d1788aa6d7c31a92895df33456ac92241610cf34
|
[
"BSD-2-Clause"
] | null | null | null |
praw/models/reddit/subreddit.py
|
igosad/praw
|
d1788aa6d7c31a92895df33456ac92241610cf34
|
[
"BSD-2-Clause"
] | null | null | null |
"""Provide the Subreddit class."""
# pylint: disable=too-many-lines
import socket
from copy import deepcopy
from csv import writer
from io import StringIO
from json import dumps, loads
from os.path import basename, dirname, isfile, join
from typing import List
from urllib.parse import urljoin
from xml.etree.ElementTree import XML
import websocket
from prawcore import Redirect
from ...const import API_PATH, JPEG_HEADER
from ...exceptions import (
ClientException,
InvalidFlairTemplateID,
MediaPostFailed,
RedditAPIException,
TooLargeMediaException,
WebSocketException,
)
from ...util.cache import cachedproperty
from ..listing.generator import ListingGenerator
from ..listing.mixins import SubredditListingMixin
from ..util import permissions_string, stream_generator
from .base import RedditBase
from .emoji import SubredditEmoji
from .inline_media import InlineMedia
from .mixins import FullnameMixin, MessageableMixin
from .modmail import ModmailConversation
from .removal_reasons import SubredditRemovalReasons
from .rules import SubredditRules
from .widgets import SubredditWidgets, WidgetEncoder
from .wikipage import WikiPage
class Subreddit(MessageableMixin, SubredditListingMixin, FullnameMixin, RedditBase):
"""A class for Subreddits.
To obtain an instance of this class for subreddit ``r/redditdev`` execute:
.. code-block:: python
subreddit = reddit.subreddit("redditdev")
While ``r/all`` is not a real subreddit, it can still be treated like one. The
following outputs the titles of the 25 hottest submissions in ``r/all``:
.. code-block:: python
for submission in reddit.subreddit("all").hot(limit=25):
print(submission.title)
Multiple subreddits can be combined with a ``+`` like so:
.. code-block:: python
for submission in reddit.subreddit("redditdev+learnpython").top("all"):
print(submission)
Subreddits can be filtered from combined listings as follows. Note that these
filters are ignored by certain methods, including
:attr:`~praw.models.Subreddit.comments`, :meth:`~praw.models.Subreddit.gilded`, and
:meth:`.SubredditStream.comments`.
.. code-block:: python
for submission in reddit.subreddit("all-redditdev").new():
print(submission)
**Typical Attributes**
This table describes attributes that typically belong to objects of this class.
Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a guarantee that
these attributes will always be present, nor is this list necessarily complete.
========================== =========================================================
Attribute Description
========================== =========================================================
``can_assign_link_flair`` Whether users can assign their own link flair.
``can_assign_user_flair`` Whether users can assign their own user flair.
``created_utc`` Time the subreddit was created, represented in`Unix
Time`_.
``description`` Subreddit description, in Markdown.
``description_html`` Subreddit description, in HTML.
``display_name`` Name of the subreddit.
``id`` ID of the subreddit.
``name`` Fullname of the subreddit.
``over18`` Whether the subreddit is NSFW.
``public_description`` Description of the subreddit, shown in searches and on
the "You must be invited to visit this community" page
(if applicable).
``spoilers_enabled`` Whether the spoiler tag feature is enabled.
``subscribers`` Count of subscribers.
``user_is_banned`` Whether the authenticated user is banned.
``user_is_moderator`` Whether the authenticated user is a moderator.
``user_is_subscriber`` Whether the authenticated user is subscribed.
========================== =========================================================
.. note::
Trying to retrieve attributes of quarantined or private subreddits will result
in a 403 error. Trying to retrieve attributes of a banned subreddit will result
in a 404 error.
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
# pylint: disable=too-many-public-methods
STR_FIELD = "display_name"
MESSAGE_PREFIX = "#"
    @staticmethod
    def _create_or_update(
        _reddit,
        allow_images=None,
        allow_post_crossposts=None,
        allow_top=None,
        collapse_deleted_comments=None,
        comment_score_hide_mins=None,
        description=None,
        domain=None,
        exclude_banned_modqueue=None,
        header_hover_text=None,
        hide_ads=None,
        lang=None,
        key_color=None,
        link_type=None,
        name=None,
        over_18=None,
        public_description=None,
        public_traffic=None,
        show_media=None,
        show_media_preview=None,
        spam_comments=None,
        spam_links=None,
        spam_selfposts=None,
        spoilers_enabled=None,
        sr=None,
        submit_link_label=None,
        submit_text=None,
        submit_text_label=None,
        subreddit_type=None,
        suggested_comment_sort=None,
        title=None,
        wiki_edit_age=None,
        wiki_edit_karma=None,
        wikimode=None,
        **other_settings,
    ):
        """Create a new subreddit or update an existing one's settings.
        Each keyword argument maps onto a field of the ``site_admin``
        endpoint; the mapping below renames a few arguments to the field
        names the API expects. Unrecognized keyword arguments are passed
        through verbatim via ``other_settings``.
        """
        # pylint: disable=invalid-name,too-many-locals,too-many-arguments
        model = {
            "allow_images": allow_images,
            "allow_post_crossposts": allow_post_crossposts,
            "allow_top": allow_top,
            "collapse_deleted_comments": collapse_deleted_comments,
            "comment_score_hide_mins": comment_score_hide_mins,
            "description": description,
            "domain": domain,
            "exclude_banned_modqueue": exclude_banned_modqueue,
            "header-title": header_hover_text,  # Remap here - better name
            "hide_ads": hide_ads,
            "key_color": key_color,
            "lang": lang,
            "link_type": link_type,
            "name": name,
            "over_18": over_18,
            "public_description": public_description,
            "public_traffic": public_traffic,
            "show_media": show_media,
            "show_media_preview": show_media_preview,
            "spam_comments": spam_comments,
            "spam_links": spam_links,
            "spam_selfposts": spam_selfposts,
            "spoilers_enabled": spoilers_enabled,
            "sr": sr,
            "submit_link_label": submit_link_label,
            "submit_text": submit_text,
            "submit_text_label": submit_text_label,
            "suggested_comment_sort": suggested_comment_sort,
            "title": title,
            "type": subreddit_type,
            "wiki_edit_age": wiki_edit_age,
            "wiki_edit_karma": wiki_edit_karma,
            "wikimode": wikimode,
        }
        ## extra settings extend/override the canonical model
        model.update(other_settings)
        _reddit.post(API_PATH["site_admin"], data=model)
@staticmethod
def _subreddit_list(subreddit, other_subreddits):
if other_subreddits:
return ",".join([str(subreddit)] + [str(x) for x in other_subreddits])
return str(subreddit)
@staticmethod
def _validate_gallery(images):
for image in images:
image_path = image.get("image_path", "")
if image_path:
if not isfile(image_path):
raise TypeError(f"{image_path!r} is not a valid image path.")
else:
raise TypeError("'image_path' is required.")
if not len(image.get("caption", "")) <= 180:
raise TypeError("Caption must be 180 characters or less.")
@staticmethod
def _validate_inline_media(inline_media: InlineMedia):
if not isfile(inline_media.path):
raise ValueError(f"{inline_media.path!r} is not a valid file path.")
@property
def _kind(self) -> str:
"""Return the class's kind."""
return self._reddit.config.kinds["subreddit"]
@cachedproperty
def banned(self):
"""Provide an instance of :class:`.SubredditRelationship`.
For example to ban a user try:
.. code-block:: python
reddit.subreddit("SUBREDDIT").banned.add("NAME", ban_reason="...")
To list the banned users along with any notes, try:
.. code-block:: python
for ban in reddit.subreddit("SUBREDDIT").banned():
print(f'{ban}: {ban.note}')
"""
return SubredditRelationship(self, "banned")
@cachedproperty
def collections(self):
r"""Provide an instance of :class:`.SubredditCollections`.
To see the permalinks of all :class:`.Collection`\ s that belong to a subreddit,
try:
.. code-block:: python
for collection in reddit.subreddit("SUBREDDIT").collections:
print(collection.permalink)
To get a specific :class:`.Collection` by its UUID or permalink, use one of the
following:
.. code-block:: python
collection = reddit.subreddit("SUBREDDIT").collections("some_uuid")
collection = reddit.subreddit("SUBREDDIT").collections(
permalink='https://reddit.com/r/SUBREDDIT/collection/some_uuid'
)
"""
return self._subreddit_collections_class(self._reddit, self)
@cachedproperty
def contributor(self):
"""Provide an instance of :class:`.ContributorRelationship`.
Contributors are also known as approved submitters.
To add a contributor try:
.. code-block:: python
reddit.subreddit("SUBREDDIT").contributor.add("NAME")
"""
return ContributorRelationship(self, "contributor")
@cachedproperty
def emoji(self):
"""Provide an instance of :class:`.SubredditEmoji`.
This attribute can be used to discover all emoji for a subreddit:
.. code-block:: python
for emoji in reddit.subreddit("iama").emoji:
print(emoji)
A single emoji can be lazily retrieved via:
.. code-block:: python
reddit.subreddit("blah").emoji["emoji_name"]
.. note::
Attempting to access attributes of an nonexistent emoji will result in a
:class:`.ClientException`.
"""
return SubredditEmoji(self)
@cachedproperty
def filters(self):
"""Provide an instance of :class:`.SubredditFilters`.
For example, to add a filter, run:
.. code-block:: python
reddit.subreddit("all").filters.add("subreddit_name")
"""
return SubredditFilters(self)
@cachedproperty
def flair(self):
"""Provide an instance of :class:`.SubredditFlair`.
Use this attribute for interacting with a subreddit's flair. For example to list
all the flair for a subreddit which you have the ``flair`` moderator permission
on try:
.. code-block:: python
for flair in reddit.subreddit("NAME").flair():
print(flair)
Flair templates can be interacted with through this attribute via:
.. code-block:: python
for template in reddit.subreddit("NAME").flair.templates:
print(template)
"""
return SubredditFlair(self)
@cachedproperty
def mod(self):
"""Provide an instance of :class:`.SubredditModeration`.
For example, to accept a moderation invite from subreddit ``r/test``:
.. code-block:: python
reddit.subreddit("test").mod.accept_invite()
"""
return SubredditModeration(self)
@cachedproperty
def moderator(self):
"""Provide an instance of :class:`.ModeratorRelationship`.
For example to add a moderator try:
.. code-block:: python
reddit.subreddit("SUBREDDIT").moderator.add("NAME")
To list the moderators along with their permissions try:
.. code-block:: python
for moderator in reddit.subreddit("SUBREDDIT").moderator():
print(f'{moderator}: {moderator.mod_permissions}')
"""
return ModeratorRelationship(self, "moderator")
@cachedproperty
def modmail(self):
"""Provide an instance of :class:`.Modmail`.
For example, to send a new modmail from the subreddit ``r/test`` to user
``u/spez`` with the subject ``test`` along with a message body of ``hello``:
.. code-block:: python
reddit.subreddit("test").modmail.create("test", "hello", "spez")
"""
return Modmail(self)
@cachedproperty
def muted(self):
"""Provide an instance of :class:`.SubredditRelationship`.
For example, muted users can be iterated through like so:
.. code-block:: python
for mute in reddit.subreddit("redditdev").muted():
print(f'{mute}: {mute.note}')
"""
return SubredditRelationship(self, "muted")
@cachedproperty
def quaran(self):
"""Provide an instance of :class:`.SubredditQuarantine`.
This property is named ``quaran`` because ``quarantine`` is a Subreddit
attribute returned by Reddit to indicate whether or not a Subreddit is
quarantined.
To opt-in into a quarantined subreddit:
.. code-block:: python
reddit.subreddit("test").quaran.opt_in()
"""
return SubredditQuarantine(self)
@cachedproperty
def rules(self):
"""Provide an instance of :class:`.SubredditRules`.
Use this attribute for interacting with a subreddit's rules.
For example, to list all the rules for a subreddit:
.. code-block:: python
for rule in reddit.subreddit("AskReddit").rules:
print(rule)
Moderators can also add rules to the subreddit. For example, to make a rule
called ``"No spam"`` in the subreddit ``"NAME"``:
.. code-block:: python
reddit.subreddit("NAME").rules.mod.add(
short_name="No spam", kind="all", description="Do not spam. Spam bad"
)
"""
return SubredditRules(self)
@cachedproperty
def stream(self):
"""Provide an instance of :class:`.SubredditStream`.
Streams can be used to indefinitely retrieve new comments made to a subreddit,
like:
.. code-block:: python
for comment in reddit.subreddit("iama").stream.comments():
print(comment)
Additionally, new submissions can be retrieved via the stream. In the following
example all submissions are fetched via the special subreddit ``r/all``:
.. code-block:: python
for submission in reddit.subreddit("all").stream.submissions():
print(submission)
"""
return SubredditStream(self)
@cachedproperty
def stylesheet(self):
"""Provide an instance of :class:`.SubredditStylesheet`.
For example, to add the css data ``.test{color:blue}`` to the existing
stylesheet:
.. code-block:: python
subreddit = reddit.subreddit("SUBREDDIT")
stylesheet = subreddit.stylesheet()
stylesheet += ".test{color:blue}"
subreddit.stylesheet.update(stylesheet)
"""
return SubredditStylesheet(self)
@cachedproperty
def widgets(self):
"""Provide an instance of :class:`.SubredditWidgets`.
**Example usage**
Get all sidebar widgets:
.. code-block:: python
for widget in reddit.subreddit("redditdev").widgets.sidebar:
print(widget)
Get ID card widget:
.. code-block:: python
print(reddit.subreddit("redditdev").widgets.id_card)
"""
return SubredditWidgets(self)
@cachedproperty
def wiki(self):
"""Provide an instance of :class:`.SubredditWiki`.
This attribute can be used to discover all wikipages for a subreddit:
.. code-block:: python
for wikipage in reddit.subreddit("iama").wiki:
print(wikipage)
To fetch the content for a given wikipage try:
.. code-block:: python
wikipage = reddit.subreddit("iama").wiki["proof"]
print(wikipage.content_md)
"""
return SubredditWiki(self)
def __init__(self, reddit, display_name=None, _data=None):
"""Initialize a Subreddit instance.
:param reddit: An instance of :class:`~.Reddit`.
:param display_name: The name of the subreddit.
.. note::
This class should not be initialized directly. Instead obtain an instance
via: ``reddit.subreddit("subreddit_name")``
"""
if (display_name, _data).count(None) != 1:
raise TypeError("Either `display_name` or `_data` must be provided.")
if display_name:
self.display_name = display_name
super().__init__(reddit, _data=_data)
self._path = API_PATH["subreddit"].format(subreddit=self)
def _convert_to_fancypants(self, markdown_text: str):
"""Convert a Markdown string to a dict for use with the ``richtext_json`` param.
:param markdown_text: A Markdown string to convert.
:returns: A dict in ``richtext_json`` format.
"""
text_data = {"output_mode": "rtjson", "markdown_text": markdown_text}
return self._reddit.post(API_PATH["convert_rte_body"], text_data)["output"]
def _fetch_info(self):
return "subreddit_about", {"subreddit": self}, None
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
other = type(self)(self._reddit, _data=data)
self.__dict__.update(other.__dict__)
self._fetched = True
def _parse_xml_response(self, response):
"""Parse the XML from a response and raise any errors found."""
xml = response.text
root = XML(xml)
tags = [element.tag for element in root]
if tags[:4] == ["Code", "Message", "ProposedSize", "MaxSizeAllowed"]:
# Returned if image is too big
code, message, actual, maximum_size = [element.text for element in root[:4]]
raise TooLargeMediaException(int(maximum_size), int(actual))
    def _submit_media(self, data, timeout, websocket_url=None):
        """Submit and return an `image`, `video`, or `videogif`.
        This is a helper method for submitting posts that are not link posts or self
        posts.
        :param data: Form data POSTed to the submit endpoint.
        :param timeout: Seconds to wait when connecting/receiving on the websocket.
        :param websocket_url: When provided, listen on this websocket for the
            created submission's URL and return the corresponding Submission.
        """
        connection = None
        if websocket_url is not None:
            ## connect BEFORE submitting so the creation event cannot be missed
            try:
                connection = websocket.create_connection(websocket_url, timeout=timeout)
            except (
                websocket.WebSocketException,
                socket.error,
                BlockingIOError,
            ) as ws_exception:
                raise WebSocketException(
                    "Error establishing websocket connection.", ws_exception
                )
        self._reddit.post(API_PATH["submit"], data=data)
        if connection is None:
            ## without a websocket there is no way to learn the new URL
            return
        try:
            ws_update = loads(connection.recv())
            connection.close()
        except (
            websocket.WebSocketException,
            socket.error,
            BlockingIOError,
        ) as ws_exception:
            raise WebSocketException(
                "Websocket error. Check your media file. "
                "Your post may still have been created.",
                ws_exception,
            )
        if ws_update.get("type") == "failed":
            raise MediaPostFailed
        url = ws_update["payload"]["redirect"]
        return self._reddit.submission(url=url)
def _upload_media(self, media_path, expected_mime_prefix=None, upload_type="link"):
    """Upload media and return its URL and a websocket (Undocumented endpoint).

    :param media_path: Path to the media file, or ``None`` to upload the
        bundled PRAW logo (used, e.g., as a default video thumbnail).
    :param expected_mime_prefix: If provided, enforce that the media has a mime type
        that starts with the provided prefix.
    :param upload_type: One of ``link``, ``gallery``, or ``selfpost``. (default:
        ``link``)

    :raises: :class:`.ClientException` when the file's mime type (derived from
        its extension) does not match ``expected_mime_prefix``.

    :returns: A tuple containing ``(media_url, websocket_url)`` for the
        piece of media. The websocket URL can be used to determine when
        media processing is finished, or it can be ignored.

    """
    if media_path is None:
        # Fall back to the logo image shipped with the package.
        media_path = join(
            dirname(dirname(dirname(__file__))), "images", "PRAW logo.png"
        )
    file_name = basename(media_path).lower()
    file_extension = file_name.rpartition(".")[2]
    # Mime type is inferred purely from the file extension.
    mime_type = {
        "png": "image/png",
        "mov": "video/quicktime",
        "mp4": "video/mp4",
        "jpg": "image/jpeg",
        "jpeg": "image/jpeg",
        "gif": "image/gif",
    }.get(
        file_extension, "image/jpeg"
    )  # default to JPEG
    if (
        expected_mime_prefix is not None
        and mime_type.partition("/")[0] != expected_mime_prefix
    ):
        raise ClientException(
            f"Expected a mimetype starting with {expected_mime_prefix!r} but got mimetype {mime_type!r} (from file extension {file_extension!r})."
        )
    img_data = {"filepath": file_name, "mimetype": mime_type}
    url = API_PATH["media_asset"]
    # until we learn otherwise, assume this request always succeeds
    upload_response = self._reddit.post(url, data=img_data)
    # The lease describes where and how to POST the raw file bytes.
    upload_lease = upload_response["args"]
    upload_url = f"https:{upload_lease['action']}"
    upload_data = {item["name"]: item["value"] for item in upload_lease["fields"]}
    with open(media_path, "rb") as media:
        # Upload goes straight through the underlying HTTP session, bypassing
        # Reddit's API wrapper, since the target is the lease's upload host.
        response = self._reddit._core._requestor._http.post(
            upload_url, data=upload_data, files={"file": media}
        )
    if not response.ok:
        # Surface a friendlier exception for oversized media before raising
        # the generic HTTP error.
        self._parse_xml_response(response)
    response.raise_for_status()
    websocket_url = upload_response["asset"]["websocket_url"]
    if upload_type == "link":
        return f"{upload_url}/{upload_data['key']}", websocket_url
    else:
        # Gallery and selfpost uploads are referenced by asset id, not URL.
        return upload_response["asset"]["asset_id"], websocket_url
def _upload_inline_media(self, inline_media: InlineMedia):
    """Validate then upload a piece of inline media.

    :param inline_media: An :class:`.InlineMedia` object to validate and upload.

    :returns: The same object with its ``media_id`` attribute populated.

    """
    self._validate_inline_media(inline_media)
    media_id, _websocket = self._upload_media(
        inline_media.path, upload_type="selfpost"
    )
    inline_media.media_id = media_id
    return inline_media
def post_requirements(self):
    """Fetch the posting requirements configured for this subreddit.

    :returns: A dict describing the subreddit's requirements, containing the
        following keys:

        * ``domain_blacklist``
        * ``body_restriction_policy``
        * ``domain_whitelist``
        * ``title_regexes``
        * ``body_blacklisted_strings``
        * ``body_required_strings``
        * ``title_text_min_length``
        * ``is_flair_required``
        * ``title_text_max_length``
        * ``body_regexes``
        * ``link_repost_age``
        * ``body_text_min_length``
        * ``link_restriction_policy``
        * ``body_text_max_length``
        * ``title_required_strings``
        * ``title_blacklisted_strings``
        * ``guidelines_text``
        * ``guidelines_display_policy``

    For example, to fetch the post requirements for ``r/test``:

    .. code-block:: python

        print(reddit.subreddit("test").post_requirements)

    """
    url = API_PATH["post_requirements"].format(subreddit=str(self))
    return self._reddit.get(url)
def random(self):
    """Return a random Submission.

    Returns ``None`` on subreddits that do not support the random feature. One
    example, at the time of writing, is ``r/wallpapers``.

    For example, to get a random submission off of ``r/AskReddit``:

    .. code-block:: python

        submission = reddit.subreddit("AskReddit").random()
        print(submission.title)

    """
    url = API_PATH["subreddit_random"].format(subreddit=self)
    try:
        # The endpoint is expected to redirect to a random submission; the
        # "unique" parameter varies per call to defeat caching.
        self._reddit.get(url, params={"unique": self._reddit._next_unique})
    except Redirect as redirect:
        path = redirect.path
    # NOTE(review): if the GET does not raise Redirect, ``path`` is unbound
    # below — this relies on the endpoint always redirecting.
    try:
        return self._submission_class(
            self._reddit, url=urljoin(self._reddit.config.reddit_url, path)
        )
    except ClientException:
        # The redirect target was not a submission URL — presumably the
        # subreddit does not support the random feature.
        return None
def search(
    self,
    query,
    sort="relevance",
    syntax="lucene",
    time_filter="all",
    **generator_kwargs,
):
    """Return a :class:`.ListingGenerator` of submissions matching ``query``.

    :param query: The query string to search for.
    :param sort: Can be one of: relevance, hot, top, new, comments. (default:
        relevance).
    :param syntax: Can be one of: cloudsearch, lucene, plain (default: lucene).
    :param time_filter: Can be one of: all, day, hour, month, week, year (default:
        all).

    For more information on building a search query see:
    https://www.reddit.com/wiki/search

    For example to search all subreddits for ``praw`` try:

    .. code-block:: python

        for submission in reddit.subreddit("all").search("praw"):
            print(submission.title)

    """
    self._validate_time_filter(time_filter)
    # Searching r/all must not restrict results to a single subreddit.
    restrict = self.display_name.lower() != "all"
    self._safely_add_arguments(
        generator_kwargs,
        "params",
        q=query,
        restrict_sr=restrict,
        sort=sort,
        syntax=syntax,
        t=time_filter,
    )
    return ListingGenerator(
        self._reddit,
        API_PATH["search"].format(subreddit=self),
        **generator_kwargs,
    )
def sticky(self, number=1):
    """Return a Submission object for a sticky of the subreddit.

    :param number: Specify which sticky to return. 1 appears at the top (default:
        1).

    Raises ``prawcore.NotFound`` if the sticky does not exist.

    For example, to get the stickied post on the subreddit ``r/test``:

    .. code-block:: python

        reddit.subreddit("test").sticky()

    """
    url = API_PATH["about_sticky"].format(subreddit=self)
    try:
        # The endpoint redirects to the requested sticky; the Redirect
        # exception carries the submission's path.
        self._reddit.get(url, params={"num": number})
    except Redirect as redirect:
        path = redirect.path
    # NOTE(review): if no Redirect is raised, ``path`` is unbound here —
    # this relies on the endpoint always redirecting (or raising NotFound).
    return self._submission_class(
        self._reddit, url=urljoin(self._reddit.config.reddit_url, path)
    )
def submit(
    self,
    title,
    selftext=None,
    url=None,
    flair_id=None,
    flair_text=None,
    resubmit=True,
    send_replies=True,
    nsfw=False,
    spoiler=False,
    collection_id=None,
    discussion_type=None,
    inline_media=None,
):  # noqa: D301
    """Add a submission to the subreddit.

    :param title: The title of the submission.
    :param selftext: The Markdown formatted content for a ``text`` submission. Use
        an empty string, ``""``, to make a title-only submission.
    :param url: The URL for a ``link`` submission.
    :param collection_id: The UUID of a :class:`.Collection` to add the newly-
        submitted post to.
    :param flair_id: The flair template to select (default: None).
    :param flair_text: If the template's ``flair_text_editable`` value is True, this
        value will set a custom text (default: None).
    :param resubmit: When False, an error will occur if the URL has already been
        submitted (default: True).
    :param send_replies: When True, messages will be sent to the submission author
        when comments are made to the submission (default: True).
    :param nsfw: Whether or not the submission should be marked NSFW (default:
        False).
    :param spoiler: Whether or not the submission should be marked as a spoiler
        (default: False).
    :param discussion_type: Set to ``CHAT`` to enable live discussion instead of
        traditional comments (default: None).
    :param inline_media: A dict of :class:`.InlineMedia` objects where the key is
        the placeholder name in ``selftext``.

    :returns: A :class:`~.Submission` object for the newly created submission.

    Either ``selftext`` or ``url`` can be provided, but not both.

    For example to submit a URL to ``r/reddit_api_test`` do:

    .. code-block:: python

        title = "PRAW documentation"
        url = 'https://praw.readthedocs.io'
        reddit.subreddit("reddit_api_test").submit(title, url=url)

    For example to submit a self post with inline media do:

    .. code-block:: python

        from praw.models import InlineGif, InlineImage, InlineVideo

        gif = InlineGif("path/to/image.gif", "optional caption")
        image = InlineImage("path/to/image.jpg", "optional caption")
        video = InlineVideo("path/to/video.mp4", "optional caption")
        selftext = "Text with a gif {gif1} an image {image1} and a video {video1} inline"
        media = {'gif1': gif, 'image1': image, 'video1': video}
        reddit.subreddit('redditdev').submit('title', selftext=selftext, inline_media=media)

    .. note::

        Inserted media will have a padding of `\n\n` automatically added. This
        is due to the weirdness with Reddit's API. Using the example above the
        result selftext body will look like so:

        .. code-block::

            Text with a gif

            

            an image

            

            and video

            

            inline

    .. seealso ::

        * :meth:`.submit_image` to submit images
        * :meth:`.submit_video` to submit videos and videogifs
        * :meth:`.submit_poll` to submit polls
        * :meth:`.submit_gallery`. to submit more than one image in the same post

    """
    # Exactly one of ``selftext`` (where "" counts as provided) and ``url``
    # must be supplied.
    if (bool(selftext) or selftext == "") == bool(url):
        raise TypeError("Either `selftext` or `url` must be provided.")

    data = {
        "sr": str(self),
        "resubmit": bool(resubmit),
        "sendreplies": bool(send_replies),
        "title": title,
        "nsfw": bool(nsfw),
        "spoiler": bool(spoiler),
        "validate_on_submit": self._reddit.validate_on_submit,
    }
    # Optional fields are only sent when explicitly provided.
    for key, value in (
        ("flair_id", flair_id),
        ("flair_text", flair_text),
        ("collection_id", collection_id),
        ("discussion_type", discussion_type),
    ):
        if value is not None:
            data[key] = value
    if selftext is not None:
        data.update(kind="self")
        if inline_media:
            # Replace each placeholder with its uploaded media id, then
            # convert the Markdown body to Reddit's richtext format.
            body = selftext.format(
                **{
                    placeholder: self._upload_inline_media(media)
                    for placeholder, media in inline_media.items()
                }
            )
            converted = self._convert_to_fancypants(body)
            data.update(richtext_json=dumps(converted))
        else:
            data.update(text=selftext)
    else:
        data.update(kind="link", url=url)

    return self._reddit.post(API_PATH["submit"], data=data)
def submit_gallery(
    self,
    title,
    images,
    *,
    collection_id=None,
    discussion_type=None,
    flair_id=None,
    flair_text=None,
    nsfw=False,
    send_replies=True,
    spoiler=False,
):
    """Add an image gallery submission to the subreddit.

    :param title: The title of the submission.
    :param images: The images to post in dict with the following structure:
        ``{"image_path": "path", "caption": "caption", "outbound_url": "url"}``,
        only ``"image_path"`` is required.
    :param collection_id: The UUID of a :class:`.Collection` to add the newly-
        submitted post to.
    :param discussion_type: Set to ``CHAT`` to enable live discussion instead of
        traditional comments (default: None).
    :param flair_id: The flair template to select (default: None).
    :param flair_text: If the template's ``flair_text_editable`` value is True, this
        value will set a custom text (default: None).
    :param nsfw: Whether or not the submission should be marked NSFW (default:
        False).
    :param send_replies: When True, messages will be sent to the submission author
        when comments are made to the submission (default: True).
    :param spoiler: Whether or not the submission should be marked as a spoiler
        (default: False).

    :returns: A :class:`.Submission` object for the newly created submission.

    If ``image_path`` in ``images`` refers to a file that is not an image, PRAW will
    raise a :class:`.ClientException`.

    For example to submit an image gallery to ``r/reddit_api_test`` do:

    .. code-block:: python

        title = "My favorite pictures"
        image = "/path/to/image.png"
        image2 = "/path/to/image2.png"
        image3 = "/path/to/image3.png"
        images = [
            {"image_path": image},
            {
                "image_path": image2,
                "caption": "Image caption 2",
            },
            {
                "image_path": image3,
                "caption": "Image caption 3",
                "outbound_url": "https://example.com/link3",
            },
        ]
        reddit.subreddit("reddit_api_test").submit_gallery(title, images)

    .. seealso ::

        * :meth:`.submit` to submit url posts and selftexts
        * :meth:`.submit_image`. to submit single images
        * :meth:`.submit_poll` to submit polls
        * :meth:`.submit_video`. to submit videos and videogifs

    """
    self._validate_gallery(images)
    data = {
        "api_type": "json",
        "items": [],
        "nsfw": bool(nsfw),
        "sendreplies": bool(send_replies),
        "show_error_list": True,
        "spoiler": bool(spoiler),
        "sr": str(self),
        "title": title,
        "validate_on_submit": self._reddit.validate_on_submit,
    }
    # Optional fields are only sent when explicitly provided.
    for key, value in (
        ("flair_id", flair_id),
        ("flair_text", flair_text),
        ("collection_id", collection_id),
        ("discussion_type", discussion_type),
    ):
        if value is not None:
            data[key] = value
    # Upload every image first; the gallery references them by media id.
    for image in images:
        data["items"].append(
            {
                "caption": image.get("caption", ""),
                "outbound_url": image.get("outbound_url", ""),
                "media_id": self._upload_media(
                    image["image_path"],
                    expected_mime_prefix="image",
                    upload_type="gallery",
                )[0],
            }
        )
    response = self._reddit.request(
        "POST", API_PATH["submit_gallery_post"], json=data
    )["json"]
    if response["errors"]:
        raise RedditAPIException(response["errors"])
    else:
        return self._reddit.submission(url=response["data"]["url"])
def submit_image(
    self,
    title,
    image_path,
    flair_id=None,
    flair_text=None,
    resubmit=True,
    send_replies=True,
    nsfw=False,
    spoiler=False,
    timeout=10,
    collection_id=None,
    without_websockets=False,
    discussion_type=None,
):
    """Add an image submission to the subreddit.

    :param title: The title of the submission.
    :param image_path: The path to an image, to upload and post.
    :param collection_id: The UUID of a :class:`.Collection` to add the newly-
        submitted post to.
    :param flair_id: The flair template to select (default: None).
    :param flair_text: If the template's ``flair_text_editable`` value is True, this
        value will set a custom text (default: None).
    :param resubmit: When False, an error will occur if the URL has already been
        submitted (default: True).
    :param send_replies: When True, messages will be sent to the submission author
        when comments are made to the submission (default: True).
    :param nsfw: Whether or not the submission should be marked NSFW (default:
        False).
    :param spoiler: Whether or not the submission should be marked as a spoiler
        (default: False).
    :param timeout: Specifies a particular timeout, in seconds. Use to avoid
        "Websocket error" exceptions (default: 10).
    :param without_websockets: Set to ``True`` to disable use of WebSockets (see
        note below for an explanation). If ``True``, this method doesn't return
        anything. (default: ``False``).
    :param discussion_type: Set to ``CHAT`` to enable live discussion instead of
        traditional comments (default: None).

    :returns: A :class:`.Submission` object for the newly created submission,
        unless ``without_websockets`` is ``True``.

    If ``image_path`` refers to a file that is not an image, PRAW will raise a
    :class:`.ClientException`.

    .. note::

        Reddit's API uses WebSockets to respond with the link of the newly created
        post. If this fails, the method will raise :class:`.WebSocketException`.
        Occasionally, the Reddit post will still be created. More often, there is an
        error with the image file. If you frequently get exceptions but successfully
        created posts, try setting the ``timeout`` parameter to a value above 10.

        To disable the use of WebSockets, set ``without_websockets=True``. This will
        make the method return ``None``, though the post will still be created. You
        may wish to do this if you are running your program in a restricted network
        environment, or using a proxy that doesn't support WebSockets connections.

    For example to submit an image to ``r/reddit_api_test`` do:

    .. code-block:: python

        title = "My favorite picture"
        image = "/path/to/image.png"
        reddit.subreddit("reddit_api_test").submit_image(title, image)

    .. seealso ::

        * :meth:`.submit` to submit url posts and selftexts
        * :meth:`.submit_video`. to submit videos and videogifs
        * :meth:`.submit_gallery`. to submit more than one image in the same post

    """
    data = {
        "sr": str(self),
        "resubmit": bool(resubmit),
        "sendreplies": bool(send_replies),
        "title": title,
        "nsfw": bool(nsfw),
        "spoiler": bool(spoiler),
        "validate_on_submit": self._reddit.validate_on_submit,
    }
    # Optional fields are only sent when explicitly provided.
    for key, value in (
        ("flair_id", flair_id),
        ("flair_text", flair_text),
        ("collection_id", collection_id),
        ("discussion_type", discussion_type),
    ):
        if value is not None:
            data[key] = value
    # Upload first; the submission then references the hosted image URL.
    image_url, websocket_url = self._upload_media(
        image_path, expected_mime_prefix="image"
    )
    data.update(kind="image", url=image_url)
    if without_websockets:
        # Without a websocket there is no way to learn the new post's URL,
        # so _submit_media will return None.
        websocket_url = None
    return self._submit_media(
        data,
        timeout,
        websocket_url=websocket_url,
    )
def submit_poll(
    self,
    title: str,
    selftext: str,
    options: List[str],
    duration: int,
    flair_id: str = None,
    flair_text: str = None,
    resubmit: bool = True,
    send_replies: bool = True,
    nsfw: bool = False,
    spoiler: bool = False,
    collection_id: str = None,
    discussion_type: str = None,
):
    """Create a poll submission on the subreddit.

    :param title: The title of the submission.
    :param selftext: The Markdown formatted content for the submission. Use an empty
        string, ``""``, to make a submission with no text contents.
    :param options: A ``list`` of two to six poll options as ``str``.
    :param duration: The number of days the poll should accept votes, as an ``int``.
        Valid values are between ``1`` and ``7``, inclusive.
    :param collection_id: The UUID of a :class:`.Collection` to add the newly-
        submitted post to.
    :param flair_id: The flair template to select (default: None).
    :param flair_text: If the template's ``flair_text_editable`` value is True, this
        value will set a custom text (default: None).
    :param resubmit: When False, an error will occur if the URL has already been
        submitted (default: True).
    :param send_replies: When True, messages will be sent to the submission author
        when comments are made to the submission (default: True).
    :param nsfw: Whether or not the submission should be marked NSFW (default:
        False).
    :param spoiler: Whether or not the submission should be marked as a spoiler
        (default: False).
    :param discussion_type: Set to ``CHAT`` to enable live discussion instead of
        traditional comments (default: None).

    :returns: A :class:`~.Submission` object for the newly created submission.

    For example to submit a poll to ``r/reddit_api_test`` do:

    .. code-block:: python

        title = "Do you like PRAW?"
        reddit.subreddit("reddit_api_test").submit_poll(
            title, selftext="", options=["Yes", "No"], duration=3
        )

    """
    payload = {
        "sr": str(self),
        "text": selftext,
        "options": options,
        "duration": duration,
        "resubmit": bool(resubmit),
        "sendreplies": bool(send_replies),
        "title": title,
        "nsfw": bool(nsfw),
        "spoiler": bool(spoiler),
        "validate_on_submit": self._reddit.validate_on_submit,
    }
    # Only include the optional fields a caller explicitly supplied.
    optional_fields = {
        "flair_id": flair_id,
        "flair_text": flair_text,
        "collection_id": collection_id,
        "discussion_type": discussion_type,
    }
    payload.update(
        {key: value for key, value in optional_fields.items() if value is not None}
    )
    return self._reddit.post(API_PATH["submit_poll_post"], json=payload)
def submit_video(
    self,
    title,
    video_path,
    videogif=False,
    thumbnail_path=None,
    flair_id=None,
    flair_text=None,
    resubmit=True,
    send_replies=True,
    nsfw=False,
    spoiler=False,
    timeout=10,
    collection_id=None,
    without_websockets=False,
    discussion_type=None,
):
    """Add a video or videogif submission to the subreddit.

    :param title: The title of the submission.
    :param video_path: The path to a video, to upload and post.
    :param videogif: A ``bool`` value. If ``True``, the video is uploaded as a
        videogif, which is essentially a silent video (default: ``False``).
    :param thumbnail_path: (Optional) The path to an image, to be uploaded and used
        as the thumbnail for this video. If not provided, the PRAW logo will be used
        as the thumbnail.
    :param collection_id: The UUID of a :class:`.Collection` to add the newly-
        submitted post to.
    :param flair_id: The flair template to select (default: ``None``).
    :param flair_text: If the template's ``flair_text_editable`` value is True, this
        value will set a custom text (default: ``None``).
    :param resubmit: When False, an error will occur if the URL has already been
        submitted (default: ``True``).
    :param send_replies: When True, messages will be sent to the submission author
        when comments are made to the submission (default: ``True``).
    :param nsfw: Whether or not the submission should be marked NSFW (default:
        False).
    :param spoiler: Whether or not the submission should be marked as a spoiler
        (default: False).
    :param timeout: Specifies a particular timeout, in seconds. Use to avoid
        "Websocket error" exceptions (default: 10).
    :param without_websockets: Set to ``True`` to disable use of WebSockets (see
        note below for an explanation). If ``True``, this method doesn't return
        anything. (default: ``False``).
    :param discussion_type: Set to ``CHAT`` to enable live discussion instead of
        traditional comments (default: None).

    :returns: A :class:`.Submission` object for the newly created submission, unless
        ``without_websockets`` is ``True``.

    If ``video_path`` refers to a file that is not a video, PRAW will
    raise a :class:`.ClientException`.

    .. note::

        Reddit's API uses WebSockets to respond with the link of the newly created
        post. If this fails, the method will raise :class:`.WebSocketException`.
        Occasionally, the Reddit post will still be created. More often, there is an
        error with the image file. If you frequently get exceptions but successfully
        created posts, try setting the ``timeout`` parameter to a value above 10.

        To disable the use of WebSockets, set ``without_websockets=True``. This will
        make the method return ``None``, though the post will still be created. You
        may wish to do this if you are running your program in a restricted network
        environment, or using a proxy that doesn't support WebSockets connections.

    For example to submit a video to ``r/reddit_api_test`` do:

    .. code-block:: python

        title = "My favorite movie"
        video = "/path/to/video.mp4"
        reddit.subreddit("reddit_api_test").submit_video(title, video)

    .. seealso ::

        * :meth:`.submit` to submit url posts and selftexts
        * :meth:`.submit_image` to submit images
        * :meth:`.submit_gallery`. to submit more than one image in the same post

    """
    data = {
        "sr": str(self),
        "resubmit": bool(resubmit),
        "sendreplies": bool(send_replies),
        "title": title,
        "nsfw": bool(nsfw),
        "spoiler": bool(spoiler),
        "validate_on_submit": self._reddit.validate_on_submit,
    }
    # Optional fields are only sent when explicitly provided.
    for key, value in (
        ("flair_id", flair_id),
        ("flair_text", flair_text),
        ("collection_id", collection_id),
        ("discussion_type", discussion_type),
    ):
        if value is not None:
            data[key] = value
    # Upload the video (mime type enforced) before creating the post.
    video_url, websocket_url = self._upload_media(
        video_path, expected_mime_prefix="video"
    )
    data.update(
        kind="videogif" if videogif else "video",
        url=video_url,
        # if thumbnail_path is None, it uploads the PRAW logo
        video_poster_url=self._upload_media(thumbnail_path)[0],
    )
    if without_websockets:
        # Without a websocket there is no way to learn the new post's URL,
        # so _submit_media will return None.
        websocket_url = None
    return self._submit_media(
        data,
        timeout,
        websocket_url=websocket_url,
    )
def subscribe(self, other_subreddits=None):
    """Subscribe to the subreddit.

    :param other_subreddits: When provided, also subscribe to the provided list of
        subreddits.

    For example, to subscribe to ``r/test``:

    .. code-block:: python

        reddit.subreddit("test").subscribe()

    """
    data = {
        "action": "sub",
        # Fixed typo: the Reddit API parameter is ``skip_initial_defaults``.
        # The previous misspelling ("skip_inital_defaults") would not be
        # recognized by the API, so the flag had no effect.
        "skip_initial_defaults": True,
        "sr_name": self._subreddit_list(self, other_subreddits),
    }
    self._reddit.post(API_PATH["subscribe"], data=data)
def traffic(self):
    """Return the subreddit's traffic statistics as a dictionary.

    Raises ``prawcore.NotFound`` when the traffic stats aren't available to the
    authenticated user, that is, they are not public and the authenticated user is
    not a moderator of the subreddit.

    The returned dict has three keys: ``day``, ``hour``, and ``month``. Each key
    maps to a list of lists with 3 or 4 values. The first value is a timestamp
    marking the start of the category (start of the day for ``day``, start of the
    hour for ``hour``, etc.). The second, third, and fourth values indicate the
    unique pageviews, total pageviews, and subscribers, respectively.

    .. note::

        The ``hour`` key does not contain subscribers, and therefore each sub-list
        contains three values.

    For example, to get the traffic stats for ``r/test``:

    .. code-block:: python

        stats = reddit.subreddit("test").traffic()

    """
    url = API_PATH["about_traffic"].format(subreddit=self)
    return self._reddit.get(url)
def unsubscribe(self, other_subreddits=None):
    """Unsubscribe from the subreddit.

    :param other_subreddits: When provided, also unsubscribe from the provided list
        of subreddits.

    To unsubscribe from ``r/test``:

    .. code-block:: python

        reddit.subreddit("test").unsubscribe()

    """
    self._reddit.post(
        API_PATH["subscribe"],
        data={
            "action": "unsub",
            "sr_name": self._subreddit_list(self, other_subreddits),
        },
    )
# WidgetEncoder needs a reference to the Subreddit class, which does not
# exist until this point, so the attribute is attached after the class body.
WidgetEncoder._subreddit_class = Subreddit
class SubredditFilters:
    """Provide functions to interact with the special Subreddit's filters.

    Members of this class should be utilized via ``Subreddit.filters``. For example,
    to add a filter, run:

    .. code-block:: python

        reddit.subreddit("all").filters.add("subreddit_name")

    """

    def __init__(self, subreddit):
        """Create a SubredditFilters instance.

        :param subreddit: The special subreddit whose filters to work with.

        As of this writing filters can only be used with the special subreddits
        ``all`` and ``mod``.

        """
        self.subreddit = subreddit

    def __iter__(self):
        """Iterate through the special subreddit's filters.

        This method should be invoked as:

        .. code-block:: python

            for subreddit in reddit.subreddit("NAME").filters:
                ...

        """
        reddit = self.subreddit._reddit
        url = API_PATH["subreddit_filter_list"].format(
            special=self.subreddit, user=reddit.user.me()
        )
        # The "unique" parameter varies per call to defeat caching.
        listing = reddit.get(url, params={"unique": reddit._next_unique})
        yield from listing.subreddits

    def add(self, subreddit):
        """Add ``subreddit`` to the list of filtered subreddits.

        :param subreddit: The subreddit to add to the filter list.

        Items from subreddits added to the filtered list will no longer be included
        when obtaining listings for ``r/all``.

        Alternatively, you can filter a subreddit temporarily from a special listing
        in a manner like so:

        .. code-block:: python

            reddit.subreddit("all-redditdev-learnpython")

        Raises ``prawcore.NotFound`` when calling on a non-special subreddit.

        """
        url = API_PATH["subreddit_filter"].format(
            special=self.subreddit,
            user=self.subreddit._reddit.user.me(),
            subreddit=subreddit,
        )
        payload = {"model": dumps({"name": str(subreddit)})}
        self.subreddit._reddit.put(url, data=payload)

    def remove(self, subreddit):
        """Remove ``subreddit`` from the list of filtered subreddits.

        :param subreddit: The subreddit to remove from the filter list.

        Raises ``prawcore.NotFound`` when calling on a non-special subreddit.

        """
        url = API_PATH["subreddit_filter"].format(
            special=self.subreddit,
            user=self.subreddit._reddit.user.me(),
            subreddit=str(subreddit),
        )
        self.subreddit._reddit.delete(url)
class SubredditFlair:
    """Provide a set of functions to interact with a Subreddit's flair."""

    @cachedproperty
    def link_templates(self):
        """Provide an instance of :class:`.SubredditLinkFlairTemplates`.

        Use this attribute for interacting with a subreddit's link flair templates.
        For example to list all the link flair templates for a subreddit which you
        have the ``flair`` moderator permission on try:

        .. code-block:: python

            for template in reddit.subreddit("NAME").flair.link_templates:
                print(template)

        """
        return SubredditLinkFlairTemplates(self.subreddit)

    @cachedproperty
    def templates(self):
        """Provide an instance of :class:`.SubredditRedditorFlairTemplates`.

        Use this attribute for interacting with a subreddit's flair templates. For
        example to list all the flair templates for a subreddit which you have the
        ``flair`` moderator permission on try:

        .. code-block:: python

            for template in reddit.subreddit("NAME").flair.templates:
                print(template)

        """
        return SubredditRedditorFlairTemplates(self.subreddit)

    def __call__(self, redditor=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for Redditors and their flairs.

        :param redditor: When provided, yield at most a single :class:`~.Redditor`
            instance (default: None).

        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.

        Usage:

        .. code-block:: python

            for flair in reddit.subreddit("NAME").flair(limit=None):
                print(flair)

        """
        Subreddit._safely_add_arguments(generator_kwargs, "params", name=redditor)
        # Default to fetching all flairs rather than a single page.
        generator_kwargs.setdefault("limit", None)
        url = API_PATH["flairlist"].format(subreddit=self.subreddit)
        return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs)

    def __init__(self, subreddit):
        """Create a SubredditFlair instance.

        :param subreddit: The subreddit whose flair to work with.

        """
        self.subreddit = subreddit

    def configure(
        self,
        position="right",
        self_assign=False,
        link_position="left",
        link_self_assign=False,
        **settings,
    ):
        """Update the subreddit's flair configuration.

        :param position: One of left, right, or False to disable (default: right).
        :param self_assign: (boolean) Permit self assignment of user flair (default:
            False).
        :param link_position: One of left, right, or False to disable (default:
            left).
        :param link_self_assign: (boolean) Permit self assignment of link flair
            (default: False).

        Additional keyword arguments can be provided to handle new settings as
        Reddit introduces them.

        """
        # A falsy ``position``/``link_position`` disables that flair kind.
        # NOTE(review): user flair falls back to "right" while link flair
        # falls back to "" — preserved as-is; confirm the asymmetry is wanted.
        data = {
            "flair_enabled": bool(position),
            "flair_position": position or "right",
            "flair_self_assign_enabled": self_assign,
            "link_flair_position": link_position or "",
            "link_flair_self_assign_enabled": link_self_assign,
        }
        data.update(settings)
        url = API_PATH["flairconfig"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data=data)

    def delete(self, redditor):
        """Delete flair for a Redditor.

        :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
            instance.

        .. seealso::

            :meth:`~praw.models.reddit.subreddit.SubredditFlair.update` to delete
            the flair of many Redditors at once.

        """
        url = API_PATH["deleteflair"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data={"name": str(redditor)})

    def delete_all(self):
        """Delete all Redditor flair in the Subreddit.

        :returns: List of dictionaries indicating the success or failure of each
            delete.

        """
        # Clearing every user's flair via update() deletes it in bulk.
        return self.update(x["user"] for x in self())

    def set(self, redditor, text="", css_class="", flair_template_id=None):
        """Set flair for a Redditor.

        :param redditor: (Required) A redditor name (e.g., ``"spez"``) or
            :class:`~.Redditor` instance.
        :param text: The flair text to associate with the Redditor or Submission
            (default: "").
        :param css_class: The css class to associate with the flair html ((default:
            "")). Use either this or ``flair_template_id``.
        :param flair_template_id: The ID of the flair template to be used (default:
            ``None``). Use either this or ``css_class``.

        This method can only be used by an authenticated user who is a moderator of
        the associated Subreddit.

        For example:

        .. code-block:: python

            reddit.subreddit("redditdev").flair.set("bboe", "PRAW author", css_class="mods")
            template = "6bd28436-1aa7-11e9-9902-0e05ab0fad46"
            reddit.subreddit("redditdev").flair.set(
                "spez", "Reddit CEO", flair_template_id=template
            )

        """
        # The two styling mechanisms are mutually exclusive.
        if css_class and flair_template_id is not None:
            raise TypeError(
                "Parameter `css_class` cannot be used in "
                "conjunction with `flair_template_id`."
            )
        data = {"name": str(redditor), "text": text}
        # Template-based flair and css-class flair use different endpoints.
        if flair_template_id is not None:
            data["flair_template_id"] = flair_template_id
            url = API_PATH["select_flair"].format(subreddit=self.subreddit)
        else:
            data["css_class"] = css_class
            url = API_PATH["flair"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data=data)

    def update(self, flair_list, text="", css_class=""):
        """Set or clear the flair for many Redditors at once.

        :param flair_list: Each item in this list should be either: the name of a
            Redditor, an instance of :class:`.Redditor`, or a dictionary mapping
            keys ``user``, ``flair_text``, and ``flair_css_class`` to their
            respective values. The ``user`` key should map to a Redditor, as
            described above. When a dictionary isn't provided, or the dictionary is
            missing one of ``flair_text``, or ``flair_css_class`` attributes the
            default values will come from the the following arguments.
        :param text: The flair text to use when not explicitly provided in
            ``flair_list`` ((default: "")).
        :param css_class: The css class to use when not explicitly provided in
            ``flair_list`` ((default: "")).

        :returns: List of dictionaries indicating the success or failure of each
            update.

        For example to clear the flair text, and set the ``praw`` flair css class on
        a few users try:

        .. code-block:: python

            subreddit.flair.update(["bboe", "spez", "spladug"], css_class="praw")

        """
        # Build one CSV row per item: user, flair text, css class.
        templines = StringIO()
        for item in flair_list:
            if isinstance(item, dict):
                writer(templines).writerow(
                    [
                        str(item["user"]),
                        item.get("flair_text", text),
                        item.get("flair_css_class", css_class),
                    ]
                )
            else:
                writer(templines).writerow([str(item), text, css_class])

        lines = templines.getvalue().splitlines()
        templines.close()
        response = []
        url = API_PATH["flaircsv"].format(subreddit=self.subreddit)
        # The flair CSV endpoint is fed at most 100 rows per request.
        while lines:
            data = {"flair_csv": "\n".join(lines[:100])}
            response.extend(self.subreddit._reddit.post(url, data=data))
            lines = lines[100:]
        return response
class SubredditFlairTemplates:
    """Provide functions to interact with a Subreddit's flair templates."""
    @staticmethod
    def flair_type(is_link):
        """Return LINK_FLAIR or USER_FLAIR depending on ``is_link`` value."""
        return "LINK_FLAIR" if is_link else "USER_FLAIR"
    def __init__(self, subreddit):
        """Create a SubredditFlairTemplate instance.
        :param subreddit: The subreddit whose flair templates to work with.
        .. note::
            This class should not be initialized directly. Instead obtain an instance
            via: ``reddit.subreddit("subreddit_name").flair.templates`` or
            ``reddit.subreddit("subreddit_name").flair.link_templates``.
        """
        self.subreddit = subreddit
    def __iter__(self):
        """Abstract method to return flair templates."""
        raise NotImplementedError()
    def _add(
        self,
        text,
        css_class="",
        text_editable=False,
        is_link=None,
        background_color=None,
        text_color=None,
        mod_only=None,
        allowable_content=None,
        max_emojis=None,
    ):
        # Shared implementation behind the public ``add`` methods of the
        # user/link subclasses; ``is_link`` selects the flair type.
        url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit)
        data = {
            "allowable_content": allowable_content,
            "background_color": background_color,
            "css_class": css_class,
            "flair_type": self.flair_type(is_link),
            "max_emojis": max_emojis,
            "mod_only": bool(mod_only),
            "text": text,
            "text_color": text_color,
            "text_editable": bool(text_editable),
        }
        self.subreddit._reddit.post(url, data=data)
    def _clear(self, is_link=None):
        # Shared implementation behind the public ``clear`` methods of the
        # user/link subclasses.
        url = API_PATH["flairtemplateclear"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data={"flair_type": self.flair_type(is_link)})
    def delete(self, template_id):
        """Remove a flair template provided by ``template_id``.
        For example, to delete the first Redditor flair template listed, try:
        .. code-block:: python
            template_info = list(subreddit.flair.templates)[0]
            subreddit.flair.templates.delete(template_info["id"])
        """
        url = API_PATH["flairtemplatedelete"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data={"flair_template_id": template_id})
    def update(
        self,
        template_id,
        text=None,
        css_class=None,
        text_editable=None,
        background_color=None,
        text_color=None,
        mod_only=None,
        allowable_content=None,
        max_emojis=None,
        fetch=True,
    ):
        """Update the flair template provided by ``template_id``.
        :param template_id: The flair template to update. If not valid then an exception
            will be thrown.
        :param text: The flair template's new text (required).
        :param css_class: The flair template's new css_class ((default: "")).
        :param text_editable: (boolean) Indicate if the flair text can be modified for
            each Redditor that sets it (default: False).
        :param background_color: The flair template's new background color, as a hex
            color.
        :param text_color: The flair template's new text color, either ``"light"`` or
            ``"dark"``.
        :param mod_only: (boolean) Indicate if the flair can only be used by moderators.
        :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``,
            or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then
            the ``"text"`` param must be a valid emoji string, for example ``":snoo:"``.
        :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value
            to 10).
        :param fetch: Whether or not PRAW will fetch existing information on the
            existing flair before updating (Default: True).
        .. warning::
            If parameter ``fetch`` is set to ``False``, all parameters not provided will
            be reset to default (``None`` or ``False``) values.
        For example to make a user flair template text_editable, try:
        .. code-block:: python
            template_info = list(subreddit.flair.templates)[0]
            subreddit.flair.templates.update(
                template_info["id"], template_info["flair_text"], text_editable=True
            )
        """
        url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit)
        data = {
            "allowable_content": allowable_content,
            "background_color": background_color,
            "css_class": css_class,
            "flair_template_id": template_id,
            "max_emojis": max_emojis,
            "mod_only": mod_only,
            "text": text,
            "text_color": text_color,
            "text_editable": text_editable,
        }
        if fetch:
            # Fetch the template's current values and merge them in for every
            # parameter the caller left as None, so omitted settings are
            # preserved instead of being reset server-side.
            _existing_data = [
                template for template in iter(self) if template["id"] == template_id
            ]
            if len(_existing_data) != 1:
                raise InvalidFlairTemplateID(template_id)
            else:
                existing_data = _existing_data[0]
                for key, value in existing_data.items():
                    if data.get(key) is None:
                        data[key] = value
        self.subreddit._reddit.post(url, data=data)
class SubredditRedditorFlairTemplates(SubredditFlairTemplates):
    """Provide functions to interact with Redditor flair templates."""

    def __iter__(self):
        """Iterate through the user flair templates.

        For example:

        .. code-block:: python

            for template in reddit.subreddit("NAME").flair.templates:
                print(template)

        """
        endpoint = API_PATH["user_flair"].format(subreddit=self.subreddit)
        unique = {"unique": self.subreddit._reddit._next_unique}
        yield from self.subreddit._reddit.get(endpoint, params=unique)

    def add(
        self,
        text,
        css_class="",
        text_editable=False,
        background_color=None,
        text_color=None,
        mod_only=None,
        allowable_content=None,
        max_emojis=None,
    ):
        """Add a Redditor flair template to the associated subreddit.

        :param text: The flair template's text (required).
        :param css_class: The flair template's css_class (default: "").
        :param text_editable: (boolean) Indicate if the flair text can be modified for
            each Redditor that sets it (default: False).
        :param background_color: The flair template's new background color, as a hex
            color.
        :param text_color: The flair template's new text color, either ``"light"`` or
            ``"dark"``.
        :param mod_only: (boolean) Indicate if the flair can only be used by moderators.
        :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``,
            or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then
            the ``"text"`` param must be a valid emoji string, for example ``":snoo:"``.
        :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value
            to 10).

        For example, to add an editable Redditor flair try:

        .. code-block:: python

            reddit.subreddit("NAME").flair.templates.add(css_class="praw", text_editable=True)

        """
        # Delegate to the shared implementation, marking this as a user
        # (non-link) flair template.
        self._add(
            text,
            css_class=css_class,
            text_editable=text_editable,
            background_color=background_color,
            text_color=text_color,
            mod_only=mod_only,
            allowable_content=allowable_content,
            max_emojis=max_emojis,
            is_link=False,
        )

    def clear(self):
        """Remove all Redditor flair templates from the subreddit.

        For example:

        .. code-block:: python

            reddit.subreddit("NAME").flair.templates.clear()

        """
        self._clear(is_link=False)
class SubredditLinkFlairTemplates(SubredditFlairTemplates):
    """Provide functions to interact with link flair templates."""

    def __iter__(self):
        """Iterate through the link flair templates.

        For example:

        .. code-block:: python

            for template in reddit.subreddit("NAME").flair.link_templates:
                print(template)

        """
        endpoint = API_PATH["link_flair"].format(subreddit=self.subreddit)
        yield from self.subreddit._reddit.get(endpoint)

    def add(
        self,
        text,
        css_class="",
        text_editable=False,
        background_color=None,
        text_color=None,
        mod_only=None,
        allowable_content=None,
        max_emojis=None,
    ):
        """Add a link flair template to the associated subreddit.

        :param text: The flair template's text (required).
        :param css_class: The flair template's css_class (default: "").
        :param text_editable: (boolean) Indicate if the flair text can be modified for
            each Redditor that sets it (default: False).
        :param background_color: The flair template's new background color, as a hex
            color.
        :param text_color: The flair template's new text color, either ``"light"`` or
            ``"dark"``.
        :param mod_only: (boolean) Indicate if the flair can only be used by moderators.
        :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``,
            or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then
            the ``"text"`` param must be a valid emoji string, for example ``":snoo:"``.
        :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value
            to 10).

        For example, to add an editable link flair try:

        .. code-block:: python

            reddit.subreddit("NAME").flair.link_templates.add(css_class="praw", text_editable=True)

        """
        # Delegate to the shared implementation, marking this as a link flair
        # template.
        self._add(
            text,
            css_class=css_class,
            text_editable=text_editable,
            background_color=background_color,
            text_color=text_color,
            mod_only=mod_only,
            allowable_content=allowable_content,
            max_emojis=max_emojis,
            is_link=True,
        )

    def clear(self):
        """Remove all link flair templates from the subreddit.

        For example:

        .. code-block:: python

            reddit.subreddit("NAME").flair.link_templates.clear()

        """
        self._clear(is_link=True)
class SubredditModeration:
    """Provides a set of moderation functions to a Subreddit.
    For example, to accept a moderation invite from subreddit ``r/test``:
    .. code-block:: python
        reddit.subreddit("test").mod.accept_invite()
    """
    @staticmethod
    def _handle_only(only, generator_kwargs):
        # Reddit's listing endpoints use "links" where PRAW's public API says
        # "submissions"; translate before attaching the filter to the params.
        if only is not None:
            if only == "submissions":
                only = "links"
            RedditBase._safely_add_arguments(generator_kwargs, "params", only=only)
    def __init__(self, subreddit):
        """Create a SubredditModeration instance.
        :param subreddit: The subreddit to moderate.
        """
        self.subreddit = subreddit
        # NOTE(review): appears unused — ``stream`` below is a cachedproperty
        # and nothing in this class reads ``_stream``; confirm before removing.
        self._stream = None
    def accept_invite(self):
        """Accept an invitation as a moderator of the community."""
        url = API_PATH["accept_mod_invite"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url)
    def edited(self, only=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for edited comments and submissions.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        To print all items in the edited queue try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.edited(limit=None):
                print(item)
        """
        self._handle_only(only, generator_kwargs)
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["about_edited"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def inbox(self, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for moderator messages.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        .. seealso::
            :meth:`~.unread` for unread moderator messages.
        To print the last 5 moderator mail messages and their replies, try:
        .. code-block:: python
            for message in reddit.subreddit("mod").mod.inbox(limit=5):
                print(f"From: {message.author}, Body: {message.body}")
                for reply in message.replies:
                    print(f"From: {reply.author}, Body: {reply.body}")
        """
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["moderator_messages"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def log(self, action=None, mod=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for moderator log entries.
        :param action: If given, only return log entries for the specified action.
        :param mod: If given, only return log entries for actions made by the passed in
            Redditor.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        To print the moderator and subreddit of the last 5 modlog entries try:
        .. code-block:: python
            for log in reddit.subreddit("mod").mod.log(limit=5):
                print(f"Mod: {log.mod}, Subreddit: {log.subreddit}")
        """
        # ``mod`` may be a Redditor instance; stringify only when provided so a
        # None filter is passed through untouched.
        params = {"mod": str(mod) if mod else mod, "type": action}
        Subreddit._safely_add_arguments(generator_kwargs, "params", **params)
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["about_log"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def modqueue(self, only=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for modqueue items.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        To print all modqueue items try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.modqueue(limit=None):
                print(item)
        """
        self._handle_only(only, generator_kwargs)
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["about_modqueue"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    @cachedproperty
    def stream(self):
        """Provide an instance of :class:`.SubredditModerationStream`.
        Streams can be used to indefinitely retrieve Moderator only items from
        :class:`.SubredditModeration` made to moderated subreddits, like:
        .. code-block:: python
            for log in reddit.subreddit("mod").mod.stream.log():
                print(f"Mod: {log.mod}, Subreddit: {log.subreddit}")
        """
        return SubredditModerationStream(self.subreddit)
    @cachedproperty
    def removal_reasons(self):
        """Provide an instance of :class:`.SubredditRemovalReasons`.
        Use this attribute for interacting with a subreddit's removal reasons. For
        example to list all the removal reasons for a subreddit which you have the
        ``posts`` moderator permission on, try:
        .. code-block:: python
            for removal_reason in reddit.subreddit("NAME").mod.removal_reasons:
                print(removal_reason)
        A single removal reason can be lazily retrieved via:
        .. code-block:: python
            reddit.subreddit("NAME").mod.removal_reasons["reason_id"]
        .. note::
            Attempting to access attributes of an nonexistent removal reason will result
            in a :class:`.ClientException`.
        """
        return SubredditRemovalReasons(self.subreddit)
    def reports(self, only=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for reported comments and submissions.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        To print the user and mod report reasons in the report queue try:
        .. code-block:: python
            for reported_item in reddit.subreddit("mod").mod.reports():
                print(f"User Reports: {reported_item.user_reports}")
                print(f"Mod Reports: {reported_item.mod_reports}")
        """
        self._handle_only(only, generator_kwargs)
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["about_reports"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def settings(self):
        """Return a dictionary of the subreddit's current settings."""
        url = API_PATH["subreddit_settings"].format(subreddit=self.subreddit)
        return self.subreddit._reddit.get(url)["data"]
    def spam(self, only=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for spam comments and submissions.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        To print the items in the spam queue try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.spam():
                print(item)
        """
        self._handle_only(only, generator_kwargs)
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["about_spam"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def unmoderated(self, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for unmoderated submissions.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        To print the items in the unmoderated queue try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.unmoderated():
                print(item)
        """
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["about_unmoderated"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def unread(self, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for unread moderator messages.
        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.
        .. seealso::
            :meth:`inbox` for all messages.
        To print the mail in the unread modmail queue try:
        .. code-block:: python
            for message in reddit.subreddit("mod").mod.unread():
                print(f"From: {message.author}, To: {message.dest}")
        """
        return ListingGenerator(
            self.subreddit._reddit,
            API_PATH["moderator_unread"].format(subreddit=self.subreddit),
            **generator_kwargs,
        )
    def update(self, **settings):
        """Update the subreddit's settings.
        See https://www.reddit.com/dev/api#POST_api_site_admin for the full list.
        :param all_original_content: Mandate all submissions to be original content
            only.
        :param allow_chat_post_creation: Allow users to create chat submissions.
        :param allow_images: Allow users to upload images using the native image
            hosting.
        :param allow_polls: Allow users to post polls to the subreddit.
        :param allow_post_crossposts: Allow users to crosspost submissions from other
            subreddits.
        :param allow_top: Allow the subreddit to appear on ``r/all`` as well as the
            default and trending lists.
        :param allow_videos: Allow users to upload videos using the native image
            hosting.
        :param collapse_deleted_comments: Collapse deleted and removed comments on
            comments pages by default.
        :param crowd_control_chat_level: Controls the crowd control level for chat
            rooms. Goes from 0-3.
        :param crowd_control_level: Controls the crowd control level for submissions.
            Goes from 0-3.
        :param crowd_control_mode: Enables/disables crowd control.
        :param comment_score_hide_mins: The number of minutes to hide comment scores.
        :param description: Shown in the sidebar of your subreddit.
        :param disable_contributor_requests: Specifies whether redditors may send
            automated modmail messages requesting approval as a submitter.
        :param exclude_banned_modqueue: Exclude posts by site-wide banned users from
            modqueue/unmoderated.
        :param free_form_reports: Allow users to specify custom reasons in the report
            menu.
        :param header-title: The text seen when hovering over the snoo.
        :param hide_ads: Don't show ads within this subreddit. Only applies to Premium-
            user only subreddits.
        :param key_color: A 6-digit rgb hex color (e.g. ``"#AABBCC"``), used as a
            thematic color for your subreddit on mobile.
        :param lang: A valid IETF language tag (underscore separated).
        :param link_type: The types of submissions users can make. One of ``any``,
            ``link``, ``self``.
        :param original_content_tag_enabled: Enables the use of the ``original content``
            label for submissions.
        :param over_18: Viewers must be over 18 years old (i.e. NSFW).
        :param public_description: Public description blurb. Appears in search results
            and on the landing page for private subreddits.
        :param public_traffic: Make the traffic stats page public.
        :param restrict_commenting: Specifies whether approved users have the ability to
            comment.
        :param restrict_posting: Specifies whether approved users have the ability to
            submit posts.
        :param show_media: Show thumbnails on submissions.
        :param show_media_preview: Expand media previews on comments pages.
        :param spam_comments: Spam filter strength for comments. One of ``all``,
            ``low``, ``high``.
        :param spam_links: Spam filter strength for links. One of ``all``, ``low``,
            ``high``.
        :param spam_selfposts: Spam filter strength for selfposts. One of ``all``,
            ``low``, ``high``.
        :param spoilers_enabled: Enable marking posts as containing spoilers.
        :param submit_link_label: Custom label for submit link button (None for
            default).
        :param submit_text: Text to show on submission page.
        :param submit_text_label: Custom label for submit text post button (None for
            default).
        :param suggested_comment_sort: All comment threads will use this sorting method
            by default. Leave None, or choose one of ``confidence``, ``controversial``,
            ``live``, ``new``, ``old``, ``qa``, ``random``, ``top``.
        :param title: The title of the subreddit.
        :param type: One of ``archived``, ``employees_only``, ``gold_only``,
            ``gold_restricted``, ``private``, ``public``, ``restricted``.
        :param welcome_message_enabled: Enables the subreddit welcome message.
        :param welcome_message_text: The text to be used as a welcome message. A welcome
            message is sent to all new subscribers by a Reddit bot.
        :param wiki_edit_age: Account age, in days, required to edit and create wiki
            pages.
        :param wiki_edit_karma: Subreddit karma required to edit and create wiki pages.
        :param wikimode: One of ``anyone``, ``disabled``, ``modonly``.
        Additional keyword arguments can be provided to handle new settings as Reddit
        introduces them.
        Settings that are documented here and aren't explicitly set by you in a call to
        :meth:`.SubredditModeration.update` should retain their current value. If they
        do not please file a bug.
        """
        # The endpoint identifies the subreddit by its fullname under "sr".
        settings["sr"] = self.subreddit.fullname
        return self.subreddit._reddit.patch(API_PATH["update_settings"], json=settings)
class SubredditModerationStream:
    """Provides moderator streams."""
    def __init__(self, subreddit):
        """Create a SubredditModerationStream instance.
        :param subreddit: The moderated subreddit associated with the streams.
        """
        self.subreddit = subreddit
    def edited(self, only=None, **stream_options):
        """Yield edited comments and submissions as they become available.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Keyword arguments are passed to :func:`.stream_generator`.
        For example, to retrieve all new edited submissions/comments made to all
        moderated subreddits, try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.stream.edited():
                print(item)
        """
        return stream_generator(self.subreddit.mod.edited, only=only, **stream_options)
    def log(self, action=None, mod=None, **stream_options):
        """Yield moderator log entries as they become available.
        :param action: If given, only return log entries for the specified action.
        :param mod: If given, only return log entries for actions made by the passed in
            Redditor.
        For example, to retrieve all new mod actions made to all moderated subreddits,
        try:
        .. code-block:: python
            for log in reddit.subreddit("mod").mod.stream.log():
                print(f"Mod: {log.mod}, Subreddit: {log.subreddit}")
        """
        return stream_generator(
            self.subreddit.mod.log,
            action=action,
            mod=mod,
            attribute_name="id",
            **stream_options,
        )
    def modmail_conversations(
        self, other_subreddits=None, sort=None, state=None, **stream_options
    ):
        """Yield new-modmail conversations as they become available.
        :param other_subreddits: A list of :class:`.Subreddit` instances for
            which to fetch conversations (default: None).
        :param sort: Can be one of: mod, recent, unread, user
            (default: recent).
        :param state: Can be one of: all, appeals, archived, default, highlighted,
            inbox, inprogress, mod, new, notifications (default: all). "all" does not
            include mod or archived conversations. "inbox" does not include appeals
            conversations.
        Keyword arguments are passed to :func:`.stream_generator`.
        To print new mail in the unread modmail queue try:
        .. code-block:: python
            subreddit = reddit.subreddit("all")
            for message in subreddit.mod.stream.modmail_conversations():
                print(f"From: {message.owner}, To: {message.participant}")
        """  # noqa: E501
        # Use a local for the r/mod -> r/all substitution. The previous code
        # rebound ``self.subreddit``, permanently redirecting every other
        # stream on this instance to r/all as a side effect of this call.
        if self.subreddit == "mod":
            subreddit = self.subreddit._reddit.subreddit("all")
        else:
            subreddit = self.subreddit
        return stream_generator(
            subreddit.modmail.conversations,
            other_subreddits=other_subreddits,
            sort=sort,
            state=state,
            attribute_name="id",
            exclude_before=True,
            **stream_options,
        )
    def modqueue(self, only=None, **stream_options):
        """Yield comments/submissions in the modqueue as they become available.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Keyword arguments are passed to :func:`.stream_generator`.
        To print all new modqueue items try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.stream.modqueue():
                print(item)
        """
        return stream_generator(
            self.subreddit.mod.modqueue, only=only, **stream_options
        )
    def reports(self, only=None, **stream_options):
        """Yield reported comments and submissions as they become available.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Keyword arguments are passed to :func:`.stream_generator`.
        To print new user and mod report reasons in the report queue try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.stream.reports():
                print(item)
        """
        return stream_generator(self.subreddit.mod.reports, only=only, **stream_options)
    def spam(self, only=None, **stream_options):
        """Yield spam comments and submissions as they become available.
        :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield
            only results of that type.
        Keyword arguments are passed to :func:`.stream_generator`.
        To print new items in the spam queue try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.stream.spam():
                print(item)
        """
        return stream_generator(self.subreddit.mod.spam, only=only, **stream_options)
    def unmoderated(self, **stream_options):
        """Yield unmoderated submissions as they become available.
        Keyword arguments are passed to :func:`.stream_generator`.
        To print new items in the unmoderated queue try:
        .. code-block:: python
            for item in reddit.subreddit("mod").mod.stream.unmoderated():
                print(item)
        """
        return stream_generator(self.subreddit.mod.unmoderated, **stream_options)
    def unread(self, **stream_options):
        """Yield unread old modmail messages as they become available.
        Keyword arguments are passed to :func:`.stream_generator`.
        .. seealso::
            :meth:`~.inbox` for all messages.
        To print new mail in the unread modmail queue try:
        .. code-block:: python
            for message in reddit.subreddit("mod").mod.stream.unread():
                print(f"From: {message.author}, To: {message.dest}")
        """
        return stream_generator(self.subreddit.mod.unread, **stream_options)
class SubredditQuarantine:
    """Provides subreddit quarantine related methods.

    To opt-in into a quarantined subreddit:

    .. code-block:: python

        reddit.subreddit("test").quaran.opt_in()

    """

    def __init__(self, subreddit):
        """Create a SubredditQuarantine instance.

        :param subreddit: The subreddit associated with the quarantine.

        """
        self.subreddit = subreddit

    def _post_opt(self, endpoint_key):
        # POST the given quarantine endpoint; Reddit answers with a redirect,
        # which signals success and is deliberately swallowed.
        payload = {"sr_name": self.subreddit}
        try:
            self.subreddit._reddit.post(API_PATH[endpoint_key], data=payload)
        except Redirect:
            pass

    def opt_in(self):
        """Permit your user access to the quarantined subreddit.

        Usage:

        .. code-block:: python

            subreddit = reddit.subreddit("QUESTIONABLE")
            next(subreddit.hot())  # Raises prawcore.Forbidden

            subreddit.quaran.opt_in()
            next(subreddit.hot())  # Returns Submission

        """
        self._post_opt("quarantine_opt_in")

    def opt_out(self):
        """Remove access to the quarantined subreddit.

        Usage:

        .. code-block:: python

            subreddit = reddit.subreddit("QUESTIONABLE")
            next(subreddit.hot())  # Returns Submission

            subreddit.quaran.opt_out()
            next(subreddit.hot())  # Raises prawcore.Forbidden

        """
        self._post_opt("quarantine_opt_out")
class SubredditRelationship:
    """Represents a relationship between a redditor and subreddit.

    Instances of this class can be iterated through in order to discover the Redditors
    that make up the relationship.

    For example, banned users of a subreddit can be iterated through like so:

    .. code-block:: python

        for ban in reddit.subreddit("redditdev").banned():
            print(f'{ban}: {ban.note}')

    """

    def __init__(self, subreddit, relationship):
        """Create a SubredditRelationship instance.

        :param subreddit: The subreddit for the relationship.
        :param relationship: The name of the relationship.

        """
        self.relationship = relationship
        self.subreddit = subreddit

    def __call__(self, redditor=None, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for Redditors in the relationship.

        :param redditor: When provided, yield at most a single :class:`~.Redditor`
            instance. This is useful to confirm if a relationship exists, or to fetch
            the metadata associated with a particular relationship (default: None).

        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.

        """
        Subreddit._safely_add_arguments(generator_kwargs, "params", user=redditor)
        route = API_PATH[f"list_{self.relationship}"].format(subreddit=self.subreddit)
        return ListingGenerator(self.subreddit._reddit, route, **generator_kwargs)

    def add(self, redditor, **other_settings):
        """Add ``redditor`` to this relationship.

        :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
            instance.

        """
        payload = {"name": str(redditor), "type": self.relationship, **other_settings}
        route = API_PATH["friend"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(route, data=payload)

    def remove(self, redditor):
        """Remove ``redditor`` from this relationship.

        :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
            instance.

        """
        payload = {"name": str(redditor), "type": self.relationship}
        route = API_PATH["unfriend"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(route, data=payload)
class ContributorRelationship(SubredditRelationship):
    """Provides methods to interact with a Subreddit's contributors.

    Contributors are also known as approved submitters.

    Contributors of a subreddit can be iterated through like so:

    .. code-block:: python

        for contributor in reddit.subreddit("redditdev").contributor():
            print(contributor)

    """

    def leave(self):
        """Abdicate the contributor position."""
        payload = {"id": self.subreddit.fullname}
        self.subreddit._reddit.post(API_PATH["leavecontributor"], data=payload)
class ModeratorRelationship(SubredditRelationship):
    """Provides methods to interact with a Subreddit's moderators.
    Moderators of a subreddit can be iterated through like so:
    .. code-block:: python
        for moderator in reddit.subreddit("redditdev").moderator():
            print(moderator)
    """
    # Full set of moderator permission names; passed to permissions_string()
    # when serializing the ``permissions`` argument of add()/invite().
    PERMISSIONS = {"access", "config", "flair", "mail", "posts", "wiki"}
@staticmethod
def _handle_permissions(permissions, other_settings):
other_settings = deepcopy(other_settings) if other_settings else {}
other_settings["permissions"] = permissions_string(
permissions, ModeratorRelationship.PERMISSIONS
)
return other_settings
def __call__(self, redditor=None): # pylint: disable=arguments-differ
"""Return a list of Redditors who are moderators.
:param redditor: When provided, return a list containing at most one
:class:`~.Redditor` instance. This is useful to confirm if a relationship
exists, or to fetch the metadata associated with a particular relationship
(default: None).
.. note::
Unlike other relationship callables, this relationship is not paginated.
Thus it simply returns the full list, rather than an iterator for the
results.
To be used like:
.. code-block:: python
moderators = reddit.subreddit("nameofsub").moderator()
For example, to list the moderators along with their permissions try:
.. code-block:: python
for moderator in reddit.subreddit("SUBREDDIT").moderator():
print(f'{moderator}: {moderator.mod_permissions}')
"""
params = {} if redditor is None else {"user": redditor}
url = API_PATH[f"list_{self.relationship}"].format(subreddit=self.subreddit)
return self.subreddit._reddit.get(url, params=params)
# pylint: disable=arguments-differ
def add(self, redditor, permissions=None, **other_settings):
"""Add or invite ``redditor`` to be a moderator of the subreddit.
:param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
instance.
:param permissions: When provided (not ``None``), permissions should be a list
of strings specifying which subset of permissions to grant. An empty list
``[]`` indicates no permissions, and when not provided ``None``, indicates
full permissions.
An invite will be sent unless the user making this call is an admin user.
For example, to invite ``"spez"`` with ``"posts"`` and ``"mail"`` permissions to
``r/test``, try:
.. code-block:: python
reddit.subreddit("test").moderator.add("spez", ["posts", "mail"])
"""
other_settings = self._handle_permissions(permissions, other_settings)
super().add(redditor, **other_settings)
# pylint: enable=arguments-differ
def invite(self, redditor, permissions=None, **other_settings):
"""Invite ``redditor`` to be a moderator of the subreddit.
:param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
instance.
:param permissions: When provided (not ``None``), permissions should be a list
of strings specifying which subset of permissions to grant. An empty list
``[]`` indicates no permissions, and when not provided ``None``, indicates
full permissions.
For example, to invite ``"spez"`` with ``posts`` and ``mail`` permissions to
``r/test``, try:
.. code-block:: python
reddit.subreddit("test").moderator.invite("spez", ["posts", "mail"])
"""
data = self._handle_permissions(permissions, other_settings)
data.update({"name": str(redditor), "type": "moderator_invite"})
url = API_PATH["friend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def invited(self, redditor=None, fetch_all=False, **generator_kwargs):
    """Return a :class:`.ListingGenerator` for Redditors invited to be moderators.

    :param redditor: When provided, return a list containing at most one
        :class:`~.Redditor` instance. Useful to confirm if a relationship
        exists, or to fetch the metadata associated with a particular
        relationship (default: None).
    :param fetch_all: If True, requests are made until all invited moderators
        are fetched and a list is returned (default: False).

    Additional keyword arguments are passed in the initialization of
    :class:`.ListingGenerator`.

    .. note::
        Unlike other usages of :class:`.ListingGenerator`, ``limit`` has no
        effect on the quantity returned. This endpoint always returns
        moderators in batches of 25 at a time regardless of what ``limit`` is
        set to.

    Usage:

    .. code-block:: python

        for invited_mod in reddit.subreddit("NAME").moderator.invited():
            print(invited_mod)

    """
    generator_kwargs["params"] = {"username": redditor} if redditor else None
    url = API_PATH["list_invited_moderator"].format(subreddit=self.subreddit)
    generator = ListingGenerator(self.subreddit._reddit, url, **generator_kwargs)
    if fetch_all:
        # Exhaust the generator so the caller gets a plain list.
        return list(generator)
    return generator
def leave(self):
    """Abdicate the moderator position (use with care).

    For example:

    .. code-block:: python

        reddit.subreddit("subredditname").moderator.leave()

    """
    # Prefer the configured username; fall back to an API call to identify
    # the authenticated user.
    reddit = self.subreddit._reddit
    me = reddit.config.username or reddit.user.me()
    self.remove(me)
def remove_invite(self, redditor):
    """Remove the moderator invite for ``redditor``.

    :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
        instance.

    For example:

    .. code-block:: python

        reddit.subreddit("subredditname").moderator.remove_invite("spez")

    """
    self.subreddit._reddit.post(
        API_PATH["unfriend"].format(subreddit=self.subreddit),
        data={"name": str(redditor), "type": "moderator_invite"},
    )
def update(self, redditor, permissions=None):
    """Update the moderator permissions for ``redditor``.

    :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
        instance.
    :param permissions: When provided (not ``None``), a list of strings naming
        the subset of permissions to grant. An empty list ``[]`` grants no
        permissions; omitting it (``None``) grants full permissions.

    For example, to add all permissions to the moderator, try:

    .. code-block:: python

        subreddit.moderator.update("spez")

    To remove all permissions from the moderator, try:

    .. code-block:: python

        subreddit.moderator.update("spez", [])

    """
    payload = self._handle_permissions(
        permissions, {"name": str(redditor), "type": "moderator"}
    )
    self.subreddit._reddit.post(
        API_PATH["setpermissions"].format(subreddit=self.subreddit), data=payload
    )
def update_invite(self, redditor, permissions=None):
    """Update the moderator invite permissions for ``redditor``.

    :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor`
        instance.
    :param permissions: When provided (not ``None``), a list of strings naming
        the subset of permissions to grant. An empty list ``[]`` grants no
        permissions; omitting it (``None``) grants full permissions.

    For example, to grant the ``flair`` and ``mail`` permissions to the
    moderator invite, try:

    .. code-block:: python

        subreddit.moderator.update_invite("spez", ["flair", "mail"])

    """
    payload = self._handle_permissions(
        permissions, {"name": str(redditor), "type": "moderator_invite"}
    )
    self.subreddit._reddit.post(
        API_PATH["setpermissions"].format(subreddit=self.subreddit), data=payload
    )
class Modmail:
    """Provides modmail functions for a subreddit.

    For example, to send a new modmail from the subreddit ``r/test`` to user
    ``u/spez`` with the subject ``test`` along with a message body of
    ``hello``:

    .. code-block:: python

        reddit.subreddit("test").modmail.create("test", "hello", "spez")

    """

    def __call__(self, id=None, mark_read=False):  # noqa: D207, D301
        """Return an individual conversation.

        :param id: A reddit base36 conversation ID, e.g., ``2gmz``.
        :param mark_read: If True, conversation is marked as read
            (default: False).

        For example:

        .. code-block:: python

            reddit.subreddit("redditdev").modmail("2gmz", mark_read=True)

        To print all messages from a conversation as Markdown source:

        .. code-block:: python

            conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True)
            for message in conversation.messages:
                print(message.body_markdown)

        ``ModmailConversation.user`` is a special instance of
        :class:`.Redditor` with extra attributes describing the non-moderator
        user's recent posts, comments, and modmail messages within the
        subreddit, as well as information on active bans and mutes. This
        attribute does not exist on internal moderator discussions.

        For example, to print the user's ban status:

        .. code-block:: python

            conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True)
            print(conversation.user.ban_status)

        """
        # ``id`` shadows the builtin intentionally to mirror Reddit's API.
        # pylint: disable=invalid-name,redefined-builtin
        return ModmailConversation(self.subreddit._reddit, id=id, mark_read=mark_read)

    def __init__(self, subreddit):
        """Construct an instance of the Modmail object.

        :param subreddit: The subreddit whose modmail is being worked with.

        """
        self.subreddit = subreddit

    def _build_subreddit_list(self, other_subreddits):
        """Return a comma-separated list of subreddit display names.

        Always includes ``self.subreddit``; ``other_subreddits`` may be None.

        """
        subreddits = [self.subreddit] + (other_subreddits or [])
        return ",".join(str(subreddit) for subreddit in subreddits)

    def bulk_read(self, other_subreddits=None, state=None):
        """Mark conversations for subreddit(s) as read.

        Due to server-side restrictions, "all" is not a valid subreddit for
        this method. Instead, use :meth:`~.Modmail.subreddits` to get a list
        of subreddits using the new modmail.

        :param other_subreddits: A list of :class:`.Subreddit` instances for
            which to mark conversations (default: None).
        :param state: Can be one of: all, archived, highlighted, inprogress,
            mod, new, notifications, or appeals, (default: all). "all" does
            not include internal, archived, or appeals conversations.
        :returns: A list of :class:`.ModmailConversation` instances that were
            marked read.

        For example, to mark all notifications for a subreddit as read:

        .. code-block:: python

            subreddit = reddit.subreddit("redditdev")
            subreddit.modmail.bulk_read(state="notifications")

        """
        params = {"entity": self._build_subreddit_list(other_subreddits)}
        if state:
            params["state"] = state
        response = self.subreddit._reddit.post(
            API_PATH["modmail_bulk_read"], params=params
        )
        # Rebuild lazy conversation objects from the returned ids.
        return [
            self(conversation_id) for conversation_id in response["conversation_ids"]
        ]

    def conversations(
        self, after=None, limit=None, other_subreddits=None, sort=None, state=None
    ):  # noqa: D207, D301
        """Generate :class:`.ModmailConversation` objects for subreddit(s).

        :param after: A base36 modmail conversation id. When provided, the
            listing begins after this conversation (default: None).
        :param limit: The maximum number of conversations to fetch. If None,
            the server-side default is 25 at the time of writing
            (default: None).
        :param other_subreddits: A list of :class:`.Subreddit` instances for
            which to fetch conversations (default: None).
        :param sort: Can be one of: mod, recent, unread, user
            (default: recent).
        :param state: Can be one of: all, archived, highlighted, inprogress,
            mod, new, notifications, or appeals, (default: all). "all" does
            not include internal, archived, or appeals conversations.

        For example:

        .. code-block:: python

            conversations = reddit.subreddit("all").modmail.conversations(state="mod")

        """
        params = {}
        # "all" is special-cased server-side and takes no entity list.
        if self.subreddit != "all":
            params["entity"] = self._build_subreddit_list(other_subreddits)
        for name, value in {
            "after": after,
            "limit": limit,
            "sort": sort,
            "state": state,
        }.items():
            if value:
                params[name] = value
        response = self.subreddit._reddit.get(
            API_PATH["modmail_conversations"], params=params
        )
        for conversation_id in response["conversationIds"]:
            data = {
                "conversation": response["conversations"][conversation_id],
                "messages": response["messages"],
            }
            yield ModmailConversation.parse(
                data, self.subreddit._reddit, convert_objects=False
            )

    def create(self, subject, body, recipient, author_hidden=False):
        """Create a new modmail conversation.

        :param subject: The message subject. Cannot be empty.
        :param body: The message body. Cannot be empty.
        :param recipient: The recipient; a username or an instance of
            :class:`.Redditor`.
        :param author_hidden: When True, author is hidden from non-moderators
            (default: False).
        :returns: A :class:`.ModmailConversation` object for the newly created
            conversation.

        .. code-block:: python

            subreddit = reddit.subreddit("redditdev")
            redditor = reddit.redditor("bboe")
            subreddit.modmail.create("Subject", "Body", redditor)

        """
        data = {
            "body": body,
            "isAuthorHidden": author_hidden,
            "srName": self.subreddit,
            "subject": subject,
            "to": recipient,
        }
        return self.subreddit._reddit.post(API_PATH["modmail_conversations"], data=data)

    def subreddits(self):
        """Yield subreddits using the new modmail that the user moderates.

        For example:

        .. code-block:: python

            subreddits = reddit.subreddit("all").modmail.subreddits()

        """
        response = self.subreddit._reddit.get(API_PATH["modmail_subreddits"])
        for value in response["subreddits"].values():
            subreddit = self.subreddit._reddit.subreddit(value["display_name"])
            subreddit.last_updated = value["lastUpdated"]
            yield subreddit

    def unread_count(self):
        """Return unread conversation count by conversation state.

        At time of writing, possible states are: archived, highlighted,
        inprogress, mod, new, notifications, or appeals.

        :returns: A dict mapping conversation states to unread counts.

        For example, to print the count of unread moderator discussions:

        .. code-block:: python

            subreddit = reddit.subreddit("redditdev")
            unread_counts = subreddit.modmail.unread_count()
            print(unread_counts["mod"])

        """
        return self.subreddit._reddit.get(API_PATH["modmail_unread_count"])
class SubredditStream:
    """Provides submission and comment streams."""

    def __init__(self, subreddit):
        """Create a SubredditStream instance.

        :param subreddit: The subreddit associated with the streams.

        """
        self.subreddit = subreddit

    def comments(self, **stream_options):
        """Yield new comments as they become available.

        Comments are yielded oldest first. Up to 100 historical comments will
        initially be returned.

        Keyword arguments are passed to :func:`.stream_generator`.

        .. note::
            While PRAW tries to catch all new comments, some high-volume
            streams, especially the r/all stream, may drop some comments.

        For example, to retrieve all new comments made to the ``iama``
        subreddit, try:

        .. code-block:: python

            for comment in reddit.subreddit("iama").stream.comments():
                print(comment)

        To only retrieve new comments starting when the stream is created,
        pass ``skip_existing=True``:

        .. code-block:: python

            subreddit = reddit.subreddit("iama")
            for comment in subreddit.stream.comments(skip_existing=True):
                print(comment)

        """
        listing_function = self.subreddit.comments
        return stream_generator(listing_function, **stream_options)

    def submissions(self, **stream_options):
        """Yield new submissions as they become available.

        Submissions are yielded oldest first. Up to 100 historical submissions
        will initially be returned.

        Keyword arguments are passed to :func:`.stream_generator`.

        .. note::
            While PRAW tries to catch all new submissions, some high-volume
            streams, especially the r/all stream, may drop some submissions.

        For example to retrieve all new submissions made to all of Reddit,
        try:

        .. code-block:: python

            for submission in reddit.subreddit("all").stream.submissions():
                print(submission)

        """
        listing_function = self.subreddit.new
        return stream_generator(listing_function, **stream_options)
class SubredditStylesheet:
    """Provides a set of stylesheet functions to a Subreddit.

    For example, to add the css data ``.test{color:blue}`` to the existing
    stylesheet:

    .. code-block:: python

        subreddit = reddit.subreddit("SUBREDDIT")
        stylesheet = subreddit.stylesheet()
        stylesheet += ".test{color:blue}"
        subreddit.stylesheet.update(stylesheet)

    """

    def __call__(self):
        """Return the subreddit's stylesheet.

        To be used as:

        .. code-block:: python

            stylesheet = reddit.subreddit("SUBREDDIT").stylesheet()

        """
        url = API_PATH["about_stylesheet"].format(subreddit=self.subreddit)
        return self.subreddit._reddit.get(url)

    def __init__(self, subreddit):
        """Create a SubredditStylesheet instance.

        :param subreddit: The subreddit associated with the stylesheet.

        An instance of this class is provided as:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet

        """
        self.subreddit = subreddit

    def _update_structured_styles(self, style_data):
        """PATCH the given structured-styles fields for the subreddit."""
        url = API_PATH["structured_styles"].format(subreddit=self.subreddit)
        self.subreddit._reddit.patch(url, style_data)

    def _upload_image(self, image_path, data):
        """Upload an image via the legacy upload endpoint.

        Sniffs the JPEG magic bytes to decide between ``jpg`` and ``png``,
        then translates any endpoint error into :class:`.RedditAPIException`.

        """
        with open(image_path, "rb") as image:
            # Peek at the file header to detect JPEG vs PNG, then rewind so
            # the full file is uploaded.
            header = image.read(len(JPEG_HEADER))
            image.seek(0)
            data["img_type"] = "jpg" if header == JPEG_HEADER else "png"
            url = API_PATH["upload_image"].format(subreddit=self.subreddit)
            response = self.subreddit._reddit.post(
                url, data=data, files={"file": image}
            )
            if response["errors"]:
                error_type = response["errors"][0]
                error_value = response.get("errors_values", [""])[0]
                # Only these two error types are known; anything else is a
                # PRAW bug worth reporting.
                assert error_type in [
                    "BAD_CSS_NAME",
                    "IMAGE_ERROR",
                ], "Please file a bug with PRAW"
                raise RedditAPIException([[error_type, error_value, None]])
            return response

    def _upload_style_asset(self, image_path, image_type):
        """Upload a redesign style asset via an S3 upload lease.

        Obtains a lease from Reddit, POSTs the file directly to S3 with the
        leased form fields, and returns the resulting asset URL.

        """
        data = {"imagetype": image_type, "filepath": basename(image_path)}
        data["mimetype"] = "image/jpeg"
        if image_path.lower().endswith(".png"):
            data["mimetype"] = "image/png"
        url = API_PATH["style_asset_lease"].format(subreddit=self.subreddit)
        upload_lease = self.subreddit._reddit.post(url, data=data)["s3UploadLease"]
        upload_data = {item["name"]: item["value"] for item in upload_lease["fields"]}
        upload_url = f"https:{upload_lease['action']}"
        with open(image_path, "rb") as image:
            # Bypass the Reddit API client: the lease target is S3, not reddit.
            response = self.subreddit._reddit._core._requestor._http.post(
                upload_url, data=upload_data, files={"file": image}
            )
        response.raise_for_status()
        return f"{upload_url}/{upload_data['key']}"

    def delete_banner(self):
        """Remove the current subreddit (redesign) banner image.

        Succeeds even if there is no banner image.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_banner()

        """
        data = {"bannerBackgroundImage": ""}
        self._update_structured_styles(data)

    def delete_banner_additional_image(self):
        """Remove the current subreddit (redesign) banner additional image.

        Succeeds even if there is no additional image. Will also delete any
        configured hover image.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_banner_additional_image()

        """
        data = {"bannerPositionedImage": "", "secondaryBannerPositionedImage": ""}
        self._update_structured_styles(data)

    def delete_banner_hover_image(self):
        """Remove the current subreddit (redesign) banner hover image.

        Succeeds even if there is no hover image.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_banner_hover_image()

        """
        data = {"secondaryBannerPositionedImage": ""}
        self._update_structured_styles(data)

    def delete_header(self):
        """Remove the current subreddit header image.

        Succeeds even if there is no header image.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_header()

        """
        url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url)

    def delete_image(self, name):
        """Remove the named image from the subreddit.

        Succeeds even if the named image does not exist.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_image("smile")

        """
        url = API_PATH["delete_sr_image"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data={"img_name": name})

    def delete_mobile_header(self):
        """Remove the current subreddit mobile header.

        Succeeds even if there is no mobile header.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_mobile_header()

        """
        # NOTE(review): shares the endpoint with delete_header — presumably
        # the server treats header and mobile header alike; confirm upstream.
        url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url)

    def delete_mobile_icon(self):
        """Remove the current subreddit mobile icon.

        Succeeds even if there is no mobile icon.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.delete_mobile_icon()

        """
        url = API_PATH["delete_sr_icon"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url)

    def update(self, stylesheet, reason=None):
        """Update the subreddit's stylesheet.

        :param stylesheet: The CSS for the new stylesheet.
        :param reason: The reason for updating the stylesheet.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.update(
                "p { color: green; }", "color text green"
            )

        """
        data = {"op": "save", "reason": reason, "stylesheet_contents": stylesheet}
        url = API_PATH["subreddit_stylesheet"].format(subreddit=self.subreddit)
        self.subreddit._reddit.post(url, data=data)

    def upload(self, name, image_path):
        """Upload an image to the Subreddit.

        :param name: The name to use for the image. If an image already exists
            with the same name, it will be replaced.
        :param image_path: A path to a jpeg or png image.
        :returns: A dictionary containing a link to the uploaded image under
            the key ``img_src``.

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.upload("smile", "img.png")

        """
        return self._upload_image(image_path, {"name": name, "upload_type": "img"})

    def upload_banner(self, image_path):
        """Upload an image for the subreddit's (redesign) banner image.

        :param image_path: A path to a jpeg or png image.

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.upload_banner("banner.png")

        """
        image_type = "bannerBackgroundImage"
        image_url = self._upload_style_asset(image_path, image_type)
        self._update_structured_styles({image_type: image_url})

    def upload_banner_additional_image(self, image_path, align=None):
        """Upload an image for the subreddit's (redesign) additional image.

        :param image_path: A path to a jpeg or png image.
        :param align: Either ``left``, ``centered``, or ``right``.
            (default: ``left``).

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            subreddit = reddit.subreddit("SUBREDDIT")
            subreddit.stylesheet.upload_banner_additional_image("banner.png")

        """
        alignment = {}
        if align is not None:
            if align not in {"left", "centered", "right"}:
                raise ValueError(
                    "align argument must be either `left`, `centered`, or `right`"
                )
            alignment["bannerPositionedImagePosition"] = align
        image_type = "bannerPositionedImage"
        image_url = self._upload_style_asset(image_path, image_type)
        style_data = {image_type: image_url}
        if alignment:
            style_data.update(alignment)
        self._update_structured_styles(style_data)

    def upload_banner_hover_image(self, image_path):
        """Upload an image for the subreddit's (redesign) banner hover image.

        :param image_path: A path to a jpeg or png image.

        Fails if the Subreddit does not have an additional image defined.

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            subreddit = reddit.subreddit("SUBREDDIT")
            subreddit.stylesheet.upload_banner_hover_image("banner.png")

        """
        image_type = "secondaryBannerPositionedImage"
        image_url = self._upload_style_asset(image_path, image_type)
        self._update_structured_styles({image_type: image_url})

    def upload_header(self, image_path):
        """Upload an image to be used as the Subreddit's header image.

        :param image_path: A path to a jpeg or png image.
        :returns: A dictionary containing a link to the uploaded image under
            the key ``img_src``.

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.upload_header("header.png")

        """
        return self._upload_image(image_path, {"upload_type": "header"})

    def upload_mobile_header(self, image_path):
        """Upload an image to be used as the Subreddit's mobile header.

        :param image_path: A path to a jpeg or png image.
        :returns: A dictionary containing a link to the uploaded image under
            the key ``img_src``.

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.upload_mobile_header("header.png")

        """
        return self._upload_image(image_path, {"upload_type": "banner"})

    def upload_mobile_icon(self, image_path):
        """Upload an image to be used as the Subreddit's mobile icon.

        :param image_path: A path to a jpeg or png image.
        :returns: A dictionary containing a link to the uploaded image under
            the key ``img_src``.

        Raises ``prawcore.TooLarge`` if the overall request body is too large.

        Raises :class:`.RedditAPIException` if there are other issues with the
        uploaded image. Unfortunately the exception info might not be very
        specific, so try through the website with the same image to see what
        the problem actually might be.

        For example:

        .. code-block:: python

            reddit.subreddit("SUBREDDIT").stylesheet.upload_mobile_icon("icon.png")

        """
        return self._upload_image(image_path, {"upload_type": "icon"})
class SubredditWiki:
    """Provides a set of wiki functions to a Subreddit."""

    def __getitem__(self, page_name):
        """Lazily return the WikiPage for the subreddit named ``page_name``.

        This method is to be used to fetch a specific wikipage, like so:

        .. code-block:: python

            wikipage = reddit.subreddit("iama").wiki["proof"]
            print(wikipage.content_md)

        """
        # Page names are case-insensitive; normalize before lookup.
        return WikiPage(self.subreddit._reddit, self.subreddit, page_name.lower())

    def __init__(self, subreddit):
        """Create a SubredditWiki instance.

        :param subreddit: The subreddit whose wiki to work with.

        """
        self.subreddit = subreddit
        self.banned = SubredditRelationship(subreddit, "wikibanned")
        self.contributor = SubredditRelationship(subreddit, "wikicontributor")

    def __iter__(self):
        """Iterate through the pages of the wiki.

        This method is to be used to discover all wikipages for a subreddit:

        .. code-block:: python

            for wikipage in reddit.subreddit("iama").wiki:
                print(wikipage)

        """
        reddit = self.subreddit._reddit
        listing = reddit.get(
            API_PATH["wiki_pages"].format(subreddit=self.subreddit),
            params={"unique": reddit._next_unique},
        )
        for page_name in listing["data"]:
            yield WikiPage(reddit, self.subreddit, page_name)

    def create(self, name, content, reason=None, **other_settings):
        """Create a new wiki page.

        :param name: The name of the new WikiPage. This name will be
            normalized.
        :param content: The content of the new WikiPage.
        :param reason: (Optional) The reason for the creation.
        :param other_settings: Additional keyword arguments to pass.

        To create the wiki page ``praw_test`` in ``r/test`` try:

        .. code-block:: python

            reddit.subreddit("test").wiki.create(
                "praw_test", "wiki body text", reason="PRAW Test Creation"
            )

        """
        # Normalize to the canonical page name: spaces become underscores,
        # everything lower-cased.
        normalized = name.replace(" ", "_").lower()
        page = WikiPage(self.subreddit._reddit, self.subreddit, normalized)
        page.edit(content=content, reason=reason, **other_settings)
        return page

    def revisions(self, **generator_kwargs):
        """Return a :class:`.ListingGenerator` for recent wiki revisions.

        Additional keyword arguments are passed in the initialization of
        :class:`.ListingGenerator`.

        To view the wiki revisions for ``"praw_test"`` in ``r/test`` try:

        .. code-block:: python

            for item in reddit.subreddit("test").wiki["praw_test"].revisions():
                print(item)

        """
        url = API_PATH["wiki_revisions"].format(subreddit=self.subreddit)
        return WikiPage._revision_generator(self.subreddit, url, generator_kwargs)
| 35.841477
| 150
| 0.613261
|
acfd2e8c255838bb59a630a182ac8d89876df5c4
| 1,273
|
py
|
Python
|
tools/scimitar-gdb/python/scimitar/threads/hpx_gdb_state.py
|
parsa/scmitar
|
e8a95a12e99284bb55b710bdf7acf16d584cd008
|
[
"BSL-1.0"
] | null | null | null |
tools/scimitar-gdb/python/scimitar/threads/hpx_gdb_state.py
|
parsa/scmitar
|
e8a95a12e99284bb55b710bdf7acf16d584cd008
|
[
"BSL-1.0"
] | null | null | null |
tools/scimitar-gdb/python/scimitar/threads/hpx_gdb_state.py
|
parsa/scmitar
|
e8a95a12e99284bb55b710bdf7acf16d584cd008
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Scimitar: Ye Distributed Debugger
#
# Copyright (c) 2016 Parsa Amini
# Copyright (c) 2016 Hartmut Kaiser
# Copyright (c) 2016 Thomas Heller
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
import gdb
import threading
class HPXGdbState:
    """Per-OS-thread saved HPX thread contexts for a GDB session.

    ``save_context`` records one context per OS thread; ``restore`` switches
    each recorded OS thread back to its saved context and then returns GDB to
    the thread that was selected when ``restore`` was called.
    """

    def __init__(self):
        # Maps OS thread number -> saved context; None until first save.
        self.context = None
        # Serializes restore() across threads.
        self.lock = threading.Lock()

    def save_context(self, ctx):
        """Record ``ctx`` for the currently selected OS thread.

        Only the first context saved per OS thread is kept; later saves for
        the same thread are ignored.

        :param ctx: A switchable context object (must provide ``switch()``).
        """
        if self.context is None:
            self.context = {}
        os_thread = gdb.selected_thread().num
        if os_thread not in self.context:
            self.context[os_thread] = ctx

    def restore(self):
        """Switch every recorded OS thread back to its saved context.

        Always returns GDB to the originally selected thread, even if a
        context switch raises.
        """
        # BUG FIX: the original called lock.acquire() and then
        # gdb.selected_thread() *outside* the try block — an exception there
        # would leave the lock held forever. ``with`` guarantees release.
        with self.lock:
            cur_os_thread = gdb.selected_thread().num
            try:
                if self.context is not None:
                    for os_thread, ctx in self.context.items():
                        gdb.execute("thread %d" % os_thread, False, True)
                        ctx.switch()
                    self.context = None
            finally:
                # Restore the user's originally selected thread.
                gdb.execute("thread %d" % cur_os_thread, False, True)
# vim: :ai:sw=4:ts=4:sts=4:et:ft=python:fo=corqj2:sm:tw=79:
| 27.085106
| 78
| 0.593087
|
acfd2eb94e133920aff0d9f750491502a9e3bebd
| 7,900
|
py
|
Python
|
monai/handlers/metrics_saver.py
|
albarqounilab/MONAI
|
bb0b307d68021a243011a58fd82a1d275f00a51a
|
[
"Apache-2.0"
] | null | null | null |
monai/handlers/metrics_saver.py
|
albarqounilab/MONAI
|
bb0b307d68021a243011a58fd82a1d275f00a51a
|
[
"Apache-2.0"
] | null | null | null |
monai/handlers/metrics_saver.py
|
albarqounilab/MONAI
|
bb0b307d68021a243011a58fd82a1d275f00a51a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, List, Optional, Sequence, Union
from monai.config import IgniteInfo
from monai.data import decollate_batch
from monai.handlers.utils import write_metrics_reports
from monai.utils import ImageMetaKey as Key
from monai.utils import ensure_tuple, min_version, optional_import, string_list_all_gather
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
idist, _ = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
class MetricsSaver:
"""
ignite handler to save metrics values and details into expected files.
Args:
save_dir: directory to save the metrics and metric details.
metrics: expected final metrics to save into files, can be: None, "*" or list of strings.
None - don't save any metrics into files.
"*" - save all the existing metrics in `engine.state.metrics` dict into separate files.
list of strings - specify the expected metrics to save.
default to "*" to save all the metrics into `metrics.csv`.
metric_details: expected metric details to save into files, the data comes from
`engine.state.metric_details`, which should be provided by different `Metrics`,
typically, it's some intermediate values in metric computation.
for example: mean dice of every channel of every image in the validation dataset.
it must contain at least 2 dims: (batch, classes, ...),
if not, will unsequeeze to 2 dims.
this arg can be: None, "*" or list of strings.
None - don't save any metric_details into files.
"*" - save all the existing metric_details in `engine.state.metric_details` dict into separate files.
list of strings - specify the metric_details of expected metrics to save.
if not None, every metric_details array will save a separate `{metric name}_raw.csv` file.
batch_transform: a callable that is used to extract the `meta_data` dictionary of
the input images from `ignite.engine.state.batch` if saving metric details. the purpose is to get the
input filenames from the `meta_data` and store with metric details together.
summary_ops: expected computation operations to generate the summary report.
it can be: None, "*" or list of strings, default to None.
None - don't generate summary report for every expected metric_details.
"*" - generate summary report for every metric_details with all the supported operations.
list of strings - generate summary report for every metric_details with specified operations, they
should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
note that: for the overall summary, it computes `nanmean` of all classes for each image first,
then compute summary. example of the generated summary report::
class mean median max 5percentile 95percentile notnans
class0 6.0000 6.0000 7.0000 5.1000 6.9000 2.0000
class1 6.0000 6.0000 6.0000 6.0000 6.0000 1.0000
mean 6.2500 6.2500 7.0000 5.5750 6.9250 2.0000
save_rank: only the handler on specified rank will save to files in multi-gpus validation, default to 0.
delimiter: the delimiter character in CSV file, default to "\t".
output_type: expected output file type, supported types: ["csv"], default to "csv".
"""
def __init__(
self,
save_dir: str,
metrics: Optional[Union[str, Sequence[str]]] = "*",
metric_details: Optional[Union[str, Sequence[str]]] = None,
batch_transform: Callable = lambda x: x,
summary_ops: Optional[Union[str, Sequence[str]]] = None,
save_rank: int = 0,
delimiter: str = "\t",
output_type: str = "csv",
) -> None:
self.save_dir = save_dir
self.metrics = ensure_tuple(metrics) if metrics is not None else None
self.metric_details = ensure_tuple(metric_details) if metric_details is not None else None
self.batch_transform = batch_transform
self.summary_ops = ensure_tuple(summary_ops) if summary_ops is not None else None
self.save_rank = save_rank
self.deli = delimiter
self.output_type = output_type
self._filenames: List[str] = []
def attach(self, engine: Engine) -> None:
    """Register this handler's lifecycle hooks on the given engine.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
    """
    hooks = (
        (Events.EPOCH_STARTED, self._started),
        (Events.ITERATION_COMPLETED, self._get_filenames),
        (Events.EPOCH_COMPLETED, self),
    )
    for event, handler in hooks:
        engine.add_event_handler(event, handler)
def _started(self, engine: Engine) -> None:
    # reset the filenames collected so far at the beginning of every epoch
    self._filenames = []
def _get_filenames(self, engine: Engine) -> None:
    """Collect the filename of every item in the current batch (only when metric details are requested)."""
    if self.metric_details is None:
        return
    meta_data = self.batch_transform(engine.state.batch)
    if isinstance(meta_data, dict):
        # decollate the `dictionary of list` to `list of dictionaries`
        meta_data = decollate_batch(meta_data)
    self._filenames.extend(f"{m.get(Key.FILENAME_OR_OBJ)}" for m in meta_data)
def __call__(self, engine: Engine) -> None:
    """Gather filenames across ranks, then write metric reports on the configured save rank.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
    """
    world_size = idist.get_world_size()
    if self.save_rank >= world_size:
        raise ValueError("target save rank is greater than the distributed group size.")

    # all gather file names across ranks
    _images = string_list_all_gather(strings=self._filenames) if world_size > 1 else self._filenames

    # only save metrics to file in specified rank
    if idist.get_rank() != self.save_rank:
        return

    _metrics = {}
    if self.metrics is not None and len(engine.state.metrics) > 0:
        for name, value in engine.state.metrics.items():
            if name in self.metrics or "*" in self.metrics:
                _metrics[name] = value

    _metric_details = {}
    if self.metric_details is not None and len(engine.state.metric_details) > 0:
        _metric_details = {
            name: detail
            for name, detail in engine.state.metric_details.items()
            if name in self.metric_details or "*" in self.metric_details
        }

    write_metrics_reports(
        save_dir=self.save_dir,
        images=None if len(_images) == 0 else _images,
        metrics=_metrics,
        metric_details=_metric_details,
        summary_ops=self.summary_ops,
        deli=self.deli,
        output_type=self.output_type,
    )
| 53.020134
| 118
| 0.657342
|
acfd2ebc60eacee83aa05597b5736a2bd5df46dc
| 3,592
|
py
|
Python
|
tutorials/02-intermediate/bidirectional_recurrent_neural_network/main.py
|
ksopyla/pytorch-tutorial
|
1173679acf8c5eeeb24a271fdc109d4c818cf005
|
[
"MIT"
] | 1
|
2018-12-17T10:33:03.000Z
|
2018-12-17T10:33:03.000Z
|
tutorials/02-intermediate/bidirectional_recurrent_neural_network/main.py
|
ksopyla/pytorch-tutorial
|
1173679acf8c5eeeb24a271fdc109d4c818cf005
|
[
"MIT"
] | null | null | null |
tutorials/02-intermediate/bidirectional_recurrent_neural_network/main.py
|
ksopyla/pytorch-tutorial
|
1173679acf8c5eeeb24a271fdc109d4c818cf005
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration: prefer GPU when available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters
sequence_length = 28  # each 28x28 MNIST image is fed as 28 time steps (see the reshape in the train loop)
input_size = 28  # one image row (28 pixels) per time step
hidden_size = 128  # LSTM hidden units per direction
num_layers = 2  # stacked LSTM layers
num_classes = 10  # digits 0-9
batch_size = 100
num_epochs = 2
learning_rate = 0.003
# MNIST dataset (downloaded on first run)
train_dataset = torchvision.datasets.MNIST(
    root='./data/', train=True, transform=transforms.ToTensor(), download=True
)
test_dataset = torchvision.datasets.MNIST(
    root='./data/', train=False, transform=transforms.ToTensor()
)

# Data loaders: shuffle only the training set
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# Bidirectional recurrent neural network (many-to-one)
class BiRNN(nn.Module):
    """Bidirectional LSTM classifier: reads a sequence and predicts a class from the last time step.

    Args:
        input_size: number of features per time step.
        hidden_size: LSTM hidden units per direction.
        num_layers: number of stacked LSTM layers.
        num_classes: size of the output logits.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        # forward and backward hidden states are concatenated, hence hidden_size * 2
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        """Return logits of shape (batch, num_classes) for x of shape (batch, seq_len, input_size)."""
        # Initial hidden/cell states are allocated on the input's device
        # (fixes the original's reliance on a module-level `device` global,
        # which made the class unusable outside this script).
        # First dim is num_layers * 2 because of bidirection.
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)

        # out: (batch, seq_len, hidden_size * 2)
        out, _ = self.lstm(x, (h0, c0))

        # Decode the hidden state of the last time step only (many-to-one)
        return self.fc(out[:, -1, :])
model = BiRNN(input_size, hidden_size, num_layers, num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for step, (images, labels) in enumerate(train_loader, start=1):
        # treat each image as a sequence of rows: (batch, seq_len, input_size)
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)

        # Forward pass
        loss = criterion(model(images), labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, step, total_step, loss.item()))
# Test the model (no gradients needed for inference)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        predicted = model(images).argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
| 35.215686
| 104
| 0.59716
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.